
Larger changes

dev
root · 4 years ago · parent · commit a17f7f8838
  1. DOC.md (3)
  2. Makefile (7)
  3. README.md (3)
  4. ansible.cfg (3)
  5. roles/lxc_swarm/tasks/deploy.yml (3)
  6. roles/lxc_swarm/tasks/lxc_nodes.yml (4)
  7. roles/lxc_swarm/tasks/main.yml (4)
  8. roles/lxc_swarm/tasks/swarm.yml (3)
  9. roles/lxc_swarm/templates/Dockerfile.j2 (4)
  10. roles/lxc_swarm/templates/galera.cnf.j2 (5)
  11. roles/lxc_swarm/vars/main.yml (5)
  12. stack.yaml (26)

DOC.md (3)

@@ -3,3 +3,6 @@
 Command for checking the docker node status: ```docker node ls```
 Command for using and attaching to LXC container (e.g. to 'worker1'): ```lxc-attach --name worker1```.
+One possible limitation of this process is that static IP assignments [are missing in docker
+swarm](https://forums.docker.com/t/docker-swarm-1-13-static-ips-for-containers/28060/13).
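Since deployment is now automated end to end (see the deploy.yml task added below), a few sanity checks along these lines should work after a run; this is a sketch, with the stack name `stack` taken from that task:

```sh
# on the manager (host machine): confirm the workers joined and the services are up
docker node ls
docker stack services stack

# on a worker: attach to the LXC container and check the running galera container
lxc-attach --name worker1 -- docker ps
```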

Makefile (7)

@@ -1,4 +1,4 @@
-all: preautomation provision config swarm_deploy
+all: preautomation provision swarm_deploy
 preautomation:
 	/bin/bash ./ansible_install.sh
@@ -6,8 +6,5 @@ preautomation:
 provision:
 	/bin/bash ./ansible_provisioning.sh
-config:
-	/bin/bash ./ansible_swarm_config.sh
 swarm_deploy:
-	docker stack deploy -c swarm-galera.yml stack
+	/bin/bash ./ansible_swarm_config.sh
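Read together, the two hunks fold the old `config` target into `swarm_deploy` and push the direct `docker stack deploy` call down into the Ansible role, so the Makefile after this change should read roughly as follows (lines outside the hunks are assumed unchanged):

```makefile
all: preautomation provision swarm_deploy

preautomation:
	/bin/bash ./ansible_install.sh

provision:
	/bin/bash ./ansible_provisioning.sh

swarm_deploy:
	/bin/bash ./ansible_swarm_config.sh
```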

README.md (3)

@@ -4,6 +4,9 @@
 * Linux Containers (LXC) for swarm worker nodes
 * Ansible for automatic deployment
 * Galera-inside-docker for MariaDB clustering
+All docker nodes (host machine & LXC containers) run the same operating system:
+* Debian 9.3 (Stretch) with Linux Kernel 4.9.0-5-amd64
 In more detail, the manager node runs on the host machine itself, while the provisioning of LXC containers permits the creation of multiple worker nodes on which the galera cluster runs, all on the same host without the need for separate worker machines.

ansible.cfg (3)

@@ -1,4 +1,5 @@
 [defaults]
 #added parameters for actual management of LXC nodes with ansible because of bridged networking packet loss
 gather_timeout = 30
-timeout = 30
+timeout = 45
+host_key_checking = False

roles/lxc_swarm/tasks/deploy.yml (3)

@@ -0,0 +1,3 @@
+---
+- name: Deploy swarm stack
+  shell: docker stack deploy -c stack.yaml stack

roles/lxc_swarm/tasks/lxc_nodes.yml (4)

@@ -18,6 +18,10 @@
     - "worker2"
   when: inventory_hostname in groups['manager']

+- name: Pause execution for 20 seconds
+  pause:
+    seconds: 20
+
 - name: Start lxc containers
   shell: lxc-start --name {{ item }}
   with_items:

roles/lxc_swarm/tasks/main.yml (4)

@@ -20,3 +20,7 @@
 - include: Dockerfile.yml
   when: inventory_hostname in groups['workers']
   tags: swarm
+
+- include: deploy.yml
+  when: inventory_hostname in groups['manager']
+  tags: swarm
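Both the Dockerfile build and the new deploy include carry the `swarm` tag, so the swarm bring-up can presumably be re-run in isolation with a tag filter along these lines; the playbook and inventory names are hypothetical stand-ins, since the real invocation is wrapped by ansible_swarm_config.sh and not shown in this diff:

```sh
# hypothetical playbook/inventory names; --tags swarm limits the run to the swarm-tagged includes
ansible-playbook -i inventory site.yml --tags swarm
```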

roles/lxc_swarm/tasks/swarm.yml (3)

@@ -23,7 +23,8 @@
   file:
     path: /var/container_data/mysql
     state: directory
-    mode: '0755'
+    mode: '0777'
+    recurse: true
   when: inventory_hostname in groups['workers']

 - name: Add docker labels for galera master and secondary nodes

roles/lxc_swarm/templates/Dockerfile.j2 (4)

@@ -6,6 +6,6 @@ RUN add-apt-repository "deb [arch=amd64,arm64,i386,ppc64el] http://mirrors.corei
 RUN apt-get update
 RUN apt install -y rsync mariadb-server-10.4 galera-4 mariadb-client-10.4
 RUN systemctl unmask mariadb
-COPY /opt/galera.cnf /etc/mysql/conf.d/galera.cnf
-RUN echo "wsrep_node_address=`ip a s eth0|grep inet|head -1|awk '{print $2}'|cut -d "/" -f 1`" >> /etc/mysql/conf.d/galera.cnf
+COPY galera.cnf /etc/mysql/conf.d/galera.cnf
+#RUN echo "wsrep_node_address=`ip a s eth0|grep inet|head -1|awk '{print $2}'|cut -d "/" -f 1`" >> /etc/mysql/conf.d/galera.cnf
 ENTRYPOINT ["mysqld","{{ extra_docker_options }}"]
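The config file is now copied from the docker build context instead of an absolute /opt path, so the rendered galera.cnf has to sit next to the generated Dockerfile when the image is built. A sketch of the per-worker build, assuming the templates are rendered into /opt by the Dockerfile.yml task (not part of this diff); only the image tag is taken from stack.yaml:

```sh
# assumption: Dockerfile.j2 and galera.cnf.j2 are rendered into /opt on the worker
docker build -t ubuntu:galera-node-worker2 /opt
```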

roles/lxc_swarm/templates/galera.cnf.j2 (5)

@@ -10,7 +10,8 @@ wsrep_provider=/usr/lib/galera/libgalera_smm.so
 # Galera Cluster Configuration
 wsrep_cluster_name="galera_cluster"
-wsrep_cluster_address="gcomm://{% for host in groups['workers'] %} {{hostvars[host]['ansible_default_ipv4']['address']}} {% endfor %}"
+#wsrep_cluster_address="gcomm://{% for host in groups['workers'] %} {{hostvars[host]['ansible_default_ipv4']['address']}} {% endfor %}"
+wsrep_cluster_address="gcomm://{% for address in container_address %} {{address}} {% endfor %}"

 # Galera Synchronization Configuration
 wsrep_sst_method=rsync
@@ -18,4 +19,6 @@ wsrep_sst_method=rsync
 # Galera Node Configuration
 #wsrep_node_address="{{ ansible_default_ipv4.address }}"
 #wsrep_node_name="{{ ansible_hostname }}"
+wsrep_node_address="{{ container_address[0] if ansible_hostname in groups['workers'][0] else container_address[1] }}"
 wsrep_node_name="{{ nodes[0] if ansible_hostname in groups['workers'][0] else nodes[1] }}"
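With the `container_address` and `nodes` lists added to roles/lxc_swarm/vars/main.yml below, and assuming the first entry of groups['workers'] matches worker1's hostname, the new lines should render on worker1 roughly as:

```
wsrep_cluster_address="gcomm:// 10.0.3.10  10.0.3.11 "
wsrep_node_address="10.0.3.10"
wsrep_node_name="node1"
```

Note that the loop emits space-separated addresses, while Galera's gcomm URLs are conventionally written comma-separated (gcomm://10.0.3.10,10.0.3.11), so the separator may be worth double-checking.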

roles/lxc_swarm/vars/main.yml (5)

@@ -1,5 +1,5 @@
 interface: lxcbr0
-extra_docker_options: "{{ ' --wsrep-new-cluster' if ansible_hostname == 'worker1' else '' }}"
+extra_docker_options: "{{ '--wsrep-new-cluster' if ansible_hostname == 'worker1' else '' }}"
 workers:
   - "10.0.3.100"
   - "10.0.3.101"
@@ -7,3 +7,6 @@ workers:
 nodes:
   - "node1"
   - "node2"
+container_address:
+  - "10.0.3.10"
+  - "10.0.3.11"

stack.yaml (26)

@@ -14,22 +14,27 @@ services:
 #      - TASKID={{.Task.ID}}
 #      - TASKNAME={{.Task.Name}}
 #      - TASKREPID={{.Task.Slot}}
     hostname: node1
+    # Storage volume
     volumes:
       - /var/container_data/mysql:/var/lib/mysql
     networks:
       galera_net:
+        ipv4_address: 10.0.3.10
     deploy:
+      # Replicated mode
+      mode: replicated
       replicas: 1
       restart_policy:
         delay: 10s
         max_attempts: 10
         window: 60s
       placement:
+        # Services must run only on workers with one service per container
         constraints:
           - node.labels.node1 == true
           - node.role == worker
@@ -54,26 +59,39 @@ services:
     image: ubuntu:galera-node-worker2
+#    environment:
+#      - NODENAME={{.Node.Hostname}}
+#      - NODEID={{.Node.ID}}
+#      - SERVICEID={{.Service.ID}}
+#      - SERVICENAME={{.Service.Name}}
+#      - TASKID={{.Task.ID}}
+#      - TASKNAME={{.Task.Name}}
+#      - TASKREPID={{.Task.Slot}}
     hostname: node2
+    # Storage volume
     volumes:
       - /var/container_data/mysql:/var/lib/mysql
     networks:
       galera_net:
+        ipv4_address: 10.0.3.11
     deploy:
+      # Replicated mode
+      mode: replicated
       replicas: 1
       restart_policy:
         delay: 10s
         max_attempts: 10
         window: 60s
       placement:
+        # Services must run only on workers with one service per container
         constraints:
           - node.labels.node2 == true
           - node.role == worker
 #          - node.group == galera
     depends_on:
       - db_cluster_node1
     ports:
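For the new `ipv4_address` entries to take effect, the `galera_net` definition (outside the visible hunks) needs an IPAM subnet covering 10.0.3.x. A sketch of what that block might look like; the driver and subnet are assumptions, and the DOC.md note above still applies, i.e. swarm-mode stacks may ignore per-task static IPs entirely:

```yaml
networks:
  galera_net:
    driver: overlay
    ipam:
      config:
        - subnet: 10.0.3.0/24
```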
