cisco

Shitty cloud provider
git clone https://git.ne02ptzero.me/git/cisco

commit adc1e219951039c7e29ffcd055746f422b19aa86
parent 606616380545f1df8e900af5cbe53dc0d7f7269f
Author: Louis Solofrizzo <louis@ne02ptzero.me>
Date:   Sat, 30 Mar 2019 15:17:55 +0100

Ansible: Add scripts and deployment

Signed-off-by: Louis Solofrizzo <louis@ne02ptzero.me>

Diffstat:
A deploy/CMakeLists.txt            |   3 +++
A deploy/common.yml                |  17 +++++++++++++++++
A deploy/conf/bashrc               |   7 +++++++
A deploy/conf/common.conf          |  54 ++++++++++++++++++++++++++++++++++++++++++++++++++
A deploy/conf/default.conf         |   4 ++++
A deploy/conf/default.conf-3.0     |   4 ++++
A deploy/conf/lf-cloud-api.service |  12 ++++++++++++
A deploy/conf/lf-cloud-api.yaml    |  12 ++++++++++++
A deploy/conf/lf-slave.service     |  12 ++++++++++++
A deploy/conf/lf-slave.yaml        |   2 ++
A deploy/conf/lxc-net              |  31 +++++++++++++++++++++++++++++++
A deploy/conf/motd                 |  14 ++++++++++++++
A deploy/deploy.yml                |   3 +++
A deploy/master.retry              |   1 +
A deploy/master.yml                |  31 +++++++++++++++++++++++++++++++
A deploy/prd/deploy.retry          |   1 +
A deploy/prd/deploy.yml            |   1 +
A deploy/prd/inventory.yml         |  19 +++++++++++++++++++
A deploy/slaves.yml                |  50 ++++++++++++++++++++++++++++++++++++++++++++++
A scripts/dyn.py                   | 123 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
20 files changed, 401 insertions(+), 0 deletions(-)

diff --git a/deploy/CMakeLists.txt b/deploy/CMakeLists.txt
@@ -0,0 +1,3 @@
+generate_deploy_targets_ansible(cisco prd)
+generate_deploy_playbook_ansible(cisco prd master)
+generate_deploy_playbook_ansible(cisco prd slaves)
diff --git a/deploy/common.yml b/deploy/common.yml
@@ -0,0 +1,17 @@
+- name: Main configuration
+  hosts: "*slave*"
+  tasks:
+    - name: Set hostnames
+      hostname:
+        name: "{{ ansible_host }}"
+    - name: Get physical node name
+      shell: "cat /etc/physical || mount | head -n1 | cut -d'/' -f4 | cut -d' ' -f1"
+      register: physical_node_name
+    - name: Upload motd
+      template:
+        src: ./conf/motd
+        dest: /etc/motd
+    - name: Upload bashrc
+      template:
+        src: ./conf/bashrc
+        dest: /root/.bashrc
diff --git a/deploy/conf/bashrc b/deploy/conf/bashrc
@@ -0,0 +1,7 @@
+# Prompt
+export PS1="\[\e[90m\][ \[\e[94m\]{{ product_name }}\[\e[90m\] ] [ \[\e[92m\]\u\[\e[90m\] ] [ \[\e[93m\]\h\[\e[90m\] ] (\[\e[37m\]\W\[\e[90m\]) \[\e[0m\]\$> "
+
+# Aliases
+alias l='ls --color'
+alias ll='ls -la --color'
+alias c='cd ..'
diff --git a/deploy/conf/common.conf b/deploy/conf/common.conf
@@ -0,0 +1,54 @@
+# Default configuration shared by all containers
+
+# Setup the LXC devices in /dev/lxc/
+lxc.tty.dir = lxc
+
+# Allow for 1024 pseudo terminals
+lxc.pty.max = 1024
+
+# Setup 4 tty devices
+lxc.tty.max = 4
+
+# Drop some harmful capabilities
+lxc.cap.drop = mac_admin mac_override sys_time sys_module sys_rawio
+
+# Ensure hostname is changed on clone
+lxc.hook.clone = /usr/share/lxc/hooks/clonehostname
+
+# CGroup whitelist
+lxc.cgroup.devices.deny = a
+## Allow any mknod (but not reading/writing the node)
+lxc.cgroup.devices.allow = c *:* m
+lxc.cgroup.devices.allow = b *:* m
+## Allow specific devices
+### /dev/null
+lxc.cgroup.devices.allow = c 1:3 rwm
+### /dev/zero
+lxc.cgroup.devices.allow = c 1:5 rwm
+### /dev/full
+lxc.cgroup.devices.allow = c 1:7 rwm
+### /dev/tty
+lxc.cgroup.devices.allow = c 5:0 rwm
+### /dev/console
+lxc.cgroup.devices.allow = c 5:1 rwm
+### /dev/ptmx
+lxc.cgroup.devices.allow = c 5:2 rwm
+### /dev/random
+lxc.cgroup.devices.allow = c 1:8 rwm
+### /dev/urandom
+lxc.cgroup.devices.allow = c 1:9 rwm
+### /dev/pts/*
+lxc.cgroup.devices.allow = c 136:* rwm
+### fuse
+lxc.cgroup.devices.allow = c 10:229 rwm
+
+# Setup the default mounts
+lxc.mount.auto = cgroup:mixed proc:mixed sys:mixed
+lxc.mount.entry = /sys/fs/fuse/connections sys/fs/fuse/connections none bind,optional 0 0
+
+# Blacklist some syscalls which are not safe in privileged
+# containers
+lxc.seccomp.profile = /usr/share/lxc/config/common.seccomp
+
+# Lastly, include all the configs from /usr/share/lxc/config/common.conf.d/
+lxc.include = /usr/share/lxc/config/common.conf.d/
diff --git a/deploy/conf/default.conf b/deploy/conf/default.conf
@@ -0,0 +1,4 @@
+lxc.network.type = veth
+lxc.network.link = lxcbr0
+lxc.network.flags = up
+lxc.network.hwaddr = 00:16:3e:xx:xx:xx
diff --git a/deploy/conf/default.conf-3.0 b/deploy/conf/default.conf-3.0
@@ -0,0 +1,4 @@
+lxc.net.0.type = veth
+lxc.net.0.link = lxcbr0
+lxc.net.0.flags = up
+lxc.net.0.hwaddr = 00:16:3e:xx:xx:xx
diff --git a/deploy/conf/lf-cloud-api.service b/deploy/conf/lf-cloud-api.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=LouiFox Cisco Master API
+After=network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/local/sbin/lf-cloud-api -config /etc/cisco.conf
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/deploy/conf/lf-cloud-api.yaml b/deploy/conf/lf-cloud-api.yaml
@@ -0,0 +1,12 @@
+cert: /etc/lf-cloud/cloud.louifox.house.crt
+key: /etc/lf-cloud/cloud.louifox.house.key
+host: 0.0.0.0
+port: 8080
+ca: /etc/lf-cloud/ca.crt
+sshproxy: localhost
+sshkey: /etc/lf-cloud/cisco.key
+db: /var/lib/database_cisco.sqlite3.db
+slaves:
+{% for slave in groups['slave'] %}
+  - {{ hostvars[hosts[slave]['hostname']]['ansible_facts']['tun0']['ipv4']['address'] }}:8452
+{% endfor %}
diff --git a/deploy/conf/lf-slave.service b/deploy/conf/lf-slave.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=LouiFox Cisco Slave API
+After=network.target
+
+[Service]
+Type=simple
+User=root
+ExecStart=/usr/local/sbin/lf-slave -config /etc/lf-slave.conf
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/deploy/conf/lf-slave.yaml b/deploy/conf/lf-slave.yaml
@@ -0,0 +1,2 @@
+db: /var/lib/lf-slave.db
+port: 8452
diff --git a/deploy/conf/lxc-net b/deploy/conf/lxc-net
@@ -0,0 +1,31 @@
+# Leave USE_LXC_BRIDGE as "true" if you want to use lxcbr0 for your
+# containers. Set to "false" if you'll use virbr0 or another existing
+# bridge, or macvlan to your host's NIC.
+USE_LXC_BRIDGE="true"
+
+# If you change the LXC_BRIDGE to something other than lxcbr0, then
+# you will also need to update your /etc/lxc/default.conf as well as the
+# configuration (/var/lib/lxc/<container>/config) for any containers
+# already created using the default config to reflect the new bridge
+# name.
+# If you have the dnsmasq daemon installed, you'll also have to update
+# /etc/dnsmasq.d/lxc and restart the system wide dnsmasq daemon.
+LXC_BRIDGE="lxcbr0"
+LXC_ADDR="10.0.3.1"
+LXC_NETMASK="255.255.255.0"
+LXC_NETWORK="10.0.3.0/24"
+LXC_DHCP_RANGE="10.0.3.2,10.0.3.254"
+LXC_DHCP_MAX="253"
+# Uncomment the next line if you'd like to use a conf-file for the lxcbr0
+# dnsmasq. For instance, you can use 'dhcp-host=mail1,10.0.3.100' to have
+# container 'mail1' always get ip address 10.0.3.100.
+#LXC_DHCP_CONFILE=/etc/lxc/dnsmasq.conf
+
+# Uncomment the next line if you want lxcbr0's dnsmasq to resolve the .lxc
+# domain. You can then add "server=/lxc/10.0.3.1' (or your actual $LXC_ADDR)
+# to your system dnsmasq configuration file (normally /etc/dnsmasq.conf,
+# or /etc/NetworkManager/dnsmasq.d/lxc.conf on systems that use NetworkManager).
+# Once these changes are made, restart the lxc-net and network-manager services.
+# 'container1.lxc' will then resolve on your host.
+#LXC_DOMAIN="lxc"
+
diff --git a/deploy/conf/motd b/deploy/conf/motd
@@ -0,0 +1,14 @@
+
+ _  __       _
+| |/ _| ___(_)___  ___ ___
+| | |_ _____ / __| / __|/ __/ _ \
+| | _|_____| (__| \__ \ (_| (_) |
+|_|_|      \___|_|___/\___\___/
+
+Host     : {{ ansible_ssh_host }}
+Physical : {{ physical_node_name.stdout }}
+IP       : {{ hosts[ansible_ssh_host]['ipv4'] }}
+Arch     : {{ vars['ansible_architecture'] }}
+Distro   : {{ vars['ansible_distribution'] }}
+Kernel   : {{ vars['ansible_kernel'] }}
+
diff --git a/deploy/deploy.yml b/deploy/deploy.yml
@@ -0,0 +1,3 @@
+- import_playbook: common.yml
+- import_playbook: slaves.yml
+- import_playbook: master.yml
diff --git a/deploy/master.retry b/deploy/master.retry
@@ -0,0 +1 @@
+par1lf-cisco-prd-cl1master01
diff --git a/deploy/master.yml b/deploy/master.yml
@@ -0,0 +1,31 @@
+- import_playbook: common.yml
+
+- name: Master configuration
+  hosts: "*master*"
+  tasks:
+    - name: Upload API binary
+      copy:
+        src: "{{ lookup('env', 'BIN_DIR') }}/api/lf-cloud-api"
+        dest: /usr/local/sbin/
+        mode: +x
+    - name: Upload configuration
+      template:
+        src: ./conf/lf-cloud-api.yaml
+        dest: /etc/cisco.conf
+    - name: Upload service file
+      copy:
+        src: ./conf/lf-cloud-api.service
+        dest: /etc/systemd/system
+    - name: Enable & Restart service
+      systemd:
+        name: lf-cloud-api
+        state: restarted
+        enabled: yes
+    - name: Upload front (sources)
+      copy:
+        src: ../front/src/
+        dest: /opt/front/src
+    - name: Upload front (index)
+      copy:
+        src: ../front/index.html
+        dest: /opt/front
diff --git a/deploy/prd/deploy.retry b/deploy/prd/deploy.retry
@@ -0,0 +1 @@
+par1lf-cisco-prd-cl1master01
diff --git a/deploy/prd/deploy.yml b/deploy/prd/deploy.yml
@@ -0,0 +1 @@
+- import_playbook: ../deploy.yml
diff --git a/deploy/prd/inventory.yml b/deploy/prd/inventory.yml
@@ -0,0 +1,19 @@
+name: cisco
+machines:
+  par:
+    1:
+      lf:
+        cl1:
+          master:
+            - ipv4: 51.15.190.29
+              ansible_ssh_port: 2222
+          slave:
+            - ipv4: 192.168.1.102
+            - ipv4: 192.168.1.152
+            - ipv4: 192.168.1.78
+            - ipv4: 192.168.1.158
+            - ipv4: 192.168.1.129
+            - ipv4: 192.168.1.79
+            - ipv4: 192.168.1.155
+            - ipv4: 192.168.1.182
+            - ipv4: 192.168.1.141
diff --git a/deploy/slaves.yml b/deploy/slaves.yml
@@ -0,0 +1,50 @@
+- name: Slaves configurations
+  hosts: "*slave*"
+  tasks:
+    - name: Install LXC
+      apt:
+        name: lxc
+        state: present
+        update_cache: yes
+    - name: Upload lxc-net configuration (default)
+      copy:
+        src: ./conf/lxc-net
+        dest: /etc/default/
+    - name: Upload lxc-net configuration (etc)
+      copy:
+        src: ./conf/default.conf
+        dest: /etc/lxc/
+    - name: Enable & Start lxc-net
+      systemd:
+        name: lxc-net
+        enabled: yes
+        state: restarted
+
+- name: Slave API
+  hosts: "*slave*"
+  tasks:
+    - name: Upload binary (Intel)
+      copy:
+        src: "{{ lookup('env', 'BIN_DIR') }}/slave/lf-slave"
+        dest: /usr/local/sbin/lf-slave
+        mode: +x
+      when: vars["ansible_architecture"] == "x86_64"
+    - name: Upload binary (ARM)
+      copy:
+        src: "{{ lookup('env', 'BIN_DIR') }}/slave/lf-slave-arm"
+        dest: /usr/local/sbin/lf-slave
+        mode: +x
+      when: vars["ansible_architecture"] == "armv7l"
+    - name: Upload service file
+      copy:
+        src: ./conf/lf-slave.service
+        dest: /etc/systemd/system
+    - name: Upload configuration file
+      copy:
+        src: ./conf/lf-slave.yaml
+        dest: /etc/lf-slave.conf
+    - name: Enable & Restart service
+      systemd:
+        name: lf-slave
+        state: restarted
+        enabled: yes
diff --git a/scripts/dyn.py b/scripts/dyn.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+
+import sys
+import yaml
+import json
+import os
+import argparse
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--hosts', action='store_true')
+parser.add_argument('--list', action='store_true')
+parser.add_argument('--host', nargs='+', action='store')
+parser.add_argument('--dns', action='store_true')
+args = parser.parse_args()
+
+# check arguments
+if not (args.hosts or args.dns or args.list or args.host):
+    parser.error('No action specified. Use --hosts, --dns, --list or --host')
+
+def read_machines(inventory, machines, result, product_name, env):
+    if "machines" not in inventory:
+        return
+
+    for region, d in inventory['machines'].items():
+        for az, d in d.items():
+            for datacenter, d in d.items():
+                for cluster, d in d.items():
+                    for machine_type, d in d.items():
+                        count = 1
+                        for ip in d:
+                            name = region + str(az) + datacenter + "-" + product_name + \
+                                   "-" + env + "-" + cluster + machine_type + str(count).zfill(2)
+                            machines[name] = {
+                                'ipv4': ip['ipv4'],
+                                'hostname': name,
+                            }
+
+                            for key, item in ip.items():
+                                if key == "ipv4":
+                                    continue
+                                machines[name][key] = item
+
+                            if machine_type not in result:
+                                result[machine_type] = {'hosts': []}
+                            result[machine_type]['hosts'].append(name)
+
+                            if machine_type not in result['all']['children']:
+                                result['all']['children'].append(machine_type)
+
+                            result['all']['vars']['hosts'][name] = machines[name]
+
+                            count = count + 1
+
+def include_ext_inv(machines, result, path):
+    with open(os.getenv("SRC_DIR") + '/' + path + '/inventory.yml', 'r') as f:
+        try:
+            inventory = yaml.load(f, Loader=yaml.SafeLoader)
+        except yaml.YAMLError as exc:
+            print(exc)
+            sys.exit(1)
+
+        product_name = inventory['name']
+        if "environment" not in inventory:
+            env = path.split("/")[-1]
+        else:
+            env = inventory["environment"]
+
+        read_machines(inventory, machines, result, product_name, env)
+
+with open('inventory.yml', 'r') as f:
+    try:
+        inventory = yaml.load(f, Loader=yaml.SafeLoader)
+    except yaml.YAMLError as exc:
+        print(exc)
+        sys.exit(1)
+
+    product_name = inventory['name']
+
+    if "environment" not in inventory:
+        env = os.getcwd().split('/')[-1]
+
+        if env != "prd" and env != "stg" and env != "dev":
+            print("Unknown environment: " + env)
+            sys.exit(1)
+    else:
+        env = inventory["environment"]
+
+    machines = {}
+    result = {
+        'all': {
+            'children': [],
+            'vars': inventory['vars'] if "vars" in inventory else {}
+        }
+    }
+
+    result['all']['vars']['hosts'] = {}
+    result['all']['vars']['product_name'] = product_name
+
+    # Default to root user for SSH if none was specified
+    if "ansible_ssh_user" not in result['all']['vars']:
+        result['all']['vars']['ansible_ssh_user'] = "root"
+
+    read_machines(inventory, machines, result, product_name, env)
+
+    if "includes" in inventory:
+        for include in inventory['includes']:
+            include_ext_inv(machines, result, include)
+
+    if len(sys.argv) == 1:
+        print("Need at least one option")
+        sys.exit(1)
+
+    if args.hosts:
+        for key, machine in machines.items():
+            print(machine['ipv4'] + "\t" + key)
+    elif args.dns:
+        for key, machine in machines.items():
+            print(key + " IN A " + machine['ipv4'])
+    elif args.list:
+        print(json.dumps(result, indent=4))
+    elif args.host:
+        if args.host[0] in machines:
+            print(json.dumps(machines[args.host[0]]))
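
Note: scripts/dyn.py follows Ansible's dynamic-inventory convention (--list emits the whole inventory as JSON, --host emits the variables of a single machine), so it can be passed straight to ansible-playbook with -i. The lines below are a plausible usage sketch, not part of the commit: they assume the script is executable, that it is run from a directory containing an inventory.yml (such as deploy/prd/), and that BIN_DIR points at a hypothetical build-output directory holding the lf-cloud-api and lf-slave binaries the playbooks upload.

$ cd deploy/prd
$ export BIN_DIR=$HOME/build/bin          # hypothetical build output location
$ ../../scripts/dyn.py --hosts            # "<ipv4>\t<name>" per machine, /etc/hosts style
$ ../../scripts/dyn.py --dns              # "<name> IN A <ipv4>" BIND records
$ ../../scripts/dyn.py --host par1lf-cisco-prd-cl1master01   # JSON vars for one machine
$ ansible-playbook -i ../../scripts/dyn.py deploy.yml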