forked from stove/risotto
Compare commits
16 commits
7b74984fb7
...
54ff8f23ed
| Author | SHA1 | Date | |
|---|---|---|---|
| 54ff8f23ed | |||
| 63931b8880 | |||
| 186c886e84 | |||
| 5e735cf453 | |||
| 7ebcff86cb | |||
| 0705409393 | |||
| 84315c7bae | |||
| f0d3ca8f43 | |||
| 88fc83014b | |||
| 120af2ff01 | |||
| 44eb0031de | |||
| bfa697f457 | |||
| 0b0503e109 | |||
| 7f745b3609 | |||
| 4bacf38188 | |||
| 79a549cd4c |
21 changed files with 865 additions and 527 deletions
|
|
@ -1,10 +1,16 @@
|
|||
#!/usr/bin/python3
|
||||
from asyncio import run
|
||||
from os import readlink, walk, chdir, getcwd, makedirs
|
||||
from os.path import join, islink, isdir
|
||||
from risotto.machine import build_files, INSTALL_DIR, INSTALL_CONFIG_DIR, INSTALL_TMPL_DIR, INSTALL_IMAGES_DIR, INSTALL_TESTS_DIR
|
||||
from shutil import rmtree
|
||||
from typing import Dict, Any
|
||||
from shutil import rmtree, copy2
|
||||
import tarfile
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible import constants
|
||||
|
||||
from rougail.template import base
|
||||
from rougail.error import TemplateError
|
||||
from risotto.machine import build_files, INSTALL_DIR, INSTALL_CONFIG_DIR, INSTALL_TMPL_DIR, INSTALL_IMAGES_DIR, INSTALL_TESTS_DIR
|
||||
from risotto.utils import custom_filters
|
||||
|
||||
|
||||
try:
|
||||
|
|
@ -13,6 +19,7 @@ try:
|
|||
class FakeModule(AnsibleModule):
|
||||
def __init__(self):
|
||||
pass
|
||||
from ansible.plugins.action.template import ActionModule as TmplActionModule
|
||||
except:
|
||||
class ActionBase():
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
|
@ -64,15 +71,24 @@ class ActionModule(ActionBase):
|
|||
only_machine = module_args.pop('only_machine')
|
||||
configure_host = module_args.pop('configure_host')
|
||||
copy_tests = module_args.pop('copy_tests')
|
||||
# define ansible engine
|
||||
base.ENGINES['ansible'] = Tmpl(task_vars,
|
||||
self._task,
|
||||
self._connection,
|
||||
self._play_context,
|
||||
self._loader,
|
||||
self._templar,
|
||||
self._shared_loader_obj,
|
||||
)
|
||||
if 'copy_templates' in module_args:
|
||||
copy_templates = module_args.pop('copy_templates')
|
||||
else:
|
||||
copy_templates = False
|
||||
directories, certificates = run(build_files(hostname,
|
||||
only_machine,
|
||||
False,
|
||||
copy_tests,
|
||||
))
|
||||
directories, certificates = build_files(hostname,
|
||||
only_machine,
|
||||
False,
|
||||
copy_tests,
|
||||
)
|
||||
module_args['directories'] = list(directories.values())
|
||||
module_args['directories'].append('/var/lib/risotto/images_files')
|
||||
remote = self._execute_module(module_name='compare',
|
||||
|
|
@ -86,17 +102,14 @@ class ActionModule(ActionBase):
|
|||
msg = remote['msg']
|
||||
raise Exception(f'error in remote action: {msg}')
|
||||
if copy_templates:
|
||||
run(build_files(hostname,
|
||||
only_machine,
|
||||
True,
|
||||
copy_tests,
|
||||
))
|
||||
build_files(hostname,
|
||||
only_machine,
|
||||
True,
|
||||
copy_tests,
|
||||
)
|
||||
|
||||
machines_changed = []
|
||||
tls_machine = None
|
||||
for machine, directory in directories.items():
|
||||
if machine.startswith('tls.'):
|
||||
tls_machine = machine
|
||||
if directory not in remote['directories']:
|
||||
machines_changed.append(machine)
|
||||
continue
|
||||
|
|
@ -150,6 +163,12 @@ class ActionModule(ActionBase):
|
|||
tar_filename = f'{ARCHIVES_DIR}/{INSTALL_IMAGES_DIR}.tar'
|
||||
with tarfile.open(tar_filename, 'w') as archive:
|
||||
archive.add('.')
|
||||
self._execute_module(module_name='file',
|
||||
module_args={'path': '/tmp/new_configurations',
|
||||
'state': 'directory',
|
||||
},
|
||||
task_vars=task_vars,
|
||||
)
|
||||
self._transfer_file(tar_filename, tar_filename)
|
||||
# tests
|
||||
self._execute_module(module_name='file',
|
||||
|
|
@ -190,5 +209,72 @@ class ActionModule(ActionBase):
|
|||
changed=changed,
|
||||
machines_changed=machines,
|
||||
host_changed=self._task.args['hostname'] in machines_changed,
|
||||
tls_machine=tls_machine,
|
||||
)
|
||||
|
||||
|
||||
class FakeCopy:
|
||||
def __init__(self, task):
|
||||
self.task = task
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
copy2(self.task.args['src'], self.task.args['dest'])
|
||||
return {}
|
||||
|
||||
|
||||
class FakeGet:
|
||||
def __init__(self, klass):
|
||||
self.klass = klass
|
||||
|
||||
def fake_get(self, action, *args, task, **kwargs):
|
||||
if action == 'ansible.legacy.copy':
|
||||
return FakeCopy(task)
|
||||
return self.klass.ori_get(action, *args, task=task, **kwargs)
|
||||
|
||||
|
||||
class Tmpl(TmplActionModule):
|
||||
def __init__(self, task_vars, *args):
|
||||
super().__init__(*args)
|
||||
self.task_vars = task_vars
|
||||
|
||||
def _early_needs_tmp_path(self):
|
||||
# do not create tmp remotely
|
||||
return False
|
||||
|
||||
def process(self,
|
||||
filename: str,
|
||||
source: str,
|
||||
true_destfilename: str,
|
||||
destfilename: str,
|
||||
destdir: str,
|
||||
variable: Any,
|
||||
index: int,
|
||||
rougail_variables_dict: Dict,
|
||||
eosfunc: Dict,
|
||||
extra_variables: Any=None,
|
||||
):
|
||||
if source is not None: # pragma: no cover
|
||||
raise TemplateError(_('source is not supported for ansible'))
|
||||
task_vars = rougail_variables_dict | self.task_vars
|
||||
if variable is not None:
|
||||
task_vars['rougail_variable'] = variable
|
||||
if index is not None:
|
||||
task_vars['rougail_index'] = index
|
||||
if extra_variables:
|
||||
task_vars['extra_variables'] = extra_variables
|
||||
task_vars['rougail_filename'] = true_destfilename
|
||||
task_vars['rougail_destination_dir'] = destdir
|
||||
self._task.args['src'] = filename
|
||||
self._task.args['dest'] = destfilename
|
||||
# add custom filter
|
||||
custom_filters.update(eosfunc)
|
||||
# do not copy file in host but stay it locally
|
||||
self._shared_loader_obj.action_loader.ori_get = self._shared_loader_obj.action_loader.get
|
||||
self._shared_loader_obj.action_loader.get = FakeGet(self._shared_loader_obj.action_loader).fake_get
|
||||
# template
|
||||
ret = self.run(task_vars=task_vars)
|
||||
# restore get function
|
||||
self._shared_loader_obj.action_loader.get = self._shared_loader_obj.action_loader.ori_get
|
||||
# remove custom filter
|
||||
custom_filters.clear()
|
||||
if ret.get('failed'):
|
||||
raise TemplateError(f'error while templating "{filename}": {ret["msg"]}')
|
||||
|
|
|
|||
8
ansible/filter_plugins/custom.py
Normal file
8
ansible/filter_plugins/custom.py
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
from risotto.utils import custom_filters
|
||||
|
||||
|
||||
class FilterModule:
|
||||
"""This filter is used to load custom filter from dataset
|
||||
"""
|
||||
def filters(self):
|
||||
return custom_filters
|
||||
|
|
@ -127,7 +127,7 @@ def machineslist(data, only=None, only_name=False):
|
|||
|
||||
|
||||
def modulename(data, servername):
|
||||
return data[servername]['module_name']
|
||||
return data[servername]['general']['module_name']
|
||||
|
||||
|
||||
class FilterModule:
|
||||
|
|
|
|||
32
ansible/filter_plugins/normalize_family.py
Normal file
32
ansible/filter_plugins/normalize_family.py
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
"""
|
||||
Silique (https://www.silique.fr)
|
||||
Copyright (C) 2023
|
||||
|
||||
distribued with GPL-2 or later license
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
"""
|
||||
|
||||
|
||||
from rougail.utils import normalize_family
|
||||
|
||||
|
||||
class FilterModule:
|
||||
def filters(self):
|
||||
return {
|
||||
'normalize_family': normalize_family,
|
||||
}
|
||||
11
ansible/filter_plugins/raise.py
Normal file
11
ansible/filter_plugins/raise.py
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
from jinja2.exceptions import TemplateRuntimeError
|
||||
|
||||
def fraise(msg):
|
||||
raise TemplateRuntimeError(msg)
|
||||
|
||||
|
||||
class FilterModule:
|
||||
def filters(self):
|
||||
return {
|
||||
'raise': fraise,
|
||||
}
|
||||
|
|
@ -2,6 +2,14 @@
|
|||
- name: "Populate service facts"
|
||||
service_facts:
|
||||
|
||||
- name: "Set timezone"
|
||||
timezone:
|
||||
name: Europe/Paris
|
||||
|
||||
- name: Set a hostname
|
||||
ansible.builtin.hostname:
|
||||
name: "{{ inventory_hostname }}"
|
||||
|
||||
- name: "Packages installation"
|
||||
apt:
|
||||
pkg: "{{ vars[inventory_hostname]['general']['host_packages'] }}"
|
||||
|
|
@ -63,3 +71,25 @@
|
|||
path: /var/lib/risotto/tls
|
||||
state: directory
|
||||
mode: "755"
|
||||
|
||||
- name: "Add keyrings directory"
|
||||
file:
|
||||
path: /etc/apt/keyrings
|
||||
state: directory
|
||||
mode: "755"
|
||||
|
||||
- name: "Add vector signed repositories"
|
||||
ansible.builtin.get_url:
|
||||
url: https://repositories.timber.io/public/vector/gpg.3543DB2D0A2BC4B8.key
|
||||
dest: /etc/apt/keyrings/vector.asc
|
||||
|
||||
- name: "Add vector repository"
|
||||
ansible.builtin.apt_repository:
|
||||
repo: "deb [signed-by=/etc/apt/keyrings/vector.asc] https://repositories.timber.io/public/vector/deb/debian {{ ansible_distribution_release }} main"
|
||||
state: present
|
||||
|
||||
- name: "Install vector"
|
||||
ansible.builtin.apt:
|
||||
name: vector
|
||||
update_cache: yes
|
||||
state: present
|
||||
|
|
|
|||
|
|
@ -8,8 +8,8 @@ from argparse import ArgumentParser
|
|||
from json import load as json_load, dumps, JSONEncoder
|
||||
from os import remove
|
||||
from os.path import isfile
|
||||
from asyncio import run
|
||||
from traceback import print_exc
|
||||
from sys import stderr, argv
|
||||
|
||||
from risotto.machine import load, TIRAMISU_CACHE, VALUES_CACHE, INFORMATIONS_CACHE, ROUGAIL_NAMESPACE, ROUGAIL_NAMESPACE_DESCRIPTION
|
||||
from tiramisu import Config
|
||||
|
|
@ -40,12 +40,13 @@ class RisottoInventory(object):
|
|||
parser.add_argument('--host', action='store')
|
||||
parser.add_argument('--nocache', action='store_true')
|
||||
parser.add_argument('--debug', action='store_true')
|
||||
parser.add_argument('--pretty_print', action='store_true')
|
||||
self.args = parser.parse_args()
|
||||
if self.args.debug:
|
||||
global DEBUG
|
||||
DEBUG = True
|
||||
|
||||
async def run(self):
|
||||
def run(self):
|
||||
if self.args.list and self.args.host:
|
||||
raise Exception('cannot have --list and --host together')
|
||||
if self.args.list or self.args.nocache:
|
||||
|
|
@ -55,20 +56,20 @@ class RisottoInventory(object):
|
|||
remove(VALUES_CACHE)
|
||||
if isfile(INFORMATIONS_CACHE):
|
||||
remove(INFORMATIONS_CACHE)
|
||||
config = await load(TIRAMISU_CACHE,
|
||||
VALUES_CACHE,
|
||||
INFORMATIONS_CACHE,
|
||||
)
|
||||
config = load(TIRAMISU_CACHE,
|
||||
VALUES_CACHE,
|
||||
INFORMATIONS_CACHE,
|
||||
)
|
||||
if self.args.list:
|
||||
return await self.do_inventory(config)
|
||||
return self.do_inventory(config)
|
||||
elif self.args.host:
|
||||
return await self.get_vars(config, self.args.host)
|
||||
return self.get_vars(config, self.args.host)
|
||||
raise Exception('pfff')
|
||||
|
||||
async def do_inventory(self,
|
||||
config: Config,
|
||||
) -> dict:
|
||||
servers = [await subconfig.option.doc() for subconfig in await config.option.list('optiondescription') if await subconfig.information.get('module') == 'host']
|
||||
def do_inventory(self,
|
||||
config: Config,
|
||||
) -> dict:
|
||||
servers = [subconfig.doc() for subconfig in config.option.list('optiondescription') if subconfig.information.get('module') == 'host']
|
||||
return dumps({
|
||||
'group': {
|
||||
'hosts': servers,
|
||||
|
|
@ -81,44 +82,54 @@ class RisottoInventory(object):
|
|||
}
|
||||
})
|
||||
|
||||
async def get_vars(self,
|
||||
config: Config,
|
||||
host_name: str,
|
||||
) -> dict:
|
||||
def get_vars(self,
|
||||
config: Config,
|
||||
host_name: str,
|
||||
) -> dict:
|
||||
ret = {}
|
||||
rougailconfig = RougailConfig.copy()
|
||||
rougailconfig['variable_namespace'] = ROUGAIL_NAMESPACE
|
||||
rougailconfig['variable_namespace_description'] = ROUGAIL_NAMESPACE_DESCRIPTION
|
||||
for subconfig in await config.option.list('optiondescription'):
|
||||
server_name = await subconfig.option.description()
|
||||
module_name = await subconfig.option(await subconfig.information.get('provider:global:module_name')).value.get()
|
||||
for subconfig in config.option.list('optiondescription'):
|
||||
server_name = subconfig.description()
|
||||
module_name = subconfig.option(subconfig.information.get('provider:global:module_name')).value.get()
|
||||
if module_name == 'host' and server_name != host_name:
|
||||
continue
|
||||
engine = RougailSystemdTemplate(subconfig, rougailconfig)
|
||||
await engine.load_variables()
|
||||
engine.load_variables(with_flatten=False)
|
||||
if module_name != 'host' and engine.rougail_variables_dict['general']['host'] != host_name:
|
||||
continue
|
||||
ret[server_name] = engine.rougail_variables_dict
|
||||
ret['modules'] = await config.information.get('modules')
|
||||
ret['modules'] = config.information.get('modules')
|
||||
ret['delete_old_image'] = False
|
||||
ret['configure_host'] = True
|
||||
ret['only_machine'] = None
|
||||
ret['copy_templates'] = False
|
||||
ret['copy_tests'] = False
|
||||
ret['host_install_dir'] = ret[host_name].pop('host_install_dir')
|
||||
ret['host_install_dir'] = ret[host_name]['general']['host_install_dir']
|
||||
return dumps(ret, cls=RougailEncoder)
|
||||
|
||||
|
||||
# Get the inventory.
|
||||
async def main():
|
||||
def main():
|
||||
try:
|
||||
inv = RisottoInventory()
|
||||
values = await inv.run()
|
||||
print(values)
|
||||
values = inv.run()
|
||||
if inv.args.pretty_print:
|
||||
from pprint import pprint
|
||||
from json import loads
|
||||
pprint(loads(values))
|
||||
else:
|
||||
print(values)
|
||||
except Exception as err:
|
||||
if DEBUG:
|
||||
print_exc()
|
||||
print(err)
|
||||
print('---', file=stderr)
|
||||
extra=''
|
||||
else:
|
||||
extra=f'\nmore informations with commandline "{" ".join(argv)} --debug"'
|
||||
print(f'{err}{extra}', file=stderr)
|
||||
exit(1)
|
||||
|
||||
|
||||
run(main())
|
||||
main()
|
||||
|
|
|
|||
|
|
@ -154,7 +154,7 @@ def run_module():
|
|||
module_args = dict(
|
||||
state=dict(type='str', required=True),
|
||||
machines=dict(type='list', required=True),
|
||||
tls_machine=dict(type='str', required=True),
|
||||
tls_machine=dict(type='str', required=False),
|
||||
)
|
||||
|
||||
# seed the result dict in the object
|
||||
|
|
@ -184,10 +184,10 @@ def run_module():
|
|||
# manipulate or modify the state as needed (this is going to be the
|
||||
# part where your module will do what it needs to do)
|
||||
machines = module.params['machines']
|
||||
tls_machine = module.params['tls_machine']
|
||||
tls_machine = module.params.get('tls_machine')
|
||||
if module.params['state'] == 'stopped':
|
||||
if tls_machine and tls_machine not in machines:
|
||||
machines.append(tls_machine)
|
||||
if tls_machine and tls_machine in machines:
|
||||
machines.remove(tls_machine)
|
||||
bus = SystemBus()
|
||||
result['changed'], errors = stop(bus, machines)
|
||||
if errors:
|
||||
|
|
|
|||
|
|
@ -1,15 +1,2 @@
|
|||
- name: "Create SRV directory for {{ item.name}}"
|
||||
file:
|
||||
path: /var/lib/risotto/srv/{{ item.name }}
|
||||
state: directory
|
||||
mode: 0755
|
||||
when: "item.srv"
|
||||
|
||||
- name: "Create journald directory for {{ item.name }}"
|
||||
file:
|
||||
path: /var/lib/risotto/journals/{{ item.name }}
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: "Create informations for {{ item.name }}"
|
||||
ansible.builtin.shell: "/usr/bin/echo {{ vars | modulename(item.name) }} > /var/lib/risotto/machines_informations/{{ item.name }}.image"
|
||||
|
|
|
|||
|
|
@ -1,48 +1,46 @@
|
|||
- name: "Rebuild images"
|
||||
ansible.builtin.shell: "/usr/local/sbin/update_images just_need_images"
|
||||
ansible.builtin.shell: "/usr/local/sbin/update_images {{ vars[vars['inventory_hostname']]['general']['tls_server'] }} do_not_start"
|
||||
register: ret
|
||||
failed_when: ret.rc != 0
|
||||
|
||||
- name: "Stop machine TLS"
|
||||
machinectl:
|
||||
state: stopped
|
||||
machines: "{{ build_host.tls_machine }}"
|
||||
tls_machine: "{{ build_host.tls_machine }}"
|
||||
when: build_host.tls_machine in build_host.machines_changed
|
||||
machines: "{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
when: vars[vars['inventory_hostname']]['general']['tls_server'] in machines_changed
|
||||
|
||||
- name: "Remove TLS files directory"
|
||||
file:
|
||||
path: "/var/lib/risotto/configurations/{{ build_host.tls_machine }}"
|
||||
path: "/var/lib/risotto/configurations/{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
state: absent
|
||||
when: build_host.tls_machine in build_host.machines_changed
|
||||
when: vars[vars['inventory_hostname']]['general']['tls_server'] in machines_changed
|
||||
|
||||
- name: "Copy TLS configuration"
|
||||
unarchive:
|
||||
src: /tmp/new_configurations/machines.tar
|
||||
dest: "/var/lib/risotto/configurations/"
|
||||
include: "{{ build_host.tls_machine }}"
|
||||
include: "{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
owner: root
|
||||
group: root
|
||||
when: build_host.tls_machine in build_host.machines_changed
|
||||
when: vars[vars['inventory_hostname']]['general']['tls_server'] in machines_changed
|
||||
|
||||
- name: "Start machine TLS"
|
||||
machinectl:
|
||||
state: started
|
||||
machines: "{{ build_host.tls_machine }}"
|
||||
tls_machine: "{{ build_host.tls_machine }}"
|
||||
when: build_host.tls_machine in build_host.machines_changed
|
||||
machines: "{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
when: vars[vars['inventory_hostname']]['general']['tls_server'] in machines_changed
|
||||
|
||||
- name: "Stop machines with new configuration {{ build_host.machines_changed }}"
|
||||
- name: "Stop machines with new configuration {{ machines_changed }}"
|
||||
machinectl:
|
||||
state: stopped
|
||||
machines: "{{ build_host.machines_changed }}"
|
||||
tls_machine: "{{ build_host.tls_machine }}"
|
||||
machines: "{{ machines_changed }}"
|
||||
tls_machine: "{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
|
||||
- name: "Remove files directory"
|
||||
file:
|
||||
path: "/var/lib/risotto/configurations/{{ item }}"
|
||||
state: absent
|
||||
loop: "{{ build_host.machines_changed }}"
|
||||
loop: "{{ machines_changed }}"
|
||||
|
||||
- name: "Copy configuration"
|
||||
unarchive:
|
||||
|
|
@ -50,19 +48,19 @@
|
|||
dest: /var/lib/risotto/configurations/
|
||||
owner: root
|
||||
group: root
|
||||
when: build_host.machines_changed
|
||||
when: machines_changed
|
||||
|
||||
- name: "Enable machines"
|
||||
machinectl:
|
||||
state: enabled
|
||||
machines: "{{ vars | machineslist(only_name=True) }}"
|
||||
tls_machine: "{{ build_host.tls_machine }}"
|
||||
tls_machine: "{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
|
||||
- name: "Start machines"
|
||||
machinectl:
|
||||
state: started
|
||||
machines: "{{ vars | machineslist(only_name=True) }}"
|
||||
tls_machine: "{{ build_host.tls_machine }}"
|
||||
tls_machine: "{{ vars[vars['inventory_hostname']]['general']['tls_server'] }}"
|
||||
|
||||
- name: "Remove compressed files directory"
|
||||
local_action:
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@
|
|||
copy_templates: "{{ copy_templates }}"
|
||||
register: build_host
|
||||
|
||||
- name: Print return information from the previous task
|
||||
- name: "Change"
|
||||
ansible.builtin.debug:
|
||||
var: build_host
|
||||
|
||||
|
|
@ -31,3 +31,5 @@
|
|||
#
|
||||
- name: "Install and apply configurations"
|
||||
include_tasks: machines.yml
|
||||
vars:
|
||||
machines_changed: "{{ build_host.machines_changed }}"
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
#!/bin/bash -e
|
||||
#!/bin/bash -ex
|
||||
|
||||
#START=$1
|
||||
BACKUP_DIR="/root/backup"
|
||||
|
||||
MACHINES=""
|
||||
|
|
@ -15,23 +14,14 @@ done
|
|||
cd /var/lib/risotto/srv/
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
for machine in $MACHINES; do
|
||||
# machinectl stop $machine || true
|
||||
# while true; do
|
||||
# machinectl status "$machine" > /dev/null 2>&1 || break
|
||||
# sleep 1
|
||||
# done
|
||||
BACKUP_FILE="$BACKUP_DIR/backup_$machine.tar.bz2"
|
||||
rm -f "$BACKUP_FILE"
|
||||
if [ -f "/var/lib/risotto/configurations/$machine/sbin/risotto_backup" ]; then
|
||||
machinectl -q shell $machine /usr/local/lib/sbin/risotto_backup
|
||||
tar -cJf $BACKUP_FILE $machine/backup
|
||||
else
|
||||
tar -cJf $BACKUP_FILE $machine
|
||||
tar --ignore-failed-read -cJf $BACKUP_FILE $machine/backup
|
||||
elif [ ! -f "/var/lib/risotto/configurations/$machine/no_risotto_backup" ]; then
|
||||
tar --ignore-failed-read -cJf $BACKUP_FILE $machine
|
||||
fi
|
||||
done
|
||||
|
||||
#if [ -z "$START" ]; then
|
||||
# machinectl start $MACHINES
|
||||
#fi
|
||||
|
||||
exit 0
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash -e
|
||||
#!/bin/bash -ex
|
||||
|
||||
IMAGE_NAME=$1
|
||||
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ for machine in $MACHINES; do
|
|||
if ! echo "$STARTED" | grep -q " $machine "; then
|
||||
echo
|
||||
echo "========= $machine"
|
||||
machinectl -q shell $machine /usr/bin/systemctl is-system-running 2>/dev/null || echo "not started"
|
||||
machinectl -q shell $machine /usr/bin/systemctl is-system-running 2>/dev/null || systemctl status systemd-nspawn@$machine.service || true
|
||||
fi
|
||||
done
|
||||
echo $DEGRADED
|
||||
|
|
|
|||
|
|
@ -1,5 +1,12 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
TLS_SERVER=$1
|
||||
if [ -z "$TLS_SERVER" ]; then
|
||||
echo "$0 nom_tls_server"
|
||||
exit 1
|
||||
fi
|
||||
DO_NOT_START=$2
|
||||
REBOOT_EVERY_MONDAY=$3
|
||||
# root dir configuration
|
||||
RISOTTO_DIR="/var/lib/risotto"
|
||||
RISOTTO_IMAGE_DIR="$RISOTTO_DIR/images"
|
||||
|
|
@ -16,34 +23,65 @@ ls /var/lib/risotto/images_files/ | while read image; do
|
|||
if [ -d /var/lib/risotto/images_files/"$image" ]; then
|
||||
echo
|
||||
echo "Install image $image" | tee -a /var/log/risotto/update_images.log
|
||||
/usr/local/sbin/build_image "$image" "$1" | tee -a /var/log/risotto/update_images.log || (echo "PROBLEME" | tee -a /var/log/risotto/update_images.log; true)
|
||||
/usr/local/sbin/build_image "$image" | tee -a /var/log/risotto/update_images.log || (echo "PROBLEME" | tee -a /var/log/risotto/update_images.log; true)
|
||||
fi
|
||||
done
|
||||
|
||||
idx=0
|
||||
if [ -z "$DO_NOT_START" ]; then
|
||||
machinectl reboot "$TLS_SERVER" || machinectl start "$TLS_SERVER"
|
||||
while true; do
|
||||
status=$(machinectl -q shell "$TLS_SERVER" /usr/bin/systemctl is-system-running 2>/dev/null || echo "not started")
|
||||
if echo "$status" | grep -q degraded || echo "$status" | grep -q running; then
|
||||
break
|
||||
fi
|
||||
idx=$((idx+1))
|
||||
if [ $idx = 60 ]; then
|
||||
echo "le serveur $TLS_SERVER n'a pas encore redémarré"
|
||||
break
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
fi
|
||||
|
||||
MACHINES=""
|
||||
for nspawn in $(ls /etc/systemd/nspawn/*.nspawn); do
|
||||
nspawn_file=$(basename $nspawn)
|
||||
nspawn_file=$(basename "$nspawn")
|
||||
machine=${nspawn_file%.*}
|
||||
MACHINES="$MACHINES$machine "
|
||||
MACHINE_MACHINES_DIR="/var/lib/machines/$machine"
|
||||
IMAGE_NAME_RISOTTO_IMAGE_NAME="$(cat $RISOTTO_DIR/machines_informations/$machine.image)"
|
||||
MACHINE_INFO="$RISOTTO_DIR/machines_informations/"
|
||||
VERSION_MACHINE="$MACHINE_INFO/$machine.version"
|
||||
if [ -n "$REBOOT_EVERY_MONDAY" ] && [ "$(date +%u)" = 1 ]; then
|
||||
# update TLS certificate every monday, so stop container
|
||||
machinectl stop "$machine" 2> /dev/null || true
|
||||
while true; do
|
||||
machinectl status "$machine" > /dev/null 2>&1 || break
|
||||
sleep 1
|
||||
done
|
||||
fi
|
||||
if [ ! -d "$MACHINE_MACHINES_DIR" ]; then
|
||||
rm -f "$VERSION_MACHINE"
|
||||
fi
|
||||
diff -q "$RISOTTO_IMAGE_DIR/$IMAGE_NAME_RISOTTO_IMAGE_NAME".version "$VERSION_MACHINE" &> /dev/null || (
|
||||
echo "Reinstall machine $machine"
|
||||
machinectl stop $machine || true
|
||||
machinectl stop "$machine" 2> /dev/null || true
|
||||
while true; do
|
||||
machinectl status "$machine" > /dev/null 2>&1 || break
|
||||
sleep 1
|
||||
done
|
||||
rm -rf "$MACHINE_MACHINES_DIR"
|
||||
mkdir "$MACHINE_MACHINES_DIR"
|
||||
cp -a --reflink=auto $RISOTTO_IMAGE_DIR/$IMAGE_NAME_RISOTTO_IMAGE_NAME/* $MACHINE_MACHINES_DIR
|
||||
cp -a --reflink=auto "$RISOTTO_IMAGE_DIR/$IMAGE_NAME_RISOTTO_IMAGE_NAME/"* "$MACHINE_MACHINES_DIR"
|
||||
cp -a --reflink=auto "$RISOTTO_IMAGE_DIR/$IMAGE_NAME_RISOTTO_IMAGE_NAME".version "$VERSION_MACHINE"
|
||||
)
|
||||
done
|
||||
if [ -z "$1" ]; then
|
||||
if [ -z "$DO_NOT_START" ]; then
|
||||
echo "start $MACHINES"
|
||||
machinectl start $MACHINES
|
||||
sleep 5
|
||||
journalctl -n 100 --no-pager
|
||||
diagnose
|
||||
fi
|
||||
exit 0
|
||||
|
|
|
|||
|
|
@ -1,6 +1,5 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from asyncio import run
|
||||
from tabulate import tabulate
|
||||
from argparse import ArgumentParser
|
||||
|
||||
|
|
@ -18,19 +17,19 @@ def list_to_string(lst):
|
|||
return lst
|
||||
|
||||
|
||||
async def get_files_subelements(type_name, element, files_subelement, files_cols):
|
||||
def get_files_subelements(type_name, element, files_subelement, files_cols):
|
||||
data = {}
|
||||
if not await element.option('activate').value.get():
|
||||
if not element.option('activate').value.get():
|
||||
return data
|
||||
for subelement in files_subelement.values():
|
||||
if subelement['type'] == 'subelement':
|
||||
try:
|
||||
value = list_to_string(await element.option(subelement['key']).value.get())
|
||||
value = list_to_string(element.option(subelement['key']).value.get())
|
||||
# FIXME except AttributeError:
|
||||
except Exception:
|
||||
value = ''
|
||||
elif subelement['type'] == 'information':
|
||||
value = await element.information.get(subelement['key'], '')
|
||||
value = element.information.get(subelement['key'], '')
|
||||
elif subelement['type'] == 'none':
|
||||
value = subelement['value']
|
||||
else:
|
||||
|
|
@ -47,7 +46,7 @@ async def get_files_subelements(type_name, element, files_subelement, files_cols
|
|||
return data
|
||||
|
||||
|
||||
async def services(config, values):
|
||||
def services(config, values):
|
||||
files_subelement = {'Source': {'key': 'source', 'type': 'information'},
|
||||
'Nom': {'key': 'name', 'type': 'subelement'},
|
||||
'Variable': {'key': 'variable', 'type': 'subelement'},
|
||||
|
|
@ -57,26 +56,26 @@ async def services(config, values):
|
|||
'Moteur': {'key': 'engine', 'type': 'information'},
|
||||
}
|
||||
disabled_services = []
|
||||
for service in await config.option.list(type="all"):
|
||||
doc = await service.option.doc()
|
||||
for service in config.option.list(type="all"):
|
||||
doc = service.option.doc()
|
||||
files_lst = []
|
||||
files_cols = set()
|
||||
if not await service.option('manage').value.get():
|
||||
if not service.option('manage').value.get():
|
||||
doc += " - unmanaged"
|
||||
if not await service.option('activate').value.get():
|
||||
if not service.option('activate').value.get():
|
||||
disabled_services.append([doc])
|
||||
else:
|
||||
for type in await service.list(type="all"):
|
||||
type_name = await type.option.doc()
|
||||
for type in service.list(type="all"):
|
||||
type_name = type.option.doc()
|
||||
if type_name in ['files', 'overrides']:
|
||||
for element in await type.list(type="all"):
|
||||
data = await get_files_subelements(type_name, element, files_subelement, files_cols)
|
||||
for element in type.list(type="all"):
|
||||
data = get_files_subelements(type_name, element, files_subelement, files_cols)
|
||||
if data:
|
||||
files_lst.append(data)
|
||||
elif type_name == 'manage':
|
||||
pass
|
||||
elif type_name == 'activate':
|
||||
if not await type.value.get():
|
||||
if not type.value.get():
|
||||
doc += " - unactivated"
|
||||
else:
|
||||
print("FIXME " + type_name)
|
||||
|
|
@ -89,19 +88,19 @@ async def services(config, values):
|
|||
values["Services désactivés"] = {'keys': ['Nom'], 'lst': disabled_services}
|
||||
|
||||
|
||||
async def table_leader(config, read_only):
|
||||
def table_leader(config, read_only):
|
||||
keys = ['Description']
|
||||
if read_only:
|
||||
keys.append('Cachée')
|
||||
leadership_lst = await config.list(type="all")
|
||||
leadership_lst = config.list(type="all")
|
||||
leader = leadership_lst.pop(0)
|
||||
leader_owner = await leader.owner.get()
|
||||
follower_names = [await follower.option.name() for follower in leadership_lst]
|
||||
doc = await leader.option.doc()
|
||||
properties = await leader.property.get()
|
||||
leader_owner = leader.owner.get()
|
||||
follower_names = [follower.option.name() for follower in leadership_lst]
|
||||
doc = leader.option.doc()
|
||||
properties = leader.property.get()
|
||||
if 'mandatory' in properties:
|
||||
doc += '*'
|
||||
name = await leader.option.name()
|
||||
name = leader.option.name()
|
||||
lst = [[f'{doc} ({name})']]
|
||||
if read_only:
|
||||
if 'hidden' in properties:
|
||||
|
|
@ -109,7 +108,7 @@ async def table_leader(config, read_only):
|
|||
else:
|
||||
hidden = ''
|
||||
lst[0].append(hidden)
|
||||
for idx, leader_value in enumerate(await leader.value.get()):
|
||||
for idx, leader_value in enumerate(leader.value.get()):
|
||||
keys.append(f'Valeur {idx}')
|
||||
keys.append(f'Utilisateur {idx}')
|
||||
lst[0].append(leader_value)
|
||||
|
|
@ -117,11 +116,11 @@ async def table_leader(config, read_only):
|
|||
for follower_idx, follower_name in enumerate(follower_names):
|
||||
follower_option = config.option(follower_name, idx)
|
||||
if idx == 0:
|
||||
doc = await follower_option.option.doc()
|
||||
properties = await follower_option.property.get()
|
||||
doc = follower_option.option.doc()
|
||||
properties = follower_option.property.get()
|
||||
if 'mandatory' in properties:
|
||||
doc += '*'
|
||||
name = await follower_option.option.name()
|
||||
name = follower_option.option.name()
|
||||
lst.append([f'{doc} ({name})'])
|
||||
if read_only:
|
||||
if 'hidden' in properties:
|
||||
|
|
@ -130,48 +129,48 @@ async def table_leader(config, read_only):
|
|||
hidden = ''
|
||||
lst[-1].append(hidden)
|
||||
try:
|
||||
lst[follower_idx + 1].append(list_to_string(await follower_option.value.get()))
|
||||
lst[follower_idx + 1].append(await follower_option.owner.get())
|
||||
lst[follower_idx + 1].append(list_to_string(follower_option.value.get()))
|
||||
lst[follower_idx + 1].append(follower_option.owner.get())
|
||||
except PropertiesOptionError:
|
||||
pass
|
||||
# leader = next leader_iter
|
||||
# if master_values is None:
|
||||
# master_values = await subconfig.value.get()
|
||||
# master_values = subconfig.value.get()
|
||||
return {'keys': keys, 'lst': lst}
|
||||
|
||||
|
||||
async def table(config, prefix_len, values, read_only):
|
||||
def table(config, prefix_len, values, read_only):
|
||||
lst = []
|
||||
for subconfig in await config.option.list(type="all"):
|
||||
for subconfig in config.option.list(type="all"):
|
||||
# prefix = prefix_len * 2 * ' '
|
||||
# if await subconfig.option.isoptiondescription():
|
||||
# if subconfig.option.isoptiondescription():
|
||||
# prefix += '=>'
|
||||
# else:
|
||||
# prefix += '-'
|
||||
# display_str = f'{prefix} {description}'
|
||||
# if name != description:
|
||||
# display_str = f'{display_str} ({name})'
|
||||
name = await subconfig.option.name()
|
||||
doc = await subconfig.option.doc()
|
||||
name = subconfig.option.name()
|
||||
doc = subconfig.option.doc()
|
||||
if prefix_len == 0 and ROUGAIL_NAMESPACE != name:
|
||||
doc = doc.capitalize()
|
||||
if prefix_len == 0 and name == 'services':
|
||||
values['Services'] = {}
|
||||
await services(subconfig, values['Services'])
|
||||
elif await subconfig.option.isoptiondescription():
|
||||
od_name = f'{doc} ({(await subconfig.option.path()).split(".", 1)[1]})'
|
||||
services(subconfig, values['Services'])
|
||||
elif subconfig.option.isoptiondescription():
|
||||
od_name = f'{doc} ({(subconfig.option.path()).split(".", 1)[1]})'
|
||||
values[od_name] = None
|
||||
if await subconfig.option.isleadership():
|
||||
values[od_name] = await table_leader(subconfig, read_only)
|
||||
if subconfig.option.isleadership():
|
||||
values[od_name] = table_leader(subconfig, read_only)
|
||||
else:
|
||||
values[od_name] = await table(subconfig, prefix_len + 1, values, read_only)
|
||||
values[od_name] = table(subconfig, prefix_len + 1, values, read_only)
|
||||
else:
|
||||
value = list_to_string(await subconfig.value.get())
|
||||
doc = await subconfig.option.doc()
|
||||
properties = await subconfig.property.get()
|
||||
value = list_to_string(subconfig.value.get())
|
||||
doc = subconfig.option.doc()
|
||||
properties = subconfig.property.get()
|
||||
if 'mandatory' in properties:
|
||||
doc += '*'
|
||||
name = await subconfig.option.name()
|
||||
name = subconfig.option.name()
|
||||
lst.append([f'{doc} ({name})', value])
|
||||
if read_only:
|
||||
if 'hidden' in properties:
|
||||
|
|
@ -179,7 +178,7 @@ async def table(config, prefix_len, values, read_only):
|
|||
else:
|
||||
hidden = ''
|
||||
lst[-1].append(hidden)
|
||||
lst[-1].append(await subconfig.owner.get())
|
||||
lst[-1].append(subconfig.owner.get())
|
||||
keys = ['Description', 'Valeur']
|
||||
if read_only:
|
||||
keys.append('Cachée')
|
||||
|
|
@ -187,7 +186,7 @@ async def table(config, prefix_len, values, read_only):
|
|||
return {'keys': keys, 'lst': lst}
|
||||
|
||||
|
||||
async def main():
|
||||
def main():
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('server_name')
|
||||
parser.add_argument('--read_only', action='store_true')
|
||||
|
|
@ -199,18 +198,18 @@ async def main():
|
|||
|
||||
values = {}
|
||||
server_name = args.server_name
|
||||
config = await load(hide_secret=HIDE_SECRET,
|
||||
config = load(hide_secret=HIDE_SECRET,
|
||||
original_display_name=True,
|
||||
valid_mandatories=args.read_only,
|
||||
)
|
||||
if not args.read_only:
|
||||
await config.property.read_write()
|
||||
config.property.read_write()
|
||||
root_option = config.option(normalize_family(server_name))
|
||||
try:
|
||||
await root_option.option.get()
|
||||
root_option.option.get()
|
||||
except AttributeError:
|
||||
exit(f'Unable to find {server_name} configuration: {[await o.option.description() for o in await config.option.list(type="optiondescription")]}')
|
||||
await table(root_option, 0, values, args.read_only)
|
||||
exit(f'Unable to find {server_name} configuration: {[o.option.description() for o in config.option.list(type="optiondescription")]}')
|
||||
table(root_option, 0, values, args.read_only)
|
||||
for title, dico in values.items():
|
||||
if title == 'Services':
|
||||
if not dico:
|
||||
|
|
@ -233,4 +232,4 @@ async def main():
|
|||
print(tabulate(dico['lst'], headers=dico['keys'], tablefmt="fancy_grid"))
|
||||
|
||||
|
||||
run(main())
|
||||
main()
|
||||
|
|
|
|||
|
|
@ -1,13 +1,12 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from asyncio import run
|
||||
from argparse import ArgumentParser
|
||||
from traceback import print_exc
|
||||
|
||||
from risotto.machine import remove_cache, build_files, INSTALL_DIR
|
||||
|
||||
|
||||
async def main():
|
||||
def main():
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('server_name', nargs='?')
|
||||
parser.add_argument('--nocache', action='store_true')
|
||||
|
|
@ -19,12 +18,12 @@ async def main():
|
|||
remove_cache()
|
||||
|
||||
try:
|
||||
await build_files(None,
|
||||
args.server_name,
|
||||
False,
|
||||
args.copy_tests,
|
||||
template=args.template,
|
||||
)
|
||||
build_files(None,
|
||||
args.server_name,
|
||||
False,
|
||||
args.copy_tests,
|
||||
template=args.template,
|
||||
)
|
||||
except Exception as err:
|
||||
if args.debug:
|
||||
print_exc()
|
||||
|
|
@ -32,4 +31,4 @@ async def main():
|
|||
print(f'templates generated in "{INSTALL_DIR}" directory')
|
||||
|
||||
|
||||
run(main())
|
||||
main()
|
||||
|
|
|
|||
|
|
@ -19,8 +19,8 @@ class ModuleCfg():
|
|||
self.depends = []
|
||||
self.manuals = []
|
||||
self.tests = []
|
||||
self.providers = []
|
||||
self.suppliers = []
|
||||
#self.providers = []
|
||||
#self.suppliers = []
|
||||
|
||||
def __repr__(self):
|
||||
return str(vars(self))
|
||||
|
|
@ -131,10 +131,10 @@ class Modules:
|
|||
if appname not in cfg.providers[provider]:
|
||||
cfg.providers[provider].append(appname)
|
||||
supplier = app.get('supplier')
|
||||
if supplier:
|
||||
self.suppliers.setdefault(supplier, [])
|
||||
if appname not in self.suppliers[supplier]:
|
||||
self.suppliers[supplier].append(appname)
|
||||
#if supplier:
|
||||
# self.suppliers.setdefault(supplier, [])
|
||||
# if appname not in self.suppliers[supplier]:
|
||||
# self.suppliers[supplier].append(appname)
|
||||
if 'distribution' in app and app['distribution']:
|
||||
distribution = appname
|
||||
else:
|
||||
|
|
@ -216,33 +216,57 @@ def applicationservice_copy(src_file: str,
|
|||
copytree(src_file, dst_file)
|
||||
|
||||
|
||||
async def valid_mandatories(config):
|
||||
mandatories = await config.value.mandatory()
|
||||
await config.property.pop('mandatory')
|
||||
def valid_mandatories(config):
|
||||
mandatories = config.value.mandatory()
|
||||
config.property.remove('mandatory')
|
||||
hidden = {}
|
||||
variables = {}
|
||||
title = None
|
||||
if mandatories:
|
||||
server_name = None
|
||||
for mandatory in mandatories:
|
||||
path_server_name, path = mandatory.split('.', 1)
|
||||
var_server_name = await config.option(path_server_name).option.description()
|
||||
for mandatory_option in mandatories:
|
||||
path_server_name, path = mandatory_option.path().split('.', 1)
|
||||
var_server_name = config.option(path_server_name).description()
|
||||
if server_name != var_server_name:
|
||||
server_name = var_server_name
|
||||
title = f'=== Missing variables for {server_name} ==='
|
||||
suboption = config.option(mandatory)
|
||||
text = await suboption.option.doc()
|
||||
text = mandatory_option.doc()
|
||||
msg = f' - {text} ({path})'
|
||||
supplier = await suboption.information.get('supplier', None)
|
||||
supplier = mandatory_option.information.get('supplier', None)
|
||||
if supplier:
|
||||
msg += f' you could add a service that provides "{supplier}"'
|
||||
try:
|
||||
await config.option(mandatory).value.get()
|
||||
variables.setdefault(title, []).append(msg)
|
||||
except PropertiesOptionError as err:
|
||||
if 'hidden' not in err.proptype:
|
||||
raise PropertiesOptionError(err)
|
||||
hidden.setdefault(title, []).append(msg)
|
||||
if mandatory_option.isfollower():
|
||||
leader = mandatory_option.leader()
|
||||
try:
|
||||
leader_value = leader.value.get()
|
||||
except PropertiesOptionError as err:
|
||||
if 'hidden' not in err.proptype:
|
||||
raise err from err
|
||||
hidden.setdefault(title, []).append(msg)
|
||||
else:
|
||||
config.property.add('mandatory')
|
||||
for idx in range(mandatory_option.value.len()):
|
||||
try:
|
||||
config.option(mandatory_option.path(), idx).value.get()
|
||||
except PropertiesOptionError as err:
|
||||
path = leader.path()
|
||||
spath = path.split('.', 1)[1]
|
||||
submsg = f'{msg} at index {idx} (value of leader "{leader.doc()}" ({spath}) is "{leader_value[idx]}")'
|
||||
if 'hidden' in err.proptype:
|
||||
hidden.setdefault(title, []).append(submsg)
|
||||
elif 'mandatory' in err.proptype:
|
||||
variables.setdefault(title, []).append(submsg)
|
||||
else:
|
||||
raise err from err
|
||||
config.property.remove('mandatory')
|
||||
else:
|
||||
try:
|
||||
mandatory_option.value.get()
|
||||
variables.setdefault(title, []).append(msg)
|
||||
except PropertiesOptionError as err:
|
||||
if 'hidden' not in err.proptype:
|
||||
raise err from err
|
||||
hidden.setdefault(title, []).append(msg)
|
||||
if not variables:
|
||||
variables = hidden
|
||||
return variables
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@ from .rougail.annotator import calc_providers, calc_providers_global, calc_provi
|
|||
from rougail import RougailConfig, RougailConvert
|
||||
from os import remove, makedirs, listdir, chmod
|
||||
from os.path import isfile, isdir, abspath, join, dirname
|
||||
from json import dump as json_dump, load as json_load
|
||||
from pickle import dump as pickle_dump, load as pickle_load
|
||||
from yaml import load as yaml_load, SafeLoader
|
||||
from ipaddress import ip_network
|
||||
from ipaddress import IPv4Interface, ip_network
|
||||
#
|
||||
from tiramisu import Config, valid_network_netmask, valid_ip_netmask, valid_broadcast, valid_in_network, valid_not_equal, calc_value
|
||||
from tiramisu import Config, valid_network_netmask, valid_ip_netmask, valid_broadcast, valid_in_network, valid_not_equal, calc_value, calc_value_property_help
|
||||
from rougail.utils import normalize_family
|
||||
from rougail import RougailSystemdTemplate
|
||||
from shutil import copy2, copytree, rmtree
|
||||
|
|
@ -29,8 +29,8 @@ def tiramisu_display_name(kls,
|
|||
|
||||
CONFIG_FILE = 'servers.yml'
|
||||
TIRAMISU_CACHE = 'tiramisu_cache.py'
|
||||
VALUES_CACHE = 'values_cache.json'
|
||||
INFORMATIONS_CACHE = 'informations_cache.json'
|
||||
VALUES_CACHE = 'values_cache.pickle'
|
||||
INFORMATIONS_CACHE = 'informations_cache.pickle'
|
||||
INSTALL_DIR = RISOTTO_CONFIG['directories']['dest']
|
||||
INSTALL_CONFIG_DIR = 'configurations'
|
||||
INSTALL_TMPL_DIR= 'templates'
|
||||
|
|
@ -47,6 +47,7 @@ FUNCTIONS = {'calc_providers': calc_providers,
|
|||
'valid_in_network': valid_in_network,
|
||||
'valid_not_equal': valid_not_equal,
|
||||
'calc_value': calc_value,
|
||||
'calc_value_property_help': calc_value_property_help,
|
||||
'normalize_family': normalize_family,
|
||||
}
|
||||
|
||||
|
|
@ -88,28 +89,28 @@ def remove_cache():
|
|||
remove(INFORMATIONS_CACHE)
|
||||
|
||||
|
||||
async def templates(server_name,
|
||||
config,
|
||||
just_copy=False,
|
||||
copy_manuals=False,
|
||||
template=None,
|
||||
extra_variables=None,
|
||||
):
|
||||
def templates(server_name,
|
||||
config,
|
||||
just_copy=False,
|
||||
copy_manuals=False,
|
||||
template=None,
|
||||
extra_variables=None,
|
||||
):
|
||||
subconfig = config.option(normalize_family(server_name))
|
||||
try:
|
||||
await subconfig.option.get()
|
||||
subconfig.get()
|
||||
except:
|
||||
servers = [await server.option.description() for server in await config.option.list('optiondescription')]
|
||||
servers = [server.description() for server in config.list('optiondescription')]
|
||||
raise Exception(f'cannot find server name "{server_name}": {servers}')
|
||||
|
||||
rougailconfig = RougailConfig.copy()
|
||||
rougailconfig['variable_namespace'] = ROUGAIL_NAMESPACE
|
||||
rougailconfig['variable_namespace_description'] = ROUGAIL_NAMESPACE_DESCRIPTION
|
||||
rougailconfig['tmp_dir'] = 'tmp'
|
||||
rougailconfig['templates_dir'] = await subconfig.information.get('templates_dir')
|
||||
rougailconfig['patches_dir'] = await subconfig.information.get('patches_dir')
|
||||
rougailconfig['functions_file'] = await subconfig.information.get('functions_files')
|
||||
module = await subconfig.information.get('module')
|
||||
rougailconfig['templates_dir'] = subconfig.information.get('templates_dir')
|
||||
rougailconfig['patches_dir'] = subconfig.information.get('patches_dir')
|
||||
rougailconfig['functions_file'] = subconfig.information.get('functions_files')
|
||||
module = subconfig.information.get('module')
|
||||
is_host = module == 'host'
|
||||
if is_host:
|
||||
rougailconfig['systemd_tmpfile_delete_before_create'] = True
|
||||
|
|
@ -138,15 +139,15 @@ async def templates(server_name,
|
|||
engine.engines[eng] = engine.engines['none']
|
||||
try:
|
||||
if not template:
|
||||
await engine.instance_files(extra_variables=extra_variables)
|
||||
engine.instance_files(extra_variables=extra_variables)
|
||||
else:
|
||||
await engine.instance_file(template, extra_variables=extra_variables)
|
||||
engine.instance_file(template, extra_variables=extra_variables)
|
||||
except Exception as err:
|
||||
print()
|
||||
print(f'=== Configuration: {server_name} ===')
|
||||
try:
|
||||
values = await subconfig.value.dict()
|
||||
await value_pprint(values, subconfig)
|
||||
values = subconfig.value.dict()
|
||||
value_pprint(values, subconfig)
|
||||
except:
|
||||
pass
|
||||
raise err from err
|
||||
|
|
@ -159,17 +160,17 @@ async def templates(server_name,
|
|||
if copy_manuals and not is_host:
|
||||
dest_dir = join(INSTALL_DIR, INSTALL_IMAGES_DIR, module)
|
||||
if not isdir(dest_dir):
|
||||
for manual in await subconfig.information.get('manuals_dirs'):
|
||||
for manual in subconfig.information.get('manuals_dirs'):
|
||||
for filename in listdir(manual):
|
||||
src_file = join(manual, filename)
|
||||
dst_file = join(dest_dir, filename)
|
||||
copy(src_file, dst_file)
|
||||
copy_tests = await config.information.get('copy_tests')
|
||||
copy_tests = config.information.get('copy_tests')
|
||||
|
||||
if copy_tests and not is_host:
|
||||
dest_dir = join(INSTALL_DIR, INSTALL_TESTS_DIR, module)
|
||||
if not isdir(dest_dir):
|
||||
for tests in await subconfig.information.get('tests_dirs'):
|
||||
for tests in subconfig.information.get('tests_dirs'):
|
||||
for filename in listdir(tests):
|
||||
src_file = join(tests, filename)
|
||||
dst_file = join(dest_dir, filename)
|
||||
|
|
@ -178,7 +179,6 @@ async def templates(server_name,
|
|||
|
||||
class Loader:
|
||||
def __init__(self,
|
||||
clean_directories,
|
||||
hide_secret,
|
||||
original_display_name,
|
||||
valid_mandatories,
|
||||
|
|
@ -188,10 +188,6 @@ class Loader:
|
|||
self.original_display_name = original_display_name
|
||||
self.valid_mandatories = valid_mandatories
|
||||
self.config_file = config_file
|
||||
if clean_directories:
|
||||
if isdir(INSTALL_DIR):
|
||||
rmtree(INSTALL_DIR)
|
||||
makedirs(INSTALL_DIR)
|
||||
|
||||
def load_tiramisu_file(self):
|
||||
"""Load config file (servers.yml) and build tiramisu file with dataset informations
|
||||
|
|
@ -224,28 +220,54 @@ class Loader:
|
|||
rougail = RougailConvert(cfg)
|
||||
for host_name, datas in self.servers_json['hosts'].items():
|
||||
for server_name, server_datas in datas['servers'].items():
|
||||
for zone in server_datas['informations']['zones_name']:
|
||||
if 'provider_zone' not in server_datas and 'zones_name' not in server_datas:
|
||||
raise Exception(f'cannot find "zones_name" attribute for server "{server_name}"')
|
||||
if 'provider_zone' in server_datas:
|
||||
zones_name.setdefault(server_datas['provider_zone'], []).append(server_name)
|
||||
if 'zones_name' not in server_datas:
|
||||
server_datas['zones_name'] = []
|
||||
if server_datas['provider_zone'] in server_datas['zones_name']:
|
||||
raise Exception(_('provider_zone "{server_datas["provider_zone"]}" must not be in "zones" "{server_datas["zones_name"]}"'))
|
||||
# external zone is better in first place
|
||||
if server_datas['zones_name'] and self.servers_json['zones']['external_zone'] == server_datas['zones_name'][0]:
|
||||
server_datas['zones_name'].append(server_datas['provider_zone'])
|
||||
else:
|
||||
server_datas['zones_name'].insert(0, server_datas['provider_zone'])
|
||||
# if server_datas['zones_name'] and server_datas['provider_zone'] == self.servers_json['zones']['external_zone']:
|
||||
# server_datas['zones_name'].insert(0, server_datas['provider_zone'])
|
||||
# else:
|
||||
# server_datas['zones_name'].append(server_datas['provider_zone'])
|
||||
for zone in server_datas['zones_name']:
|
||||
zones_name.setdefault(zone, []).append(server_name)
|
||||
self.zones = {}
|
||||
zones_network = ip_network(self.servers_json['zones']['network'])
|
||||
zone_start_ip = zones_network.network_address
|
||||
domain_name = self.servers_json['zones']['prefix_domain_name']
|
||||
for idx, zone_name in enumerate(zones_name):
|
||||
sub_network = ip_network(f'{zone_start_ip}/28')
|
||||
if not sub_network.subnet_of(zones_network):
|
||||
raise Exception('not enough IP available')
|
||||
if sub_network.num_addresses < len(zones_name[zone_name]):
|
||||
#FIXME should try to increase network!
|
||||
raise Exception(f'network too small for zone {zone_name}')
|
||||
if idx == 0:
|
||||
for zone_name in zones_name:
|
||||
len_zone = len(zones_name[zone_name])
|
||||
for zone_cidr in [29, 28, 27, 26]:
|
||||
try:
|
||||
sub_network = ip_network(f'{zone_start_ip}/{zone_cidr}')
|
||||
except ValueError:
|
||||
# calc network address for this mask
|
||||
zone_start_ip = IPv4Interface(f'{zone_start_ip}/{zone_cidr}').network.broadcast_address + 1
|
||||
sub_network = ip_network(f'{zone_start_ip}/{zone_cidr}')
|
||||
if not sub_network.subnet_of(zones_network):
|
||||
raise Exception('not enough IP available')
|
||||
length = sub_network.num_addresses - 3 # network + broadcast + host
|
||||
if length >= len_zone:
|
||||
break
|
||||
else:
|
||||
raise Exception(f'network too small for zone "{zone_name}" ({sub_network.num_addresses - 2} < {len_zone})')
|
||||
if self.servers_json['zones']['external_zone'] == zone_name:
|
||||
zone_domaine_name = domain_name
|
||||
else:
|
||||
zone_domaine_name = zone_name + '.' + domain_name
|
||||
|
||||
network = sub_network.network_address
|
||||
self.zones[zone_name] = {'domain_name': zone_domaine_name,
|
||||
'network': str(sub_network),
|
||||
'host_ip': str(network + 1),
|
||||
'length': length,
|
||||
'start_ip': str(network + 2)
|
||||
}
|
||||
zone_start_ip = str(sub_network.broadcast_address + 1)
|
||||
|
|
@ -266,9 +288,13 @@ class Loader:
|
|||
|
||||
# load host
|
||||
module_info = modules.get('host')
|
||||
tls_host_name = f'{server_name}.{self.zones[list(self.zones)[0]]["domain_name"]}'
|
||||
cfg['risotto_globals'][host_name] = {'global:server_name': host_name,
|
||||
'global:server_names': [host_name for zone in self.zones],
|
||||
'global:zones_name': list(self.zones),
|
||||
'global:module_name': 'host',
|
||||
'global:host_install_dir': abspath(INSTALL_DIR),
|
||||
'global:tls_server': tls_host_name,
|
||||
}
|
||||
functions_files |= set(module_info.functions_file)
|
||||
self.load_dictionaries(cfg,
|
||||
|
|
@ -280,7 +306,7 @@ class Loader:
|
|||
modules_info = {}
|
||||
for server_name, server_datas in datas['servers'].items():
|
||||
module_info = modules.get(server_datas['applicationservice'])
|
||||
zones_name = server_datas['informations']['zones_name']
|
||||
zones_name = server_datas['zones_name']
|
||||
values = [f'{server_name}.{self.zones[zone_name]["domain_name"]}' for zone_name in zones_name]
|
||||
if server_datas['applicationservice'] == 'tls':
|
||||
true_host_name = f'{server_name}.{self.zones[list(self.zones)[0]]["domain_name"]}'
|
||||
|
|
@ -292,7 +318,10 @@ class Loader:
|
|||
'global:zones_name': zones_name,
|
||||
'global:zones_list': list(range(len(zones_name))),
|
||||
'global:module_name': server_datas['applicationservice'],
|
||||
'global:prefix_domain_name': self.servers_json['zones']['prefix_domain_name']
|
||||
}
|
||||
if 'provider_zone' in server_datas:
|
||||
cfg['risotto_globals'][true_host_name]['global:provider_zone'] = server_datas['provider_zone']
|
||||
server_datas['server_name'] = true_host_name
|
||||
functions_files |= set(module_info.functions_file)
|
||||
self.load_dictionaries(cfg,
|
||||
|
|
@ -309,26 +338,24 @@ class Loader:
|
|||
zones = set()
|
||||
dns_module_name = None
|
||||
for host in self.servers_json['hosts'].values():
|
||||
zones = [None, None]
|
||||
zones = [self.servers_json['zones']['external_zone'], None]
|
||||
for server_name, datas in host['servers'].items():
|
||||
if not 'applicationservice' in datas:
|
||||
raise Exception(f'cannot find applicationservice for "{server_name}"')
|
||||
if datas['applicationservice'] == 'tls':
|
||||
raise Exception(f'forbidden module name "tls" for server {server_name}')
|
||||
raise Exception(f'forbidden module name "tls" for server "{server_name}"')
|
||||
#FIXME use provider!
|
||||
if datas['applicationservice'] == 'nginx-reverse-proxy' and len(datas['informations']['zones_name']) > 0:
|
||||
if datas['applicationservice'] == 'nginx-reverse-proxy' and len(datas['zones_name']) > 0:
|
||||
if dns_module_name:
|
||||
break
|
||||
# always add tls machine in second zone of reverse proxy
|
||||
zones[1] = datas['informations']['zones_name'][0]
|
||||
if datas['applicationservice'] == 'unbound':
|
||||
# always add tls machine in second zone of reverse proxy
|
||||
zones[0] = datas['informations']['zones_name'][0]
|
||||
zones[1] = datas['provider_zone']
|
||||
if None in zones:
|
||||
zones = []
|
||||
else:
|
||||
if zones[0] == zones[1]:
|
||||
zones = [zones[0]]
|
||||
host['servers']['tls'] = {'applicationservice': 'tls',
|
||||
'informations': {'zones_name': list(zones)},
|
||||
'zones_name': list(zones),
|
||||
}
|
||||
|
||||
def load_dictionaries(self, cfg, module_info, server_name, rougail):
|
||||
|
|
@@ -344,7 +371,7 @@ class Loader:
self.manuals_dirs[server_name] = module_info.manuals
self.tests_dirs[server_name] = module_info.tests

async def tiramisu_file_to_tiramisu(self):
def tiramisu_file_to_tiramisu(self):
# l
tiramisu_space = FUNCTIONS.copy()
try:

@@ -356,78 +383,78 @@ class Loader:
display_name = None
else:
display_name = tiramisu_display_name
self.config = await Config(tiramisu_space['option_0'],
self.config = Config(tiramisu_space['option_0'],
display_name=display_name,
)

async def load_values_and_informations(self):
def load_values_and_informations(self):
config = self.config
await config.property.read_write()
await config.property.pop('validator')
await config.property.pop('cache')
config.property.read_write()
config.property.remove('validator')
config.property.remove('cache')
load_zones(self.zones, self.servers_json['hosts'])
await config.information.set('zones', self.zones)
config.information.set('zones', self.zones)
for host_name, hosts_datas in self.servers_json['hosts'].items():
information = config.option(normalize_family(host_name)).information
await information.set('module', 'host')
await information.set('templates_dir', self.templates_dir[host_name])
await information.set('patches_dir', self.patches_dir[host_name])
await information.set('functions_files', self.functions_files[host_name])
await self.set_values(host_name, config, hosts_datas)
information.set('module', 'host')
information.set('templates_dir', self.templates_dir[host_name])
information.set('patches_dir', self.patches_dir[host_name])
information.set('functions_files', self.functions_files[host_name])
self.set_values(host_name, config, hosts_datas)
for datas in hosts_datas['servers'].values():
server_name = datas['server_name']
information = config.option(normalize_family(server_name)).information
await information.set('module', datas['applicationservice'])
await information.set('templates_dir', self.templates_dir[server_name])
await information.set('patches_dir', self.patches_dir[server_name])
await information.set('functions_files', self.functions_files[server_name])
await information.set('manuals_dirs', self.manuals_dirs[server_name])
await information.set('tests_dirs', self.tests_dirs[server_name])
await self.set_values(server_name, config, datas)
information.set('module', datas['applicationservice'])
information.set('templates_dir', self.templates_dir[server_name])
information.set('patches_dir', self.patches_dir[server_name])
information.set('functions_files', self.functions_files[server_name])
information.set('manuals_dirs', self.manuals_dirs[server_name])
information.set('tests_dirs', self.tests_dirs[server_name])
self.set_values(server_name, config, datas)

await config.information.set('copy_tests', False)
config.information.set('copy_tests', False)
# FIXME only one host_name is supported
await config.information.set('modules', self.modules[host_name])
# await config.information.set('modules', {module_name: module_info.depends for module_name, module_info in self.module_infos.items() if module_name in modules})
await config.property.add('cache')
config.information.set('modules', self.modules[host_name])
# config.information.set('modules', {module_name: module_info.depends for module_name, module_info in self.module_infos.items() if module_name in modules})
config.property.add('cache')
if self.valid_mandatories:
messages = await valid_mandatories(config)
messages = valid_mandatories(config)
if messages:
msg = ''
for title, variables in messages.items():
msg += '\n' + title + '\n'
msg += '\n'.join(variables)
raise Exception(msg)
await config.property.read_only()
with open(VALUES_CACHE, 'w') as fh:
json_dump(await config.value.exportation(), fh)
with open(INFORMATIONS_CACHE, 'w') as fh:
json_dump(await config.information.exportation(), fh)
config.property.read_only()
with open(VALUES_CACHE, 'wb') as fh:
pickle_dump(config.value.exportation(), fh)
with open(INFORMATIONS_CACHE, 'wb') as fh:
pickle_dump(config.information.exportation(), fh)

async def set_values(self,
server_name,
config,
datas,
):
def set_values(self,
server_name,
config,
datas,
):
if 'values' not in datas:
return
if not isinstance(datas['values'], dict):
raise Exception(f'Values of "{server_name}" are not a dict: {datas["values"]}')
server_path = normalize_family(server_name)
await config.owner.set(self.config_file)
config.owner.set(self.config_file)
for vpath, value in datas['values'].items():
path = f'{server_path}.{vpath}'
try:
if isinstance(value, dict):
for idx, val in value.items():
await config.option(path, int(idx)).value.set(val)
config.option(path, int(idx)).value.set(val)
else:
await config.option(path).value.set(value)
config.option(path).value.set(value)
except Exception as err:
await value_pprint(await config.value.dict(), config)
value_pprint(config.value.dict(), config)
error_msg = f'cannot configure variable {vpath} for server "{server_name}": {err}'
raise Exception(error_msg) from err
await config.owner.set('user')
config.owner.set('user')


class LoaderCache(Loader):

@@ -435,63 +462,60 @@ class LoaderCache(Loader):
with open(TIRAMISU_CACHE) as fh:
self.tiram_obj = fh.read()

async def load_values_and_informations(self):
with open(VALUES_CACHE, 'r') as fh:
await self.config.value.importation(json_load(fh))
with open(INFORMATIONS_CACHE, 'r') as fh:
informations = json_load(fh)
# null is not a valid key in json => 'null'
informations[None] = informations.pop('null')
await self.config.information.importation(informations)
def load_values_and_informations(self):
with open(VALUES_CACHE, 'rb') as fh:
self.config.value.importation(pickle_load(fh))
with open(INFORMATIONS_CACHE, 'rb') as fh:
self.config.information.importation(pickle_load(fh))


async def load(clean_directories=False,
hide_secret=False,
original_display_name: bool=False,
valid_mandatories: bool=True,
copy_tests: bool=False,
):
def load(hide_secret=False,
original_display_name: bool=False,
valid_mandatories: bool=True,
copy_tests: bool=False,
):
if isfile(TIRAMISU_CACHE) and isfile(VALUES_CACHE) and isfile(INFORMATIONS_CACHE):
loader_obj = LoaderCache
else:
loader_obj = Loader
loader = loader_obj(clean_directories,
hide_secret,
loader = loader_obj(hide_secret,
original_display_name,
valid_mandatories,
)
loader.load_tiramisu_file()
await loader.tiramisu_file_to_tiramisu()
await loader.load_values_and_informations()
loader.tiramisu_file_to_tiramisu()
loader.load_values_and_informations()
config = loader.config
await config.property.read_only()
await config.information.set('copy_tests', copy_tests)
await config.cache.reset()
config.property.read_only()
config.information.set('copy_tests', copy_tests)
config.cache.reset()
return config


async def build_files(hostname: str,
only_machine: str,
just_copy: bool,
copy_tests: bool,
template: str=None,
) -> None:
def build_files(hostname: str,
only_machine: str,
just_copy: bool,
copy_tests: bool,
template: str=None,
) -> None:
if isdir(INSTALL_DIR):
rmtree(INSTALL_DIR)
makedirs(INSTALL_DIR)
with open(CONFIG_FILE, 'r') as server_fh:
servers_json = yaml_load(server_fh, Loader=SafeLoader)
config = await load(copy_tests=copy_tests)
machines = [await subconfig.option.description() for subconfig in await config.option.list(type='optiondescription')]
config = load(copy_tests=copy_tests)
machines = [subconfig.description() for subconfig in config.option.list(type='optiondescription')]
certificates = {'certificates': {},
'configuration': servers_json['certificates'],
}
# get certificates informations
tls_machine = None
tls_machine = config.option(f'{normalize_family(hostname)}.general.tls_server').value.get()
for machine in machines:
if machine.startswith('tls.'):
tls_machine = machine
if machine == tls_machine:
continue
if hostname is None:
# FIXME multi host!
hostname = await config.option(normalize_family(machine)).option('general.host_name').value.get()
hostname = config.option(normalize_family(machine)).option('general.host_name').value.get()
if just_copy:
continue
is_host = machine == hostname

@@ -500,24 +524,24 @@ async def build_files(hostname: str,
machine_config = config.option(normalize_family(machine))
certificate_names = []
private_names = []
for service in await machine_config.option('services').option.list('optiondescription'):
if not await service.option('activate').value.get():
for service in machine_config.option('services').list('optiondescription'):
if not service.option('activate').value.get():
continue
# if await service.option('manage').value.get():
# if service.option('manage').value.get():
# certificate_type = 'server'
# else:
# certificate_type = 'client'
tls_ca_directory = await machine_config.option('general.tls_ca_directory').value.get()
tls_cert_directory = await machine_config.option('general.tls_cert_directory').value.get()
tls_key_directory = await machine_config.option('general.tls_key_directory').value.get()
tls_ca_directory = machine_config.option('general.tls_ca_directory').value.get()
tls_cert_directory = machine_config.option('general.tls_cert_directory').value.get()
tls_key_directory = machine_config.option('general.tls_key_directory').value.get()
try:
for certificate in await service.option('certificates').option.list('all'):
if not await certificate.option('activate').value.get():
for certificate in service.option('certificates').list('all'):
if not certificate.option('activate').value.get():
continue
certificate_data = await certificate.value.dict()
certificate_data['type'] = await certificate.information.get('type')
certificate_data['authority'] = join(tls_ca_directory, await certificate.information.get('authority') + '.crt')
certificate_data['format'] = await certificate.information.get('format')
certificate_data = {key.rsplit('.', 1)[1]: value for key, value in certificate.value.dict().items()}
certificate_data['type'] = certificate.information.get('type')
certificate_data['authority'] = join(tls_ca_directory, certificate.information.get('authority') + '.crt')
certificate_data['format'] = certificate.information.get('format')
is_list_name = isinstance(certificate_data['name'], list)
is_list_domain = isinstance(certificate_data['domain'], list)
if is_list_name != is_list_domain:

@@ -526,7 +550,7 @@ async def build_files(hostname: str,
certificate_data['provider'] = 'autosigne'
if is_list_name:
if len(certificate_data['name']) != len(certificate_data['domain']):
raise Exception('certificate name and domain name must have same lenght')
raise Exception('certificate name and domain name must have same length')
for idx, certificate_name in enumerate(certificate_data['name']):
cert_data = certificate_data.copy()
if certificate_data['format'] == 'cert_key':

@@ -565,19 +589,19 @@ async def build_files(hostname: str,
continue
if only_machine and only_machine != machine:
continue
await templates(machine,
config,
just_copy=just_copy,
copy_manuals=True,
template=template,
extra_variables=certificates,
)
templates(machine,
config,
just_copy=just_copy,
copy_manuals=True,
template=template,
extra_variables=certificates,
)
is_host = machine == hostname
if is_host:
directories[machine] = '/usr/local/lib'
elif not just_copy:
machine_config = config.option(normalize_family(machine))
directories[machine] = await machine_config.option('general.config_dir').value.get()
directories[machine] = machine_config.option('general.config_dir').value.get()
if only_machine:
return directories
if only_machine:
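
The hunks above replace the asyncio-based tiramisu calls (every `await config...`) with the synchronous API and switch the value/information caches from JSON text files to pickled binary files. A minimal sketch of the resulting calling pattern, using only calls visible in this diff; the option tree and cache path are placeholders and the tiramisu imports are assumed:

    from pickle import dump as pickle_dump, load as pickle_load
    from tiramisu import Config, OptionDescription, StrOption

    # placeholder option tree standing in for tiramisu_space['option_0']
    od = OptionDescription('option_0', 'root', [StrOption('host_name', 'host name')])
    config = Config(od)
    config.property.read_write()
    config.option('host_name').value.set('host1.example.net')   # no await any more
    # caches are now written with pickle ('wb'/'rb') instead of json
    with open('/tmp/values.cache', 'wb') as fh:
        pickle_dump(config.value.exportation(), fh)
    with open('/tmp/values.cache', 'rb') as fh:
        config.value.importation(pickle_load(fh))
    config.property.read_only()
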
@@ -26,19 +26,19 @@ def _parse_kwargs(provider, dns, kwargs, index=None):
continue
elif data['dns'] not in dns:
continue
del data['dns']
# del data['dns']
yield data


@multi_function
def calc_providers_global(provider, multi, value, suffix=None):
def calc_providers_global(provider, multi, unique, value, suffix=None):
if suffix is not None:
return value[int(suffix)]
return value


@multi_function
def calc_providers_follower(provider, multi, dns, leader, index, **kwargs):
def calc_providers_follower(provider, multi, unique, dns, leader, index, **kwargs):
ret = []
for data in _parse_kwargs(provider, dns, kwargs):
if 'value' not in data:

@@ -64,7 +64,7 @@ def calc_providers_follower(provider, multi, dns, leader, index, **kwargs):


@multi_function
def calc_providers_dynamic_follower(provider, multi, dns, leader, index, suffix, **kwargs):
def calc_providers_dynamic_follower(provider, multi, unique, dns, leader, index, suffix, **kwargs):
ret = []
for data in _parse_kwargs(provider, dns, kwargs):
if 'value' not in data:

@@ -92,7 +92,7 @@ def calc_providers_dynamic_follower(provider, multi, dns, leader, index, suffix,


@multi_function
def calc_providers_dynamic(provider, multi, dns, suffix, **kwargs):
def calc_providers_dynamic(provider, multi, unique, dns, suffix, **kwargs):
ret = []
for data in _parse_kwargs(provider, dns, kwargs):
if 'value' not in data:

@@ -101,7 +101,7 @@ def calc_providers_dynamic(provider, multi, dns, suffix, **kwargs):
continue
if isinstance(data['value'], list):
for v in data['value']:
if v not in ret:
if not unique or v not in ret:
ret.append(v)
elif data['value'] not in ret:
ret.append(data['value'])

@@ -112,10 +112,14 @@ def calc_providers_dynamic(provider, multi, dns, suffix, **kwargs):


@multi_function
def calc_providers(provider, multi, dns, suffix=None, **kwargs):
def calc_providers(provider, multi, unique, dns, **kwargs):
ret = []
for data in _parse_kwargs(provider, dns, kwargs):
if isinstance(data['value'], list):
commun_dns = list(set(data['dns']) & set(dns))
if len(commun_dns) == 1:
ret.append(data['value'][data['dns'].index(commun_dns[0])])
continue
for v in data['value']:
if v in ret:
continue

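The four hunks above thread a new `unique` flag through the calc_providers* helpers: collected values are only de-duplicated when the target variable is declared unique. A standalone illustration of just that filtering rule (not the real helpers, which also depend on _parse_kwargs and the rougail objectspace):

    def collect(values, unique):
        # mirrors the "if not unique or v not in ret" test added above
        ret = []
        for v in values:
            if not unique or v not in ret:
                ret.append(v)
        return ret

    assert collect(['a', 'b', 'a'], unique=True) == ['a', 'b']
    assert collect(['a', 'b', 'a'], unique=False) == ['a', 'b', 'a']
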
@@ -135,216 +139,289 @@ class Annotator(Walk):
objectspace: 'RougailObjSpace',
*args):
self.objectspace = objectspace
self.set_suppliers()
self.get_suppliers_providers()
self.dispatch_provider_supplier_to_zones()
self.dispatch_provider_to_zones()
self.convert_providers()
self.convert_suppliers()

def set_suppliers(self) -> dict:
def get_suppliers_providers(self) -> None:
""" get supplier informations
return something like:
{'Host': ['host1.example.net', 'host2.example.net']}
"""
self.suppliers = {}
for variable in self.get_variables():
if not hasattr(variable, 'supplier') or ':' in variable.supplier:
continue
nf_dns = variable.path.split('.', 1)[0]
server_name = self.objectspace.space.variables[nf_dns].doc
self.suppliers.setdefault(variable.supplier, []).append({'option': variable,
'dns': server_name,
'path_prefix': nf_dns,
'server_names': self.objectspace.rougailconfig['risotto_globals'][server_name]['global:server_names'],
'zone_names': self.objectspace.rougailconfig['risotto_globals'][server_name]['global:zones_name'],
'zones': set(self.objectspace.rougailconfig['risotto_globals'][server_name]['global:zones_name'])
})
if not hasattr(variable, 'information'):
variable.information = self.objectspace.information(variable.xmlfiles)
variable.information.supplier = variable.supplier

def convert_providers(self):
self.providers = {}
for variable in self.get_variables():
if not hasattr(variable, 'provider'):
if not hasattr(variable, 'supplier') and not hasattr(variable, 'provider'):
continue
nf_dns = variable.path.split('.', 1)[0]
server_name = self.objectspace.space.variables[nf_dns].doc
provider_name = variable.provider
if self.objectspace.rougailconfig['risotto_globals'][server_name]['global:module_name'] == 'host':
server_names = [server_name]
else:
# supplier
if hasattr(variable, 'supplier') and ':' not in variable.supplier:
server_names = self.objectspace.rougailconfig['risotto_globals'][server_name]['global:server_names']
if provider_name != 'Host' and not provider_name.startswith('Host:') and not provider_name.startswith('global:'):
p_data = {'option': variable,
zones = self.objectspace.rougailconfig['risotto_globals'][server_name]['global:zones_name']
s_data = {'option': variable,
'dns': server_name,
'path_prefix': nf_dns,
'server_names': server_names,
'zone_names': self.objectspace.rougailconfig['risotto_globals'][server_name]['global:zones_name'],
'zones': set(self.objectspace.rougailconfig['risotto_globals'][server_name]['global:zones_name']),
'zones': zones,
'providers_zone': {},
}
else:
p_data = None
if ':' in provider_name:
key_name, key_type = provider_name.rsplit(':', 1)
is_provider = False
else:
key_name = key_type = provider_name
is_provider = True
if provider_name != 'Host':
self.providers.setdefault(provider_name, []).append(p_data)
if key_name != 'global' and key_name not in self.suppliers:
#warn(f'cannot find supplier "{key_name}" for "{server_name}"')
continue
# create a fill for this variable
fill = self.objectspace.fill(variable.xmlfiles)
new_target = self.objectspace.target(variable.xmlfiles)
new_target.name = variable
fill.target = [new_target]
if key_name == 'global':
fill.name = 'calc_providers_global'
elif self.objectspace.paths.is_dynamic(variable):
if self.objectspace.paths.is_follower(variable):
fill.name = 'calc_providers_dynamic_follower'
if variable.supplier != 'Host' and not variable.supplier.startswith('Host:') and not variable.supplier.startswith('global:'):
if 'global:provider_zone' in self.objectspace.rougailconfig['risotto_globals'][server_name]:
s_data['provider_zone'] = self.objectspace.rougailconfig['risotto_globals'][server_name]['global:provider_zone']
self.suppliers.setdefault(variable.supplier, []).append(s_data)
if not hasattr(variable, 'information'):
variable.information = self.objectspace.information(variable.xmlfiles)
variable.information.supplier = variable.supplier
# provider
if hasattr(variable, 'provider'):
provider_name = variable.provider
p_data = {'option': variable,
'dns': server_name,
'provider_name': provider_name,
'path_prefix': nf_dns,
'suppliers_zone': {},
}
if variable.provider != 'Host' and not variable.provider.startswith('Host:') and not variable.provider.startswith('global:'):
if 'global:provider_zone' in self.objectspace.rougailconfig['risotto_globals'][server_name]:
p_data['provider_zone'] = self.objectspace.rougailconfig['risotto_globals'][server_name]['global:provider_zone']
if self.objectspace.rougailconfig['risotto_globals'][server_name]['global:module_name'] == 'host':
server_names = [server_name]
else:
fill.name = 'calc_providers_dynamic'
elif self.objectspace.paths.is_follower(variable):
fill.name = 'calc_providers_follower'
else:
fill.name = 'calc_providers'
fill.namespace = variable.namespace
fill.index = 0
# first parameter: the provider name (something link Host:incoming_ports)
param = self.objectspace.param(variable.xmlfiles)
param.name = 'provider'
param.text = provider_name
fill.param = [param]
# second parameter: current variable is a multi variable?
param = self.objectspace.param(variable.xmlfiles)
param.name = 'multi'
param.text = variable.multi
param.type = 'boolean'
fill.param.append(param)
if self.objectspace.paths.is_follower(variable):
server_names = self.objectspace.rougailconfig['risotto_globals'][server_name]['global:server_names']
p_data['server_names'] = server_names
if self.objectspace.rougailconfig['risotto_globals'][server_name]['global:module_name'] != 'host':
p_data['zones'] = self.objectspace.rougailconfig['risotto_globals'][server_name]['global:zones_name']
if ':' in provider_name:
p_data['is_main_provider'] = False
provider = provider_name.rsplit(':', 1)[0]
else:
p_data['is_main_provider'] = True
provider = variable.provider
self.providers.setdefault(provider, []).append(p_data)

def dispatch_provider_supplier_to_zones(self):
"""calculate zone where provider and supplier communicate
"""
self.providers_zone = {}
for provider_name, p_datas in self.providers.items():
for p_data in p_datas:
if provider_name in ['global', 'Host'] or provider_name not in self.suppliers:
continue
if not 'provider_zone' in p_data:
provider_zone = None
else:
provider_zone = p_data['provider_zone']
for s_data in self.suppliers[provider_name]:
if not provider_zone:
if provider_name.endswith('Client'):
p_server = provider_name[0:-6]
if p_server not in self.providers:
continue
for p_data_t in self.providers[p_server]:
if p_data_t['dns'] == s_data['dns'] and 'provider_zone' in p_data_t:
zone = p_data_t['provider_zone']
break
else:
continue
else:
continue
else:
zone = provider_zone
if zone not in s_data['zones']:
continue
s_data['providers_zone'][provider_name] = zone
p_data['suppliers_zone'].setdefault(provider_name, {})[s_data['dns']] = zone
self.providers_zone.setdefault(zone, set()).add(provider_name)

def dispatch_provider_to_zones(self):
""" add information with provider zone domain name
"""
self.providers_zone = {}
for provider_name, p_datas in self.providers.items():
for p_data in p_datas:
if provider_name in ['global', 'Host'] or provider_name not in self.suppliers:
continue
if not 'provider_zone' in p_data:
continue
provider_zone = p_data['provider_zone']
family = self.objectspace.paths.get_variable(f"providers",
namespace=self.objectspace.rougailconfig['variable_namespace'],
force_path_prefix=p_data['path_prefix'],
)
if not hasattr(family, 'information'):
family.information = self.objectspace.information(family.xmlfiles)
name_in_zone = p_data['server_names'][p_data['zones'].index(provider_zone)]
setattr(family.information, provider_name, name_in_zone)
setattr(family.information, f'{provider_name}:zone', provider_zone)

def convert_providers(self):
for provider_name, providers_data in self.providers.items():
for provider_data in providers_data:
if provider_name != 'global' and provider_name not in self.suppliers:
continue
# create a fill for this variable
variable = provider_data['option']
fill = self.objectspace.fill(variable.xmlfiles)
new_target = self.objectspace.target(variable.xmlfiles)
new_target.name = variable
fill.target = [new_target]
if provider_name == 'global':
fill.name = 'calc_providers_global'
elif self.objectspace.paths.is_dynamic(variable):
if self.objectspace.paths.is_follower(variable):
fill.name = 'calc_providers_dynamic_follower'
else:
fill.name = 'calc_providers_dynamic'
elif self.objectspace.paths.is_follower(variable):
fill.name = 'calc_providers_follower'
else:
fill.name = 'calc_providers'
fill.namespace = variable.namespace
fill.index = 0
# first parameter: the provider name (something link Host:incoming_ports)
param = self.objectspace.param(variable.xmlfiles)
param.name = 'leader'
param.text = self.objectspace.paths.get_leader(variable)
param.propertyerror = False
param.type = 'variable'
param.name = 'provider'
param.text = provider_data['provider_name']
fill.param = [param]
# second parameter: current variable is a multi variable?
param = self.objectspace.param(variable.xmlfiles)
param.name = 'multi'
param.text = variable.multi
param.type = 'boolean'
fill.param.append(param)
try:
leader_provider = self.objectspace.paths.get_leader(variable).provider
except:
leader_provider = None
#
param = self.objectspace.param(variable.xmlfiles)
param.name = 'index'
param.type = 'index'
param.name = 'unique'
param.text = variable.unique != "False"
param.type = 'boolean'
fill.param.append(param)
if self.objectspace.paths.is_dynamic(variable):
# if dynamic: current suffix
# and add current DNS name, this is useful to known if supplier is link to this provider
param = self.objectspace.param(variable.xmlfiles)
param.name = 'suffix'
param.type = 'suffix'
fill.param.append(param)
if key_name != 'global':
param = self.objectspace.param(variable.xmlfiles)
param.name = 'dns'
param.text = server_names
fill.param.append(param)
if key_name == 'global':
param = self.objectspace.param(variable.xmlfiles)
param.name = 'value'
if provider_name in self.objectspace.rougailconfig['risotto_globals'][server_name]:
value = self.objectspace.rougailconfig['risotto_globals'][server_name][provider_name]
param.text = value
if isinstance(value, bool):
param.type = 'boolean'
else:
param.text = provider_name
param.type = 'information'
fill.param.append(param)
else:
# parse all supplier link to current provider
for idx, data in enumerate(self.suppliers[key_name]):
if p_data:
common_zones = data['zones'] & p_data['zones']
if not common_zones:
continue
for zidx, zone in enumerate(data['zone_names']):
if zone in common_zones:
break
dns = data['server_names'][zidx]
else:
dns = data['dns']
option = data['option']
# if not provider, get the true option that we want has value
if not is_provider:
path_prefix = data['path_prefix']
try:
supplier_option = self.objectspace.paths.get_supplier(f'supplier:{provider_name}', path_prefix)
except KeyError:
#warn(f'cannot find supplier "{provider_name}" for "{dns}"')
continue
# first of all, get the supplier name
#
if self.objectspace.paths.is_follower(variable):
param = self.objectspace.param(variable.xmlfiles)
param.name = f'dns_{idx}'
param.text = option
param.name = 'leader'
param.text = self.objectspace.paths.get_leader(variable)
param.propertyerror = False
param.type = 'variable'
fill.param.append(param)
if not is_provider and \
self.objectspace.paths.is_follower(variable):
try:
leader_provider = self.objectspace.paths.get_leader(variable).provider
except:
leader_provider = None
#
param = self.objectspace.param(variable.xmlfiles)
param.name = 'index'
param.type = 'index'
fill.param.append(param)
if self.objectspace.paths.is_dynamic(variable):
# if dynamic: current suffix
# and add current DNS name, this is useful to known if supplier is link to this provider
param = self.objectspace.param(variable.xmlfiles)
param.name = 'suffix'
param.type = 'suffix'
fill.param.append(param)
if provider_name != 'global':
param = self.objectspace.param(variable.xmlfiles)
param.name = 'dns'
param.text = provider_data['server_names']
fill.param.append(param)
if provider_name == 'global':
param = self.objectspace.param(variable.xmlfiles)
param.name = 'value'
if provider_data['provider_name'] in self.objectspace.rougailconfig['risotto_globals'][provider_data['dns']]:
value = self.objectspace.rougailconfig['risotto_globals'][provider_data['dns']][provider_data['provider_name']]
param.text = value
if isinstance(value, bool):
param.type = 'boolean'
else:
param.text = provider_data['provider_name']
param.type = 'information'
fill.param.append(param)
else:
# parse all supplier link to current provider
for idx, data in enumerate(self.suppliers[provider_name]):
if 'zones' in provider_data:
if provider_name not in data['providers_zone']:
continue
zone = data['providers_zone'][provider_name]
zidx = data['zones'].index(zone)
dns = data['server_names'][zidx]
else:
dns = data['dns']
option = data['option']
# if not provider, get the true option that we want have value
if not provider_data['is_main_provider']:
path_prefix = data['path_prefix']
try:
supplier_option = self.objectspace.paths.get_supplier(f'supplier:{provider_data["provider_name"]}', path_prefix)
except KeyError:
#warn(f'cannot find supplier "{provider_name}" for "{dns}"')
continue
# first of all, get the supplier name
param = self.objectspace.param(variable.xmlfiles)
param.name = f'leader_{idx}'
if fill.name == 'calc_providers_follower':
param.name = f'dns_{idx}'
param.text = option
param.propertyerror = False
param.type = 'variable'
fill.param.append(param)
if not provider_data['is_main_provider'] and \
self.objectspace.paths.is_follower(variable):
param = self.objectspace.param(variable.xmlfiles)
param.name = f'leader_{idx}'
if fill.name == 'calc_providers_follower':
param.text = dns
else:
if self.objectspace.paths.is_follower(supplier_option):
param.text = self.objectspace.paths.get_leader(supplier_option)
else:
param.text = self.objectspace.paths.get_supplier(f'supplier:{leader_provider}', path_prefix)
param.propertyerror = False
param.type = 'variable'
fill.param.append(param)
# get the current DNS name for dynamic variable
if self.objectspace.paths.is_dynamic(variable):
param = self.objectspace.param(variable.xmlfiles)
param.name = f'dynamic_{idx}'
param.text = dns
fill.param.append(param)
# get the current value!
param = self.objectspace.param(variable.xmlfiles)
param.name = f'value_{idx}'
if provider_data['is_main_provider']:
param.text = dns
else:
if self.objectspace.paths.is_follower(supplier_option):
param.text = self.objectspace.paths.get_leader(supplier_option)
else:
param.text = self.objectspace.paths.get_supplier(f'supplier:{leader_provider}', path_prefix)
param.text = supplier_option
param.propertyerror = False
param.type = 'variable'
fill.param.append(param)
# get the current DNS name for dynamic variable
if self.objectspace.paths.is_dynamic(variable):
param = self.objectspace.param(variable.xmlfiles)
param.name = f'dynamic_{idx}'
param.text = dns
fill.param.append(param)
# get the current value!
param = self.objectspace.param(variable.xmlfiles)
param.name = f'value_{idx}'
if is_provider:
param.text = dns
else:
param.text = supplier_option
param.propertyerror = False
param.type = 'variable'
fill.param.append(param)
if not hasattr(self.objectspace.space.variables[nf_dns], 'constraints'):
self.objectspace.space.variables[nf_dns].constraints = self.objectspace.constraints(None)
if not hasattr(self.objectspace.space.variables[nf_dns].constraints, 'fill'):
self.objectspace.space.variables[nf_dns].constraints.fill = []
self.objectspace.space.variables[nf_dns].constraints.fill.append(fill)
if not hasattr(self.objectspace.space.variables[provider_data['path_prefix']], 'constraints'):
self.objectspace.space.variables[provider_data['path_prefix']].constraints = self.objectspace.constraints(None)
if not hasattr(self.objectspace.space.variables[provider_data['path_prefix']].constraints, 'fill'):
self.objectspace.space.variables[provider_data['path_prefix']].constraints.fill = []
self.objectspace.space.variables[provider_data['path_prefix']].constraints.fill.append(fill)

def convert_suppliers(self):
for supplier, data in self.suppliers.items():
if supplier == 'Host':
for supplier_name, s_datas in self.suppliers.items():
if supplier_name == 'Host':
continue
for s_dico in data:
if supplier not in self.providers:
for s_data in s_datas:
if supplier_name not in self.providers:
continue
for p_dico in self.providers[supplier]:
common_zones = s_dico['zones'] & p_dico['zones']
if not common_zones:
for p_data in self.providers[supplier_name]:
if s_data['dns'] == p_data['dns']:
# supplier and provider are in same machine
continue
for idx, zone in enumerate(p_dico['zone_names']):
if zone in common_zones:
break
dns = p_dico['server_names'][idx]
s_dico['option'].value = dns
if supplier_name not in p_data['suppliers_zone'] or s_data['dns'] not in p_data['suppliers_zone'][supplier_name]:
continue
# get the DNS name in supplier zone
zone = p_data['suppliers_zone'][supplier_name][s_data['dns']]
if zone not in p_data['zones']:
continue
zidx = p_data['zones'].index(zone)
dns = p_data['server_names'][zidx]
new_value = self.objectspace.value(None)
new_value.name = dns
s_dico['option'].value = [new_value]
s_data['option'].value = [new_value]
break

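In the reworked annotator above, a provider and a supplier are matched through the zone they share: dispatch_provider_supplier_to_zones records that zone on both sides, and convert_providers/convert_suppliers then pick the provider's DNS name in that zone from the parallel zones/server_names lists. A toy illustration of that lookup, with made-up zone and host names:

    p_data = {'zones': ['dmz', 'backend'],
              'server_names': ['srv.dmz.example.net', 'srv.backend.example.net']}
    zone = 'backend'  # zone shared with the supplier
    dns = p_data['server_names'][p_data['zones'].index(zone)]
    assert dns == 'srv.backend.example.net'
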
@@ -4,6 +4,7 @@ from typing import List
from ipaddress import ip_address
from toml import load as toml_load
from json import load, dump
from json.decoder import JSONDecodeError
from pprint import pprint


@@ -15,6 +16,10 @@ HERE = environ['PWD']
IP_DIR = join(HERE, 'ip')


# custom filters from dataset
custom_filters = {}


config_file = environ.get('CONFIG_FILE', 'risotto.conf')
if isfile(config_file):
with open(config_file, 'r') as fh:

@@ -35,10 +40,10 @@ def multi_function(function):
return function


async def value_pprint(dico, config):
def value_pprint(dico, config):
pprint_dict = {}
for path, value in dico.items():
if await config.option(path).option.type() == 'password' and value:
if config.option(path).type() == 'password' and value:
value = 'X' * len(value)
pprint_dict[path] = value
pprint(pprint_dict)

@@ -49,28 +54,45 @@ def load_zones(zones, hosts):
makedirs(IP_DIR)
json_file = join(IP_DIR, 'zones.json')
if isfile(json_file):
with open(json_file, 'r') as fh:
zones_ip = load(fh)
try:
with open(json_file, 'r') as fh:
ori_zones_ip = load(fh)
except JSONDecodeError:
ori_zones_ip = {}
else:
zones_ip = {}
for host_name, hosts in hosts.items():
for server_name, server in hosts['servers'].items():
server_zones = server['informations']['zones_name']
ori_zones_ip = {}
new_zones_ip = {}
# cache, machine should not change IP
for host_name, dhosts in hosts.items():
for server_name, server in dhosts['servers'].items():
server_zones = server['zones_name']
for idx, zone_name in enumerate(server_zones):
zone = zones[zone_name]
zone.setdefault('hosts', {})
# FIXME make a cache, machine should not change IP
if zone_name not in zones_ip:
zones_ip[zone_name] = {}
if server_name in zones_ip[zone_name]:
server_index = zones_ip[zone_name][server_name]
elif not zones_ip[zone_name]:
server_index = 0
if zone_name not in new_zones_ip:
new_zones_ip[zone_name] = {}
if zone_name in ori_zones_ip and server_name in ori_zones_ip[zone_name]:
server_index = ori_zones_ip[zone_name][server_name]
if server_index >= zone['length']:
server_index = None
elif server_index in new_zones_ip[zone_name].values():
server_index = None
else:
# it's the last ip + 1
server_index = zones_ip[zone_name][list(zones_ip[zone_name].keys())[-1]] + 1
ip = str(ip_address(zone['start_ip']) + server_index)
zone['hosts'][server_name] = ip
zones_ip[zone_name][server_name] = server_index
server_index = None
new_zones_ip[zone_name][server_name] = server_index
for zone_name, servers in new_zones_ip.items():
for server_name, server_idx in servers.items():
if server_idx is not None:
continue
for new_idx in range(zones[zone_name]['length']):
if new_idx not in new_zones_ip[zone_name].values():
new_zones_ip[zone_name][server_name] = new_idx
break
else:
raise Exception(f'cannot find free IP in zone "{zone_name}" for "{server_name}"')
for zone_name, servers in new_zones_ip.items():
start_ip = ip_address(zones[zone_name]['start_ip'])
for server_name, server_index in servers.items():
zones[zone_name]['hosts'][server_name] = str(start_ip + server_index)
with open(json_file, 'w') as fh:
dump(zones_ip, fh)
dump(new_zones_ip, fh)
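
The load_zones rewrite above keeps the zones.json index cache but now validates it: a cached index is dropped when it falls outside the zone or collides with another machine, machines without a valid index get the first free slot, and the final IP is still start_ip + index. A simplified sketch of that allocation for a single zone (the function name and data shapes are illustrative, not the actual load_zones signature):

    from ipaddress import ip_address

    def allocate(zone_start_ip, zone_length, cached, machines):
        new = {}
        for name in machines:
            idx = cached.get(name)
            # drop cached indexes that no longer fit or collide
            if idx is not None and (idx >= zone_length or idx in new.values()):
                idx = None
            new[name] = idx
        for name, idx in new.items():
            if idx is None:
                # first free slot, as in the "for new_idx in range(...)" loop above
                new[name] = next(i for i in range(zone_length) if i not in new.values())
        return {name: str(ip_address(zone_start_ip) + i) for name, i in new.items()}

    print(allocate('192.168.0.10', 20, {'mail': 0}, ['mail', 'dns']))
    # {'mail': '192.168.0.10', 'dns': '192.168.0.11'}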