convert to ansible
This commit is contained in:
parent
e3bca44f3a
commit
30a605a81c
23 changed files with 1521 additions and 75 deletions
48
ansible/action_plugins/build_images.py
Normal file
48
ansible/action_plugins/build_images.py
Normal file
|
@ -0,0 +1,48 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
from os import listdir, makedirs
|
||||
from os.path import isfile, isdir, join, dirname
|
||||
from shutil import copy2, copytree, rmtree
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from risotto.utils import RISOTTO_CONFIG
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
    """Collect every module's manual image files into /tmp/risotto/images.

    Expects one task argument, ``modules``: a mapping of module name to the
    list of dataset entries ("depends") the module is built from.  For each
    dependency shipping a ``manual/image`` directory, its content is copied
    below ``/tmp/risotto/images/<module_name>/``.
    """

    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_args = self._task.args.copy()
        modules = module_args['modules']
        dataset_directory = RISOTTO_CONFIG['directories']['dataset']
        install_dir = join('/tmp/risotto/images')
        # Always start from an empty destination tree.
        if isdir(install_dir):
            rmtree(install_dir)
        for module_name, depends in modules.items():
            for depend in depends:
                manual = join(dataset_directory, depend, 'manual', 'image')
                if not isdir(manual):
                    # This dependency ships no manual image files.
                    continue
                for filename in listdir(manual):
                    src_file = join(manual, filename)
                    dst_file = join(install_dir, module_name, filename)
                    if isdir(src_file):
                        if not isdir(dst_file):
                            makedirs(dst_file)
                        for subfilename in listdir(src_file):
                            src = join(src_file, subfilename)
                            dst = join(dst_file, subfilename)
                            # BUGFIX: this used to test isfile(dst_file) —
                            # a directory, so the check never skipped
                            # anything.  Test the actual destination entry,
                            # mirroring the elif branch below (first depend
                            # providing a file wins).
                            if not isfile(dst):
                                if isfile(src):
                                    copy2(src, dst)
                                else:
                                    copytree(src, dst)
                    elif not isfile(dst_file):
                        dst = dirname(dst_file)
                        if not isdir(dst):
                            makedirs(dst)
                        if isfile(src_file):
                            copy2(src_file, dst_file)
                        else:
                            copytree(src_file, dst_file)
        # Action plugins must return a dict; no facts are produced here.
        return dict(ansible_facts=dict({}))
|
||||
|
14
ansible/action_plugins/machinectl.py
Normal file
14
ansible/action_plugins/machinectl.py
Normal file
|
@ -0,0 +1,14 @@
|
|||
#!/usr/bin/python3
|
||||
from ansible.plugins.action import ActionBase
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
    """Delegate to the ``machinectl`` library module and surface its result."""

    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        module_return = self._execute_module(
            module_name='machinectl',
            module_args=self._task.args.copy(),
            task_vars=task_vars,
            tmp=tmp,
        )
        # Propagate failures untouched so Ansible reports them verbatim.
        if module_return.get('failed'):
            return module_return
        return {'ansible_facts': {}, 'changed': module_return['changed']}
|
55
ansible/action_plugins/rougail.py
Normal file
55
ansible/action_plugins/rougail.py
Normal file
|
@ -0,0 +1,55 @@
|
|||
#!/usr/bin/python3
|
||||
from ansible.plugins.action import ActionBase
|
||||
from asyncio import run
|
||||
from shutil import rmtree
|
||||
from os.path import isdir, join
|
||||
from os import makedirs
|
||||
|
||||
from risotto.machine import templates, load, ROUGAIL_NAMESPACE
|
||||
from risotto.utils import RISOTTO_CONFIG
|
||||
from rougail.utils import normalize_family
|
||||
|
||||
|
||||
TIRAMISU_CACHE = 'tiramisu_cache.py'
|
||||
VALUES_CACHE = 'values_cache.py'
|
||||
INSTALL_DIR = RISOTTO_CONFIG['directories']['dest']
|
||||
|
||||
|
||||
async def build_files(server_name, is_host):
    """Render the rougail templates for one server into INSTALL_DIR.

    server_name: name of the server in the tiramisu configuration tree.
    is_host: True when rendering for the physical host, False for a machine.
    """
    # Reload the tiramisu configuration from the on-disk caches.
    module_infos, rougailconfig, config = await load(TIRAMISU_CACHE,
                                                     VALUES_CACHE,
                                                     )
    subconfig = config.option(normalize_family(server_name))
    # The module name is stored as a tiramisu "information" on the subconfig.
    module_name = await subconfig.option(await subconfig.information.get('provider:global:module_name')).value.get()
    module_info = module_infos[module_name]
    rougailconfig['tmp_dir'] = 'tmp'
    rougailconfig['destinations_dir'] = INSTALL_DIR
    rougailconfig['templates_dir'] = module_info['infos'].templates_dir
    if is_host:
        # The host gets its tmpfiles below its configured install directory
        # and uses the real /usr/local/lib/systemd tree.
        tmpfile = await subconfig.option(f'{ROUGAIL_NAMESPACE}.host_install_dir').value.get()
        rougailconfig['tmpfile_dest_dir'] = f'{tmpfile}'
        rougailconfig['default_systemd_directory'] = '/usr/local/lib/systemd'
    else:
        rougailconfig['tmpfile_dest_dir'] = '/usr/local/lib'
        rougailconfig['default_systemd_directory'] = '/systemd'
    # Start from empty working and destination directories.
    if isdir(rougailconfig['destinations_dir']):
        rmtree(rougailconfig['destinations_dir'])
    if isdir(rougailconfig['tmp_dir']):
        rmtree(rougailconfig['tmp_dir'])
    makedirs(rougailconfig['tmp_dir'])
    makedirs(rougailconfig['destinations_dir'])
    # Actually render every template for this server.
    await templates(server_name,
                    subconfig,
                    rougailconfig,
                    )
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
    """Run the rougail template generation locally for one host or machine."""

    def run(self, tmp=None, task_vars=None):
        super(ActionModule, self).run(tmp, task_vars)
        args = self._task.args.copy()
        # build_files is a coroutine; drive it to completion synchronously.
        run(build_files(args['hostname'], args['is_host']))
        return dict(ansible_facts=dict({}))
|
1
ansible/file.txt
Normal file
1
ansible/file.txt
Normal file
|
@ -0,0 +1 @@
|
|||
{'pouet': 'a'}
|
114
ansible/filter_plugins/fileslist.py
Normal file
114
ansible/filter_plugins/fileslist.py
Normal file
|
@ -0,0 +1,114 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
|
||||
from os.path import dirname
|
||||
|
||||
|
||||
def _add(files, file_data, name, name_only, prefix):
    """Append one file entry to *files*.

    When *name_only* is true only the (possibly prefixed) path is stored,
    otherwise a dict with name/owner/group/mode is stored.
    """
    if prefix is not None:
        name = prefix + name
    if name_only:
        files.append(name)
    else:
        files.append({'name': name,
                      'owner': file_data['owner'],
                      'group': file_data['group'],
                      'mode': file_data['mode'],
                      })


def fileslist(data, is_host=False, name_only=False, prefix=None):
    """Return the list of files generated for the services in *data*.

    data: mapping of service name -> service description.
    is_host: prefix systemd paths with /usr/local/lib (host layout).
    name_only: return plain paths instead of name/owner/group/mode dicts.
    prefix: optional string prepended to every path.
    """
    files = []
    if is_host:
        base_systemd = '/usr/local/lib'
    else:
        base_systemd = ''
    # The tmpfiles snippet is always generated.
    _add(files,
         {'owner': 'root', 'group': 'root', 'mode': '0755'},
         '/tmpfiles.d/0rougail.conf',
         name_only,
         prefix,
         )
    for service_data in data.values():
        # Unit file for every active, templated service.
        if service_data['activate'] and service_data['engine'] != 'none':
            _add(files,
                 {'owner': 'root', 'group': 'root', 'mode': '0755'},
                 base_systemd + '/systemd/system/' + service_data['doc'],
                 name_only,
                 prefix,
                 )
        # Drop-in override snippets.
        if service_data['activate'] and 'overrides' in service_data:
            for override_data in service_data['overrides'].values():
                _add(files,
                     {'owner': 'root', 'group': 'root', 'mode': '0755'},
                     base_systemd + '/systemd/system/' + override_data['name'] + '.d/rougail.conf',
                     name_only,
                     prefix,
                     )
        if 'files' not in service_data:
            continue
        for file_data in service_data['files'].values():
            # Files embedded in another file ('content') are not installed.
            if not file_data['activate'] or file_data['included'] == 'content':
                continue
            if isinstance(file_data['name'], list):
                for name in file_data['name']:
                    _add(files, file_data, name, name_only, prefix)
            else:
                _add(files, file_data, file_data['name'], name_only, prefix)
    return files
|
||||
|
||||
|
||||
def directorieslist(data):
    """Return the directories that must exist for the generated files.

    data: mapping of service name -> service description; every active
    file contributes its parent directory.  The systemd system directory
    is always included.  Order of the returned list is unspecified (it
    comes from a set).
    """
    directories = {'/usr/local/lib/systemd/system/'}
    for service_data in data.values():
        if 'files' not in service_data:
            continue
        for file_data in service_data['files'].values():
            if not file_data['activate']:
                continue
            # 'name' may be a single path or a list of paths.
            names = file_data['name']
            if not isinstance(names, list):
                names = [names]
            for name in names:
                directories.add(dirname(name))
    return list(directories)
|
||||
|
||||
|
||||
def machineslist(data, only=None, only_name=False):
    """List the machines described in *data*.

    only: when given, return just that machine.
    only_name: return plain names instead of name/srv dicts.
    """
    def entry(name):
        if only_name:
            return name
        return {'name': name,
                'srv': data[name]['machine']['add_srv'],
                }

    if only is not None:
        return [entry(only)]
    machines = []
    for host, host_data in data.items():
        # Keep only fully-qualified machine entries; skip the host itself
        # and every non-machine variable in the mapping.
        if '.' not in host:
            continue
        if not isinstance(host_data, dict) or 'general' not in host_data:
            continue
        if host_data['general']['module_name'] == 'host':
            continue
        machines.append(entry(host))
    return machines
|
||||
|
||||
|
||||
def modulename(data, servername):
    """Return the module name configured for *servername* in *data*."""
    server_vars = data[servername]
    return server_vars['module_name']
|
||||
|
||||
|
||||
class FilterModule:
    """Expose the risotto helpers above as Jinja2 filters."""

    def filters(self):
        # Each filter is registered under its function name.
        filter_map = {}
        for func in (fileslist, directorieslist, machineslist, modulename):
            filter_map[func.__name__] = func
        return filter_map
|
143
ansible/host.yml
Normal file
143
ansible/host.yml
Normal file
|
@ -0,0 +1,143 @@
|
|||
---
|
||||
- name: "Populate service facts"
|
||||
service_facts:
|
||||
|
||||
- name: "Stop services"
|
||||
when: item.value['manage'] and item.value['activate'] and item.value['doc'].endswith('.service') and not item.value['doc'].endswith('@.service') and item.value['engine'] != 'none'
|
||||
ansible.builtin.service:
|
||||
name: "{{ item.value['doc'] }}"
|
||||
state: stopped
|
||||
loop: "{{ vars[inventory_hostname]['services'] | dict2items }}"
|
||||
loop_control:
|
||||
label: "{{ item.value['doc'] }}"
|
||||
|
||||
- name: "Packages installation"
|
||||
apt:
|
||||
pkg: "{{ vars[inventory_hostname]['general']['host_packages'] }}"
|
||||
update_cache: yes
|
||||
state: latest
|
||||
|
||||
- name: "Build host files"
|
||||
local_action:
|
||||
module: rougail
|
||||
hostname: "{{ inventory_hostname }}"
|
||||
is_host: True
|
||||
|
||||
- name: "Create host directories"
|
||||
file: path={{ item }} state=directory mode=0755
|
||||
loop: "{{ vars[inventory_hostname]['services'] | directorieslist }}"
|
||||
|
||||
- name: "Copy systemd-tmpfiles"
|
||||
when: item.name.startswith('/usr/local/lib/risotto-tmpfiles.d')
|
||||
ansible.builtin.copy:
|
||||
src: installations/{{ item.name }}
|
||||
dest: "{{ item.name }}"
|
||||
owner: "{{ item.owner }}"
|
||||
group: "{{ item.group }}"
|
||||
mode: "{{ item.mode }}"
|
||||
loop: "{{ vars[inventory_hostname]['services'] | fileslist(is_host=True) }}"
|
||||
loop_control:
|
||||
label: "{{ item.name}}"
|
||||
|
||||
- name: "Execute systemd-tmpfiles"
|
||||
when: item.name.startswith('/usr/local/lib/risotto-tmpfiles.d')
|
||||
command: /usr/bin/systemd-tmpfiles --create --clean --remove {{ item.name }}
|
||||
loop: "{{ vars[inventory_hostname]['services'] | fileslist(is_host=True) }}"
|
||||
loop_control:
|
||||
label: "{{ item.name}}"
|
||||
|
||||
# Install every generated host file except tmpfiles snippets.
# NOTE(review): this exclusion prefix ('/usr/local/lib/tmpfiles.d') does not
# match the '/usr/local/lib/risotto-tmpfiles.d' prefix used by the two
# systemd-tmpfiles tasks in this file, so tmpfiles snippets are copied here
# a second time — confirm whether the two prefixes should agree.
- name: "Copy host files"
  when: not item.name.startswith('/usr/local/lib/tmpfiles.d')
  ansible.builtin.copy:
    src: installations/{{ item.name }}
    dest: "{{ item.name }}"
    owner: "{{ item.owner }}"
    group: "{{ item.group }}"
    mode: "{{ item.mode }}"
  loop: "{{ vars[inventory_hostname]['services'] | fileslist(is_host=True) }}"
  loop_control:
    label: "{{ item.name}}"
|
||||
|
||||
- name: "Reload systemd services configuration"
|
||||
ansible.builtin.systemd:
|
||||
daemon_reload: yes
|
||||
|
||||
- name: "Enable services"
|
||||
when: item.value['manage'] and item.value['activate'] and '@.service' not in item.value['doc']
|
||||
ansible.builtin.service:
|
||||
name: "{{ item.value['doc'] }}"
|
||||
enabled: yes
|
||||
loop: "{{ vars[inventory_hostname]['services'] | dict2items }}"
|
||||
loop_control:
|
||||
label: "{{ item.value['doc'] }}"
|
||||
|
||||
# Disable managed units that are deactivated (unless 'undisable' keeps them
# enabled); template units ('@.service') are skipped.
- name: "Disable services"
  when: item.value['manage'] and not item.value['activate'] and not item.value['undisable'] and '@.service' not in item.value['doc']
  ansible.builtin.service:
    name: "{{ item.value['doc'] }}"
    # BUGFIX: was 'enabled: yes' — this task is meant to *disable* the unit.
    enabled: no
  loop: "{{ vars[inventory_hostname]['services'] | dict2items }}"
  loop_control:
    label: "{{ item.value['doc'] }}"
|
||||
|
||||
- name: "Start services"
|
||||
when: item.value['manage'] and item.value['activate'] and item.value['doc'].endswith('.service') and not item.value['doc'].endswith('@.service') and item.value['engine'] != 'none'
|
||||
ansible.builtin.service:
|
||||
name: "{{ item.value['doc'] }}"
|
||||
state: started
|
||||
loop: "{{ vars[inventory_hostname]['services'] | dict2items }}"
|
||||
loop_control:
|
||||
label: "{{ item.value['doc'] }}"
|
||||
|
||||
- name: "Restart services"
|
||||
when: item.value['manage'] and item.value['activate'] and item.value['doc'].endswith('.service') and not item.value['doc'].endswith('@.service') and item.value['engine'] == 'none'
|
||||
ansible.builtin.service:
|
||||
name: "{{ item.value['doc'] }}"
|
||||
state: restarted
|
||||
loop: "{{ vars[inventory_hostname]['services'] | dict2items }}"
|
||||
loop_control:
|
||||
label: "{{ item.value['doc'] }}"
|
||||
|
||||
- name: "Copy machines scripts"
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item }}"
|
||||
dest: "/usr/local/sbin"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
mode: "0755"
|
||||
loop: "{{ lookup('fileglob', '../sbin/*', wantlist=True) | list }}"
|
||||
|
||||
# Images informations
|
||||
- name: "Remove images tar"
|
||||
local_action:
|
||||
module: file
|
||||
path: /tmp/risotto/images.tar
|
||||
state: absent
|
||||
|
||||
- name: "Build images files"
|
||||
local_action:
|
||||
module: build_images
|
||||
modules: "{{ vars['modules'] }}"
|
||||
|
||||
- name: "Compress images files"
|
||||
local_action:
|
||||
module: archive
|
||||
path: "/tmp/risotto/images/"
|
||||
dest: /tmp/risotto/images.tar
|
||||
format: tar
|
||||
|
||||
- name: "Remove dest images files"
|
||||
file:
|
||||
path: /var/lib/risotto/images_files
|
||||
state: absent
|
||||
|
||||
- name: "Create images files"
|
||||
file:
|
||||
path: /var/lib/risotto/images_files
|
||||
state: directory
|
||||
mode: "0700"
|
||||
|
||||
- name: "Copy images files"
|
||||
unarchive:
|
||||
src: "/tmp/risotto/images.tar"
|
||||
dest: "/var/lib/risotto/images_files"
|
1
ansible/installations
Symbolic link
1
ansible/installations
Symbolic link
|
@ -0,0 +1 @@
|
|||
/home/gnunux/git/risotto/risotto/installations/
|
15
ansible/inventory.json
Normal file
15
ansible/inventory.json
Normal file
|
@ -0,0 +1,15 @@
|
|||
{
|
||||
"_meta": {
|
||||
"hostvars": {}
|
||||
},
|
||||
"all": {
|
||||
"children": [
|
||||
"ungrouped"
|
||||
]
|
||||
},
|
||||
"ungrouped": {
|
||||
"hosts": [
|
||||
"cloud.silique.fr"
|
||||
]
|
||||
}
|
||||
}
|
114
ansible/inventory.py
Executable file
114
ansible/inventory.py
Executable file
|
@ -0,0 +1,114 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
'''
|
||||
Example custom dynamic inventory script for Ansible, in Python.
|
||||
'''
|
||||
|
||||
from argparse import ArgumentParser
|
||||
from json import dumps, JSONEncoder
|
||||
from os import remove
|
||||
from os.path import isfile
|
||||
from asyncio import run
|
||||
|
||||
from risotto.machine import load
|
||||
from risotto.image import load_config
|
||||
from risotto.utils import SERVERS
|
||||
from tiramisu.error import PropertiesOptionError
|
||||
from rougail.utils import normalize_family
|
||||
from rougail import RougailSystemdTemplate
|
||||
from rougail.template.base import RougailLeader, RougailExtra
|
||||
|
||||
TIRAMISU_CACHE = 'tiramisu_cache.py'
|
||||
VALUES_CACHE = 'values_cache.py'
|
||||
|
||||
|
||||
class RougailEncoder(JSONEncoder):
    """JSON encoder that knows how to serialise rougail runtime objects."""

    def default(self, obj):
        # A leader serialises as its follower list.
        if isinstance(obj, RougailLeader):
            return obj._follower
        # An "extra" serialises as its sub-option.
        if isinstance(obj, RougailExtra):
            return obj._suboption
        # Unreadable options are flagged with a marker string instead of
        # aborting the whole dump.
        if isinstance(obj, PropertiesOptionError):
            return 'PropertiesOptionError'
        # Anything else: let JSONEncoder raise its usual TypeError.
        return JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
class RisottoInventory(object):
    """Ansible dynamic inventory backed by the risotto/rougail configuration.

    Implements the dynamic-inventory CLI protocol: ``--list`` prints the
    inventory groups, ``--host NAME`` prints the variables for one host.
    """

    def __init__(self):
        parser = ArgumentParser()
        parser.add_argument('--list', action='store_true')
        parser.add_argument('--host', action='store')
        self.args = parser.parse_args()

    async def run(self):
        """Dispatch on the command-line mode and return the JSON output."""
        if self.args.list:
            # Drop stale caches so --list always rebuilds them; --host then
            # reuses the caches written by load().
            if isfile(TIRAMISU_CACHE):
                remove(TIRAMISU_CACHE)
            if isfile(VALUES_CACHE):
                remove(VALUES_CACHE)
            return await self.do_inventory()
        elif self.args.host:
            return await self.get_vars(self.args.host)
        raise Exception('pfff')

    async def do_inventory(self):
        """Return the JSON inventory: one group holding every 'host' server."""
        # NOTE(review): module_infos is not used below — confirm whether
        # load_config() is called only for its side effects.
        module_infos = load_config(True,
                                   True,
                                   True,
                                   )
        servers = []
        for server_name, server in SERVERS.items():
            module_name = server['module']
            # Only physical hosts appear in the inventory; machines are
            # reached through their host.
            if module_name != 'host':
                continue
            servers.append(server_name)
        return dumps({
            'group': {
                'hosts': servers,
                'vars': {
                    # FIXME
                    'ansible_ssh_host': '192.168.56.156',
                    'ansible_ssh_user': 'root',
                    'ansible_python_interpreter': '/usr/bin/python3'
                }
            }
        })

    async def get_vars(self,
                       host_name: str,
                       ) -> dict:
        """Return (as JSON) the rougail variables for *host_name* and its machines."""
        try:
            module_infos, rougailconfig, config = await load(TIRAMISU_CACHE,
                                                             VALUES_CACHE,
                                                             )
        except Exception as err:
            # import traceback
            # traceback.print_exc()
            print(err)
            exit(1)
        ret = {}
        modules = set()
        for server_name, server in SERVERS.items():
            # Skip every *other* host; keep this host and all machines.
            if server['module'] == 'host' and server_name != host_name:
                continue
            # NOTE(review): the module is recorded before the per-host
            # filter below, so modules of machines living on other hosts
            # are also included — confirm this is intended.
            modules.add(server['module'])
            subconfig = config.option(normalize_family(server_name))
            engine = RougailSystemdTemplate(subconfig, rougailconfig)
            await engine.load_variables()
            # Keep only machines that actually belong to this host.
            if server['module'] != 'host' and engine.rougail_variables_dict['general']['host'] != host_name:
                continue
            ret[server_name] = engine.rougail_variables_dict
        # Module -> dependency list, restricted to the modules in use.
        ret['modules'] = {module_name: module_info['infos'].depends for module_name, module_info in module_infos.items() if module_name in modules}
        ret['configure_host'] = True
        ret['only_machine'] = None
        return dumps(ret, cls=RougailEncoder)
|
||||
|
||||
|
||||
# Get the inventory.
|
||||
async def main():
    """Entry point: print the inventory (or one host's vars) on stdout."""
    inventory = RisottoInventory()
    output = await inventory.run()
    print(output)


run(main())
|
205
ansible/library/machinectl.py
Normal file
205
ansible/library/machinectl.py
Normal file
|
@ -0,0 +1,205 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
from time import sleep
|
||||
from os import fdopen
|
||||
from dbus import SystemBus, Array
|
||||
from dbus.exceptions import DBusException
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
|
||||
def stop(bus, machines):
    """Terminate the given systemd-nspawn machines and wait for them to stop.

    bus: a connected dbus SystemBus.
    machines: list of machine names to stop.
    Returns (changed, errors): changed is True when at least one machine was
    actually terminated; errors is a list of error strings.
    """
    changed = False
    remote_object = bus.get_object('org.freedesktop.machine1',
                                   '/org/freedesktop/machine1',
                                   False,
                                   )
    res = remote_object.ListMachines(dbus_interface='org.freedesktop.machine1.Manager')
    # '.host' is machined's entry for the host itself, never a container.
    started_machines = [str(r[0]) for r in res if str(r[0]) != '.host']
    for host in machines:
        if host not in started_machines:
            continue
        changed = True
        remote_object.TerminateMachine(host, dbus_interface='org.freedesktop.machine1.Manager')
    idx = 0
    errors = []
    while True:
        # Poll once a second until none of the requested machines is
        # listed any more, giving up after ~120 s.
        res = remote_object.ListMachines(dbus_interface='org.freedesktop.machine1.Manager')
        started_machines = [str(r[0]) for r in res if str(r[0]) != '.host']
        for host in machines:
            if host in started_machines:
                break
        else:
            # No requested machine is still running: done.
            break
        sleep(1)
        idx += 1
        if idx == 120:
            errors.append('Cannot not stopped: ' + ','.join(started_machines))
            break
    return changed, errors
|
||||
|
||||
|
||||
def start(bus, machines):
    """Start the given systemd-nspawn machines and wait for them to be healthy.

    bus: a connected dbus SystemBus.
    machines: list of machine names to start.
    Returns (changed, errors): changed is True when at least one machine was
    actually started; errors collects everything that went wrong.
    """
    changed = False
    remote_object = bus.get_object('org.freedesktop.machine1',
                                   '/org/freedesktop/machine1',
                                   False,
                                   )
    res = remote_object.ListMachines(dbus_interface='org.freedesktop.machine1.Manager')
    # '.host' is machined's entry for the host itself, never a container.
    started_machines = [str(r[0]) for r in res if str(r[0]) != '.host']
    remote_object_system = bus.get_object('org.freedesktop.systemd1',
                                          '/org/freedesktop/systemd1',
                                          False,
                                          )
    for host in machines:
        if host in started_machines:
            continue
        changed = True
        # Machines are started through their systemd-nspawn template unit.
        service = f'systemd-nspawn@{host}.service'
        remote_object_system.StartUnit(service, 'fail', dbus_interface='org.freedesktop.systemd1.Manager')
    errors = []
    idx = 0
    while True:
        # Poll once a second until every requested machine is listed,
        # giving up after ~120 s.
        res = remote_object.ListMachines(dbus_interface='org.freedesktop.machine1.Manager')
        started_machines = [str(r[0]) for r in res if str(r[0]) != '.host']
        for host in machines:
            if host not in started_machines:
                break
        else:
            # Every requested machine is registered: done waiting.
            break
        sleep(1)
        idx += 1
        if idx == 120:
            hosts = set(machines) - set(started_machines)
            errors.append('Cannot not start: ' + ','.join(hosts))
            break
    if not errors:
        # Second phase: wait for each machine's own systemd to settle, by
        # running 'systemctl is-system-running' inside the machine.
        idx = 0
        for host in machines:
            cmd = ['/usr/bin/systemctl', 'is-system-running']
            error = False
            while True:
                try:
                    res = remote_object.OpenMachineShell(host,
                                                         '',
                                                         cmd[0],
                                                         Array(cmd, signature='s'),
                                                         Array(['TERM=dumb'], signature='s'),
                                                         dbus_interface='org.freedesktop.machine1.Manager',
                                                         )
                    # OpenMachineShell hands back a pty fd; read its output.
                    fd = res[0].take()
                    fh = fdopen(fd)
                    ret = []
                    while True:
                        try:
                            ret.append(fh.readline().strip())
                        except OSError as err:
                            # errno 5 (EIO) marks the pty being closed:
                            # normal end of output.
                            if err.errno != 5:
                                raise err from err
                            break
                    if not ret:
                        errors.append(f'Cannot check {host} status')
                        error = True
                        break
                    if ret[0] in ['running', 'degraded']:
                        break
                except DBusException:
                    # Shell not available yet; retry below.
                    pass
                idx += 1
                sleep(1)
                if idx == 120:
                    errors.append(f'Cannot not start {host} ({ret})')
                    break
            if error:
                continue
            if ret[0] == 'running':
                continue
            # State was 'degraded': gather the failed units for the report.
            cmd = ['/usr/bin/systemctl', '--state=failed', '--no-legend', '--no-page']
            res = remote_object.OpenMachineShell(host,
                                                 '',
                                                 cmd[0],
                                                 Array(cmd, signature='s'),
                                                 Array(['TERM=dumb'], signature='s'),
                                                 dbus_interface='org.freedesktop.machine1.Manager',
                                                 )
            fd = res[0].take()
            fh = fdopen(fd)
            ret = []
            idx2 = 0
            while True:
                try:
                    ret.append(fh.readline().strip())
                except OSError as err:
                    if err.errno != 5:
                        raise err from err
                    break
                idx2 += 1
                if idx2 == 120:
                    errors.append(f'Cannot not get status to {host}')
                    break
            errors.append(f'{host}: ' + '\n'.join(ret))
    return changed, errors
|
||||
|
||||
def run_module():
    """Ansible module entry point: start or stop systemd-nspawn machines.

    Arguments: ``state`` ('started' or 'stopped') and ``machines`` (list of
    machine names).  Fails the task when any machine does not reach the
    requested state.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', required=True),
            machines=dict(type='list', required=True),
        ),
        supports_check_mode=True,
    )
    result = dict(
        changed=False,
        message=''
    )
    # In check mode, report the current (unchanged) state and stop here.
    if module.check_mode:
        module.exit_json(**result)

    bus = SystemBus()
    machines = module.params['machines']
    state = module.params['state']
    if state == 'stopped':
        result['changed'], errors = stop(bus, machines)
        if errors:
            errors = '\n\n'.join(errors)
            module.fail_json(msg=f'Some machines are not stopping correctly {errors}', **result)
    elif state == 'started':
        result['changed'], errors = start(bus, machines)
        if errors:
            errors = '\n\n'.join(errors)
            module.fail_json(msg=f'Some machines are not running correctly {errors}', **result)
    else:
        module.fail_json(msg=f"Unknown state: {module.params['state']}")

    # Success: hand the result back to Ansible.
    module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
    """Script entry point."""
    run_module()


if __name__ == '__main__':
    main()
|
93
ansible/machine.yml
Normal file
93
ansible/machine.yml
Normal file
|
@ -0,0 +1,93 @@
|
|||
- name: "Create SRV directory for {{ item.name}}"
|
||||
when: "item.srv"
|
||||
file: path=/var/lib/risotto/srv/{{ item.name }} state=directory mode=0755
|
||||
|
||||
- name: "Create SystemD directory for {{ item.name }}"
|
||||
file: path=/var/lib/risotto/journals/{{ item.name }} state=directory mode=0755
|
||||
|
||||
- name: "Build machine files for {{ item.name }}"
|
||||
local_action:
|
||||
module: rougail
|
||||
hostname: "{{ item.name}}"
|
||||
is_host: False
|
||||
|
||||
- name: "Get local informations for {{ item.name }} configuration's file"
|
||||
local_action:
|
||||
module: stat
|
||||
path: "installations{{ file.name }}"
|
||||
checksum: sha256
|
||||
get_checksum: yes
|
||||
loop: "{{ vars[item.name]['services'] | fileslist }}"
|
||||
loop_control:
|
||||
loop_var: file
|
||||
label: "{{ file.name }}"
|
||||
register: local_configuration
|
||||
|
||||
- name: "Get remote informations for {{ item.name }} configuration's file"
|
||||
stat:
|
||||
path: "/var/lib/risotto/configurations/{{ item.name }}{{ file.name }}"
|
||||
checksum: sha256
|
||||
get_checksum: yes
|
||||
loop: "{{ vars[item.name]['services'] | fileslist }}"
|
||||
loop_control:
|
||||
loop_var: file
|
||||
label: "{{ file.name }}"
|
||||
register: remote_configuration
|
||||
|
||||
- name: "Configuration's file is up to date in {{ item.name }}"
|
||||
debug:
|
||||
msg: "file is {{ 'out of date' if not file[1].stat.exists or file[0].stat.checksum != file[1].stat.checksum else 'up to date' }}"
|
||||
changed_when: not file[1].stat.exists or file[0].stat.checksum != file[1].stat.checksum
|
||||
loop: "{{ local_configuration.results | zip(remote_configuration.results) | list }}"
|
||||
loop_control:
|
||||
loop_var: file
|
||||
label: "{{ file[0]['stat']['path'] }}"
|
||||
ignore_errors: true
|
||||
register: up_to_date_configuration
|
||||
|
||||
- name: "Remove Compressed files for {{ item.name }}"
|
||||
local_action:
|
||||
module: file
|
||||
path: /tmp/new_configurations/{{ item.name }}
|
||||
state: absent
|
||||
when: up_to_date_configuration.changed
|
||||
|
||||
- name: "Compress files for {{ item.name }}"
|
||||
local_action:
|
||||
module: archive
|
||||
path: "installations/"
|
||||
dest: /tmp/new_configurations/{{ item.name }}
|
||||
format: tar
|
||||
when: up_to_date_configuration.changed
|
||||
|
||||
- name: "Create system directory for {{ item.name }}"
|
||||
file:
|
||||
path: /var/lib/machines/{{ item.name }}
|
||||
state: directory
|
||||
register: system_directory_created
|
||||
|
||||
- name: "Check image for {{ item.name }}"
|
||||
stat:
|
||||
path: "/var/lib/risotto/images/{{ vars | modulename(item.name) }}.tar"
|
||||
register: register_name
|
||||
when: system_directory_created.changed
|
||||
|
||||
- name: "Build image for {{ item.name }}"
|
||||
ansible.builtin.shell: "/usr/local/sbin/build_image {{ vars | modulename(item.name) }}"
|
||||
when: system_directory_created.changed and not register_name.stat.exists
|
||||
|
||||
- name: "Uncompress machine image for {{ item.name }}"
|
||||
unarchive:
|
||||
src: "/var/lib/risotto/images/{{ vars | modulename(item.name) }}.tar"
|
||||
remote_src: true
|
||||
dest: /var/lib/machines/{{ item.name }}/
|
||||
when: system_directory_created.changed
|
||||
|
||||
- name: "SHA machine image for {{ item.name }}"
|
||||
ansible.builtin.copy:
|
||||
src: "/var/lib/risotto/images/{{ vars | modulename(item.name) }}.tar.sha"
|
||||
remote_src: true
|
||||
dest: "/var/lib/risotto/configurations/sha/{{ item.name }}.sha"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
when: system_directory_created.changed
|
33
ansible/machines.yml
Normal file
33
ansible/machines.yml
Normal file
|
@ -0,0 +1,33 @@
|
|||
- name: "Stop machines with new configuration"
|
||||
machinectl:
|
||||
state: stopped
|
||||
machines: "{{ lookup('fileglob', '/tmp/new_configurations/*', wantlist=True) | map('basename') | list }}"
|
||||
|
||||
- name: "Remove files directory"
|
||||
file:
|
||||
path: "/var/lib/risotto/configurations/{{ item }}"
|
||||
state: absent
|
||||
loop: "{{ lookup('fileglob', '/tmp/new_configurations/*', wantlist=True) | map('basename') | list }}"
|
||||
|
||||
- name: "Create files directory"
|
||||
file:
|
||||
path: "/var/lib/risotto/configurations/{{ item }}"
|
||||
state: directory
|
||||
loop: "{{ lookup('fileglob', '/tmp/new_configurations/*', wantlist=True) | map('basename') | list }}"
|
||||
|
||||
- name: "Copy configuration"
|
||||
unarchive:
|
||||
src: "{{ item }}"
|
||||
dest: /var/lib/risotto/configurations/{{ item | basename }}/
|
||||
loop: "{{ lookup('fileglob', '/tmp/new_configurations/*', wantlist=True) }}"
|
||||
|
||||
- name: "Start machines"
|
||||
machinectl:
|
||||
state: started
|
||||
machines: "{{ vars | machineslist(only_name=True) }}"
|
||||
|
||||
- name: "Remove compressed files directory"
|
||||
local_action:
|
||||
module: file
|
||||
path: /tmp/new_configurations
|
||||
state: absent
|
1
ansible/password
Symbolic link
1
ansible/password
Symbolic link
|
@ -0,0 +1 @@
|
|||
../password/
|
1
ansible/pki
Symbolic link
1
ansible/pki
Symbolic link
|
@ -0,0 +1 @@
|
|||
../pki/
|
23
ansible/playbook.txt
Normal file
23
ansible/playbook.txt
Normal file
|
@ -0,0 +1,23 @@
|
|||
- name: installation dépendances
|
||||
apt:
|
||||
pkg:
|
||||
- systemd-container
|
||||
- dnf
|
||||
- jq
|
||||
- debootstrap
|
||||
- htop
|
||||
- gettext
|
||||
- patch
|
||||
- unzip
|
||||
- mlocate
|
||||
- xz-utils
|
||||
- iptables
|
||||
update_cache: yes
|
||||
state: latest
|
||||
|
||||
MARCHE
|
||||
- name: installation dépendances
|
||||
apt:
|
||||
pkg: "{{ packages }}"
|
||||
update_cache: yes
|
||||
state: latest
|
28
ansible/playbook.yml
Normal file
28
ansible/playbook.yml
Normal file
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
#FIXME : si on redemarre a appel tmpfiles.d ....
|
||||
- name: Risotto
|
||||
hosts: cloud.silique.fr
|
||||
tasks:
|
||||
- name: "Configure the host"
|
||||
include_tasks: host.yml
|
||||
when: configure_host == true
|
||||
|
||||
- name: "Remove compressed files directory"
|
||||
local_action:
|
||||
module: file
|
||||
path: /tmp/new_configurations
|
||||
state: absent
|
||||
|
||||
- name: "Create compressed configuration files directory"
|
||||
local_action:
|
||||
module: file
|
||||
path: /tmp/new_configurations
|
||||
state: directory
|
||||
mode: 0700
|
||||
|
||||
- name: "Prepare machine configuration"
|
||||
include_tasks: machine.yml
|
||||
loop: "{{ vars | machineslist(only=only_machine) }}"
|
||||
|
||||
- name: "Install and apply configurations"
|
||||
include_tasks: machines.yml
|
15
bootstrap.py
15
bootstrap.py
|
@ -19,24 +19,13 @@ CONFIG_ORI_DIR = 'ori'
|
|||
SRV_DEST_DIR = 'srv'
|
||||
|
||||
|
||||
def tiramisu_display_name(kls,
                          dyn_name: 'Base'=None,
                          suffix: str=None,
                          ) -> str:
    """Return the display name of a tiramisu option: its path, with the
    suffix appended for dynamic options."""
    # FIXME
    path = kls.impl_getpath()
    if dyn_name is None:
        return path
    return path + str(suffix)
|
||||
|
||||
|
||||
async def main():
|
||||
if isdir(INSTALL_DIR):
|
||||
rmtree(INSTALL_DIR)
|
||||
makedirs(INSTALL_DIR)
|
||||
try:
|
||||
module_infos, rougailconfig, config = await load(display_name=tiramisu_display_name,
|
||||
module_infos, rougailconfig, config = await load('a.py',
|
||||
'n.py',
|
||||
clean_directories=True,
|
||||
copy_manual_dir=True,
|
||||
copy_tests=True,
|
||||
|
|
196
sbin/build_image
Executable file
196
sbin/build_image
Executable file
|
@ -0,0 +1,196 @@
|
|||
#!/bin/bash -e
# build_image: (re)build the systemd-nspawn image for one module.
# Usage: build_image <module-name>

IMAGE_NAME=$1

# abort when no module name was given
if [ -z "$IMAGE_NAME" ]; then
    echo "PAS DE NOM DE MODULE"
    exit 1
fi

# root dir configuration
RISOTTO_DIR="/var/lib/risotto"
RISOTTO_IMAGE_DIR="$RISOTTO_DIR/images"
# image configuration
IMAGE_BASE_RISOTTO_BASE_DIR="$RISOTTO_IMAGE_DIR/image_bases"
IMAGE_NAME_RISOTTO_IMAGE_DIR="$RISOTTO_IMAGE_DIR/$IMAGE_NAME"
IMAGE_NAME_RISOTTO_IMAGE_NAME="$RISOTTO_IMAGE_DIR/$IMAGE_NAME".tar
IMAGE_DIR_RECIPIENT_IMAGE="/var/lib/risotto/images_files/$IMAGE_NAME"


#FIXME where?

# start from a clean work area
rm -rf "$IMAGE_NAME_RISOTTO_IMAGE_DIR" "$RISOTTO_IMAGE_DIR/tmp"
mkdir -p "$RISOTTO_IMAGE_DIR"
PKG=""
BASE_DIR=""
# preinstall scripts are sourced (not executed): they are expected to set
# OS_NAME, RELEASEVER, INSTALL_TOOL, PKG, BASE_PKG (and optionally COPR,
# REPO_DIR, FUSION) for the rest of this script
for script in $(ls "$IMAGE_DIR_RECIPIENT_IMAGE"/preinstall/*.sh 2> /dev/null); do
    . "$script"
done

# a module without these variables is simply skipped (exit 0, not an error)
if [ -z "$OS_NAME" ]; then
    echo "NO OS NAME DEFINED"
    exit 0
fi
if [ -z "$RELEASEVER" ]; then
    echo "NO RELEASEVER DEFINED"
    exit 0
fi
if [ -z "$INSTALL_TOOL" ]; then
    echo "NO INSTALL TOOL DEFINED"
    exit 0
fi
# per-distribution base image paths (shared by every module on the same base)
BASE_NAME="$OS_NAME-$RELEASEVER"
BASE_DIR="$IMAGE_BASE_RISOTTO_BASE_DIR/$BASE_NAME"
BASE_TAR="$IMAGE_BASE_RISOTTO_BASE_DIR-$BASE_NAME".tar
BASE_PKGS_FILE="$IMAGE_BASE_RISOTTO_BASE_DIR-$BASE_NAME.pkgs"
# lock file: removed by update_images so the base is rebuilt at most once per run
BASE_LOCK="$IMAGE_BASE_RISOTTO_BASE_DIR-$BASE_NAME.build"
|
||||
|
||||
|
||||
# Print the dnf options shared by every operation on a chroot:
# no weak dependencies, no docs, no plugins, install root given as $1,
# release pinned to $RELEASEVER (set by the preinstall scripts).
function dnf_opt_base() {
    local chroot_dir=$1
    printf -- '--setopt=install_weak_deps=False --nodocs --noplugins --installroot=%s --releasever %s\n' "$chroot_dir" "$RELEASEVER"
}
|
||||
|
||||
# Print the full dnf argument string "… install <packages>" for installing
# package set $2 into the chroot located at $1.
function dnf_opt() {
    local chroot_dir=$1
    local pkg_list=$2
    local base_opts
    base_opts=$(dnf_opt_base "$chroot_dir")
    echo "$base_opts install $pkg_list"
}
|
||||
# Compute the package list a fresh base image would contain and write it to
# "$BASE_PKGS_FILE.new".  dnf: pure dry-run (--assumeno), keeping only the
# indented package rows.  debian: debootstrap actually installs the base,
# then the list is read back from dpkg's database.
function new_package_base() {
    if [ "$INSTALL_TOOL" = "dnf" ]; then
        OPT=$(dnf_opt "$BASE_DIR" "$BASE_PKG")
        dnf --assumeno $OPT | grep ^" " > "$BASE_PKGS_FILE".new
    else
        debootstrap --include="$BASE_PKG" --variant=minbase "$RELEASEVER" "$BASE_DIR" > /dev/null
        chroot "$BASE_DIR" dpkg-query -f '${binary:Package} ${source:Version}\n' -W > "$BASE_PKGS_FILE".new
    fi
}
|
||||
# Actually install the base packages into "$BASE_DIR".  Only needed for dnf:
# on debian the install already happened in new_package_base (debootstrap).
function install_base() {
    if [ "$INSTALL_TOOL" = "dnf" ]; then
        OPT=$(dnf_opt "$BASE_DIR" "$BASE_PKG")
        dnf --assumeyes $OPT
    fi
}
|
||||
# Dry-run the module-specific package installation into the image chroot and
# record the resulting package list in "$IMAGE_NAME_RISOTTO_IMAGE_DIR.pkgs.new"
# (used later to decide whether a rebuild is needed).
function new_package() {
    if [ "$INSTALL_TOOL" = "dnf" ]; then
        OPT=$(dnf_opt_base "$IMAGE_NAME_RISOTTO_IMAGE_DIR")
        dnf $OPT update
        OPT=$(dnf_opt "$IMAGE_NAME_RISOTTO_IMAGE_DIR" "$PKG")
        # --assumeno: simulation only; keep the indented package rows
        dnf --assumeno $OPT | grep ^" " > "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs.new
    else
        chroot "$IMAGE_NAME_RISOTTO_IMAGE_DIR" apt update > /dev/null 2>&1
        # -s: simulate; keep only the "Inst <pkg> (<version> …" lines
        chroot "$IMAGE_NAME_RISOTTO_IMAGE_DIR" apt install --no-install-recommends --yes $PKG -s 2>/dev/null|grep ^"Inst " > "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs.new
    fi
}
|
||||
# Really install the module's packages ($PKG) into the image chroot, using
# whichever tool the preinstall scripts selected ($INSTALL_TOOL).
function install_pkg() {
    if [ "$INSTALL_TOOL" = "dnf" ]; then
        OPT=$(dnf_opt "$IMAGE_NAME_RISOTTO_IMAGE_DIR" "$PKG")
        dnf --assumeyes $OPT
    else
        chroot "$IMAGE_NAME_RISOTTO_IMAGE_DIR" apt install --no-install-recommends --yes $PKG
    fi
}
|
||||
|
||||
|
||||
# --- base image -------------------------------------------------------------
# Rebuild the distribution base image unless it was already (re)built during
# this update_images run ($BASE_LOCK) and its tarball exists.
if [ ! -f "$BASE_LOCK" ] || [ ! -f "$BASE_TAR" ]; then
    echo " - reinstallation de l'image de base"
    rm -rf "$BASE_DIR"
    new_package_base
    # NEW_BASE=true when the simulated package list differs from the stored one
    diff -u "$BASE_PKGS_FILE" "$BASE_PKGS_FILE".new && NEW_BASE=false || NEW_BASE=true
    if [ ! -f "$BASE_TAR" ] || [ "$NEW_BASE" = true ]; then
        mkdir -p "$IMAGE_BASE_RISOTTO_BASE_DIR"
        install_base
        # archive the fresh base so every module can start from the tarball
        cd "$IMAGE_BASE_RISOTTO_BASE_DIR"
        tar cf "$BASE_TAR" "$BASE_NAME"
        cd - > /dev/null
        if [ -f "$BASE_PKGS_FILE" ]; then
            mv "$BASE_PKGS_FILE" "$BASE_PKGS_FILE".old
        fi
        mv "$BASE_PKGS_FILE".new "$BASE_PKGS_FILE"
        rm -rf "$IMAGE_BASE_RISOTTO_BASE_DIR"
    fi
    rm -rf "$BASE_DIR"
    # mark the base as built for this run (cleared by update_images)
    touch "$BASE_LOCK"
fi

# --- module image -----------------------------------------------------------
# unpack the base tarball and rename it as this module's work directory
tar xf "$BASE_TAR"
mv "$BASE_NAME" "$IMAGE_NAME_RISOTTO_IMAGE_DIR"
# optional extra repositories declared by the preinstall scripts
if [ -n "$COPR" ]; then
    #FIXME signature...
    mkdir -p "$REPO_DIR"
    cd "$REPO_DIR"
    wget -q "$COPR"
    cd - > /dev/null
fi
if [ "$FUSION" = true ]; then
    dnf -y install "https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-$RELEASEVER.noarch.rpm" --installroot="$IMAGE_NAME_RISOTTO_IMAGE_DIR" > /dev/null
fi

# FIXME: check whether the pre/post install scripts themselves changed
# INSTALL stays false only when base packages, image packages and the module's
# files (md5sum) are all unchanged.
if [ -f "$IMAGE_NAME_RISOTTO_IMAGE_DIR".base.pkgs ] && [ -f "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs ]; then
    echo " - différence(s) avec les paquets de base"
    diff -u "$IMAGE_NAME_RISOTTO_IMAGE_DIR".base.pkgs "$BASE_PKGS_FILE" && INSTALL=false || INSTALL=true
else
    INSTALL=true
fi
new_package
if [ "$INSTALL" = false ]; then
    echo " - différence(s) avec les paquets de l'image"
    diff -u "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs.new && INSTALL=false || INSTALL=true
fi
find "$IMAGE_DIR_RECIPIENT_IMAGE" -type f -exec md5sum '{}' \; > "$IMAGE_NAME_RISOTTO_IMAGE_DIR".md5sum.new
if [ "$INSTALL" = false ]; then
    diff -u "$IMAGE_NAME_RISOTTO_IMAGE_DIR".md5sum "$IMAGE_NAME_RISOTTO_IMAGE_DIR".md5sum.new && INSTALL=false || INSTALL=true
fi
if [ "$INSTALL" = true ]; then
    echo " - installation"
    # image version counter, per releasever
    if [ -f "$IMAGE_NAME_RISOTTO_IMAGE_DIR"_"$RELEASEVER".version ]; then
        VERSION=$(cat "$IMAGE_NAME_RISOTTO_IMAGE_DIR"_"$RELEASEVER".version)
    else
        VERSION=0
    fi
    # generate the changelog page from inside a temporary unpack of the
    # previous image tarball (make_changelog needs the old chroot content)
    mkdir "$RISOTTO_IMAGE_DIR/tmp"
    ORI_DIR=$PWD
    cd "$RISOTTO_IMAGE_DIR/tmp"
    if [ ! "$VERSION" = 0 ] && [ -f "$IMAGE_NAME_RISOTTO_IMAGE_NAME" ]; then
        tar xf "$IMAGE_NAME_RISOTTO_IMAGE_NAME"
#        if [ "$INSTALL_TOOL" = "apt" ]; then
#            chown _apt "$IMAGE_NAME"
#        fi
#    else
#        mkdir "$IMAGE_NAME"
    fi
    #cd "$IMAGE_NAME"
    make_changelog "$IMAGE_NAME" "$VERSION" "$OS_NAME" "$RELEASEVER" > "$IMAGE_NAME_RISOTTO_IMAGE_DIR"_"$RELEASEVER"_"$VERSION"_changelog.md
    cd $ORI_DIR
    rm -rf "$RISOTTO_IMAGE_DIR/tmp"
    install_pkg
    sleep 2

    # postinstall scripts are sourced with the chroot fully populated
    for script in $(ls $IMAGE_DIR_RECIPIENT_IMAGE/postinstall/*.sh 2> /dev/null); do
        . "$script"
    done

    # snapshot /etc into the factory tree + tmpfiles.d (see make_volatile)
    CONTAINER=$IMAGE_NAME make_volatile /etc
    if [ ! "$?" = 0 ]; then
        echo "make_volatile failed"
        exit 1
    fi
    # archive the finished image and record its checksum
    cd "$RISOTTO_IMAGE_DIR/$IMAGE_NAME"
    if [ -f "$IMAGE_NAME_RISOTTO_IMAGE_NAME" ]; then
        mv -f "$IMAGE_NAME_RISOTTO_IMAGE_NAME" "$IMAGE_NAME_RISOTTO_IMAGE_NAME".old
    fi
    tar cf "$IMAGE_NAME_RISOTTO_IMAGE_NAME" .
    sha256sum "$IMAGE_NAME_RISOTTO_IMAGE_NAME" > "$IMAGE_NAME_RISOTTO_IMAGE_NAME".sha
    cd - > /dev/null
    # persist the package lists / md5sums used by the next change detection
    cp -f "$BASE_PKGS_FILE" "$IMAGE_NAME_RISOTTO_IMAGE_DIR".base.pkgs
    mv -f "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs.new "$IMAGE_NAME_RISOTTO_IMAGE_DIR".pkgs
    mv -f "$IMAGE_NAME_RISOTTO_IMAGE_DIR".md5sum.new "$IMAGE_NAME_RISOTTO_IMAGE_DIR".md5sum
    VERSION=$((VERSION + 1))
    echo "$VERSION" > "$IMAGE_NAME_RISOTTO_IMAGE_DIR"_"$RELEASEVER".version
fi
# the unpacked chroot is only a build artifact; the tarball is the product
rm -rf "$IMAGE_NAME_RISOTTO_IMAGE_DIR"

echo " => OK"
exit 0
|
181
sbin/make_changelog
Executable file
181
sbin/make_changelog
Executable file
|
@ -0,0 +1,181 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
import logging
|
||||
from dnf.conf import Conf
|
||||
from dnf.cli.cli import BaseCli, Cli
|
||||
from dnf.cli.output import Output
|
||||
from dnf.cli.option_parser import OptionParser
|
||||
from dnf.i18n import _, ucd
|
||||
from datetime import datetime, timezone
|
||||
from sys import argv
|
||||
from os import getcwd, unlink
|
||||
from os.path import isfile, join
|
||||
from glob import glob
|
||||
from subprocess import run
|
||||
|
||||
|
||||
# List new or removed file
|
||||
def read_dnf_pkg_file(os_name, filename1, filename2):
    """Parse two package-list files and return a {package_name: version} dict.

    ``filename1``/``filename2`` are the "base" and "image" package lists; a
    missing file is silently skipped.  The column layout depends on the
    distribution:

    - ``debian``: filename1 is ``dpkg-query -W`` output ("pkg version"),
      filename2 is ``apt install -s`` output ("Inst pkg (version ..."), so the
      package/version column indexes differ per file and the version may carry
      a leading ``(``.
    - otherwise (fedora/dnf): both files are dnf install previews with two
      header lines and " pkg arch version repo ..." rows.

    Raises Exception when the same package name appears twice.
    """
    if os_name == 'debian':
        # column indexes as (index in filename1, index in filename2)
        idx_pkg = 0, 1
        idx_version = 1, 2
        header_idx = 0, 0
    else:
        idx_pkg = 0, 0
        idx_version = 2, 2
        header_idx = 2, 2
    pkgs = {}
    for fidx, filename in enumerate((filename1, filename2)):
        if not isfile(filename):
            continue
        with open(filename, 'r') as pkgs_fh:
            # iterate the file lazily instead of materializing readlines()
            for idx, pkg_line in enumerate(pkgs_fh):
                if idx < header_idx[fidx]:
                    # skip the per-format header lines
                    continue
                sp_line = pkg_line.strip().split()
                if len(sp_line) < idx_version[fidx] + 1:
                    # blank or truncated line: not a package row
                    continue
                name = sp_line[idx_pkg[fidx]]
                if name in pkgs:
                    # report the actual package column (sp_line[0] is wrong
                    # for the debian image file, where the name is column 1)
                    raise Exception(f'package already set {name}?')
                version = sp_line[idx_version[fidx]]
                if os_name == 'debian' and version.startswith('('):
                    # apt's simulation wraps the version in parentheses
                    version = version[1:]
                pkgs[name] = version
    return pkgs
|
||||
|
||||
|
||||
def list_packages(title, packages, packages_info):
    """Print a markdown section listing *packages* with their versions.

    title: section heading.
    packages: iterable of package names (e.g. a dict-keys set difference).
    packages_info: mapping package name -> version string.

    Prints '*Aucun*' when there is no package, otherwise one bullet per
    package in alphabetical order.
    """
    print(f'# {title}\n')
    # Materialize and sort once: the original tested emptiness on the raw
    # iterable (always truthy for a generator) and kept an unused enumerate
    # index; sorting first makes the emptiness test reliable for any iterable.
    names = sorted(packages)
    if not names:
        print('*Aucun*')
    for pkg in names:
        print(f' - {pkg} ({packages_info[pkg]})')
    print()
|
||||
|
||||
|
||||
# List updated packages
|
||||
# List updated packages
class CustomOutput(Output):
    """dnf Output subclass used to silence dnf's own package listing so that
    only the markdown changelog produced by this script reaches stdout."""

    def listPkgs(self, *args, **kwargs):
        # do not display list
        pass
|
||||
|
||||
|
||||
def format_changelog_markdown(changelog):
    """Return changelog formatted as in spec file.

    changelog: one entry from dnf's latest_changelogs() — assumed to be a
    mapping with 'text', 'timestamp' (datetime) and 'author' keys
    (TODO confirm against the dnf API in use).
    """
    # indent every changelog line so markdown renders it as a code block
    text = '\n'.join([f' {line}' for line in changelog['text'].split('\n')])
    # "- <date> <author>" header followed by the indented body
    chlog_str = ' - %s %s\n\n%s\n' % (
        changelog['timestamp'].strftime("%a %b %d %X %Y"),
        ucd(changelog['author']),
        ucd(text))
    return chlog_str
|
||||
|
||||
|
||||
def print_changelogs_markdown(packages):
    """Print markdown changelog entries for *packages*, grouped by source RPM.

    Monkey-patched onto the dnf BaseCli instance as print_changelogs; reads
    that instance back through the module-level BASE global (set in
    dnf_update) because dnf calls this without passing the cli object.
    """
    # group packages by src.rpm to avoid showing duplicate changelogs
    self = BASE
    bysrpm = dict()
    for p in packages:
        # there are packages without source_name, use name then.
        bysrpm.setdefault(p.source_name or p.name, []).append(p)
    for source_name in sorted(bysrpm.keys()):
        bin_packages = bysrpm[source_name]
        print('- ' + _("Changelogs for {}").format(', '.join([str(pkg) for pkg in bin_packages])))
        print()
        # all binary packages of one source share the same changelog:
        # printing the first one is enough
        for chl in self.latest_changelogs(bin_packages[0]):
            print(format_changelog_markdown(chl))
|
||||
|
||||
|
||||
def dnf_update(image_name):
    """Run `dnf check-update --changelog` against the chroot *image_name*
    (relative to the current directory) and print the changelogs as markdown.

    Swaps dnf's output object for CustomOutput and its print_changelogs for
    print_changelogs_markdown so only the markdown reaches stdout.
    """
    conf = Conf()
    # obsoletes are already listed
    conf.obsoletes = False
    with BaseCli(conf) as base:
        # expose the cli object to print_changelogs_markdown (dnf calls it
        # without arguments)
        global BASE
        BASE = base
        base.print_changelogs = print_changelogs_markdown
        custom_output = CustomOutput(base.output.base, base.output.conf)
        base.output = custom_output
        cli = Cli(base)
        image_dir = join(getcwd(), image_name)
        # NOTE(review): releasever is hard-coded to '35' here instead of using
        # the caller's releasever — confirm whether this is intentional.
        cli.configure(['--setopt=install_weak_deps=False', '--nodocs', '--noplugins', '--installroot=' + image_dir, '--releasever', '35', 'check-update', '--changelog'], OptionParser())
        # silence dnf's own logging so stdout stays pure markdown
        logger = logging.getLogger("dnf")
        for h in logger.handlers:
            logger.removeHandler(h)
        logger.addHandler(logging.NullHandler())
        cli.run()
|
||||
|
||||
|
||||
def main(os_name, image_name, old_version, releasever):
    """Print a zola/markdown changelog page for a rebuilt image on stdout.

    os_name: 'fedora' or 'debian' (selects package-file format and backend).
    image_name: image name under /var/lib/risotto/images.
    old_version: previous image version (0 means first build).
    releasever: release used in the titles and base-package file paths.
    """
    # timezone-aware UTC timestamp for the page front matter
    date = datetime.now(timezone.utc).isoformat()
    if old_version == 0:
        title = f"Création de l'image {image_name}"
        subtitle = f"Les paquets de la première image {image_name} sur base Fedora {releasever}"
    else:
        title = f"Nouvelle version de l'image {image_name}"
        subtitle = f"Différence des paquets de l'image {image_name} sur base Fedora {releasever} entre la version {old_version} et {old_version + 1}"
    # zola "+++" (TOML) front matter of the generated blog page
    print(f"""+++
title = "{title}"
description = "{subtitle}"
date = {date}
updated = {date}
draft = false
template = "blog/page.html"

[taxonomies]
authors = ["Automate"]

[extra]
lead = "{subtitle}."
type = "installe"
+++
""")
    # new package set = base packages + the image's simulated install
    new_dict = read_dnf_pkg_file(os_name, f'/var/lib/risotto/images/image_bases-{os_name}-{releasever}.pkgs', f'/var/lib/risotto/images/{image_name}.pkgs.new')
    new_pkg = new_dict.keys()
    old_file = f'/var/lib/risotto/images/{image_name}.pkgs'
    if not old_version or not isfile(old_file):
        # first build: just list everything
        list_packages('Liste des paquets', new_pkg, new_dict)
    else:
        # later build: show removed/added sets (dict-keys set operations)
        ori_dict = read_dnf_pkg_file(os_name, f'/var/lib/risotto/images/{image_name}.base.pkgs', old_file)
        ori_pkg = ori_dict.keys()
        list_packages('Les paquets supprimés', ori_pkg - new_pkg, ori_dict)
        list_packages('Les paquets ajoutés', new_pkg - ori_pkg, new_dict)
    print('# Les paquets mises à jour\n')
    if os_name == 'fedora':
        # fedora: changelogs come straight from dnf
        dnf_update(image_name)
    else:
        # debian: download each changed package and run apt-listchanges on it
        # (assumes CWD is the unpacked previous image chroot — TODO confirm)
        for filename in glob('*.deb'):
            unlink(filename)
        # NOTE(review): ori_pkg/ori_dict are only bound in the else-branch
        # above; this debian path raises NameError on a first build — confirm.
        # (keys-view & dict intersects with the dict's keys)
        for package in ori_pkg & new_dict:
            if ori_dict[package] == new_dict[package]:
                continue
            info = run(['apt', 'download', package], capture_output=True)
            if info.returncode:
                raise Exception(f'cannot download {package}: {info}')
        packages = list(glob('*.deb'))
        packages.sort()
        for package in packages:
            info = run(['chroot', '.', 'apt-listchanges', '--which', 'both', '-f', 'text', package], capture_output=True)
            if info.returncode:
                raise Exception(f'cannot list changes for {package}: {info}')
            # apt-listchanges prints a header ending with a dashed line;
            # only echo what follows it
            header = True
            for line in info.stdout.decode().split('\n'):
                if not header:
                    print(line)
                if line.startswith('-----------------------'):
                    header = False
            print()
            unlink(package)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI: make_changelog <image_name> <old_version> <os_name> <releasever>
    cli_args = argv[1:5]
    main(cli_args[2], cli_args[0], int(cli_args[1]), cli_args[3])
|
77
sbin/make_volatile
Executable file
77
sbin/make_volatile
Executable file
|
@ -0,0 +1,77 @@
|
|||
#!/bin/bash -e
# make_volatile: snapshot a directory (usually /etc) of the image named by
# $CONTAINER into systemd's factory tree and generate a tmpfiles.d fragment
# that recreates it at boot (volatile /etc).
# NOTE(review): $CONTAINER is tested unquoted ([ -z $CONTAINER ]) — works for
# the names used here but breaks on values with spaces; confirm before reuse.
if [ -z $CONTAINER ]; then
    echo "PAS DE CONTAINER"
    exit 1
fi
ROOT="/var/lib/risotto/images/$CONTAINER"
echo "$ROOT"
# where the generated tmpfiles.d fragment is written (inside the image)
DESTDIR="$ROOT/usr/lib/tmpfiles.d"
# factory tree that tmpfiles.d "C" entries copy from at boot
CONF_DST="/usr/share/factory"
# files managed elsewhere: never mirrored nor recreated
EXCLUDES="^($ROOT/etc/passwd|$ROOT/etc/group|$ROOT/etc/.updated|$ROOT/etc/.pwd.lock|$ROOT/etc/systemd/network/dhcp.network|$ROOT/etc/sudoers.d/qemubuild)$"
# files copied into the factory tree but without a tmpfiles.d entry
ONLY_COPY="^($ROOT/etc/localtime)$"
# regular files that must become symlinks to their factory copy instead
FORCE_LINKS="^($ROOT/etc/udev/hwdb.bin)$"
|
||||
|
||||
# Run a command inside the image via chroot.
# Fix: quote "$ROOT" and forward arguments with "$@" — the original bare
# `$ROOT $@` word-splits paths and arguments containing whitespace (e.g. a
# stat --format value with spaces would be broken into separate words).
function execute() {
    chroot "$ROOT" "$@"
}
|
||||
|
||||
# Emit one tmpfiles.d line of type $1 ('d' directory, 'C' copy-from-factory)
# for path $2 (absolute path inside the image).  Mode/owner/group are read
# from the image itself via chroot so the entry reproduces them at boot.
function file_dir_in_tmpfiles() {
    letter=$1
    directory=$2
    # strip the image root prefix to get the path as seen inside the machine
    local_directory=$(echo $directory|sed "s@^$ROOT@@g")
    # grep -o keeps only the value, discarding any chroot warning noise
    mode=$(execute "/usr/bin/stat" "--format" "%a" "$local_directory" | grep -o "[0-9.]\+")
    user=$(execute "/usr/bin/stat" "--format" "%U" "$local_directory" | grep -o "[0-9a-zA-Z.-]\+")
    group=$(execute "/usr/bin/stat" "--format" "%G" "$local_directory" | grep -o "[0-9a-zA-Z.-]\+")
    echo "$letter $local_directory $mode $user $group - -"
}
|
||||
|
||||
# Emit a tmpfiles.d "L+" line reproducing an existing symlink: $1 is the
# absolute link path inside the image tree, $2 the same path relative to the
# image root (what the machine sees).
function calc_symlink_in_tmpfiles() {
    dest_name=$1
    local_dest_name=$2
    # target as stored in the link itself (may be relative)
    src_file=$(readlink "$dest_name")
    symlink_in_tmpfiles "$local_dest_name" "$src_file"
}
|
||||
|
||||
# Emit one tmpfiles.d "L+" entry: (re)create symlink $1 pointing at $2.
# "L+" replaces an existing entry at the path if necessary.
function symlink_in_tmpfiles() {
    dest_name=$1
    src_file=$2
    echo "L+ $dest_name - - - - $src_file"
}
|
||||
|
||||
# Walk the tree under $1 (e.g. /etc) inside the image, mirror it into the
# factory tree and generate a tmpfiles.d fragment restoring it at boot.
function main() {
    dir_config_orig=$1
    # e.g. "/etc" -> "-etc": suffix of the tmpfiles.d fragment name
    name="${dir_config_orig//\//-}"
    dir_config_orig=$ROOT$dir_config_orig

    mkdir -p "$DESTDIR"
    # NOTE(review): "$ROOTCONF_DST" is an undefined variable (expands empty),
    # so this just re-creates "$dir_config_orig"; it was probably meant to be
    # "$ROOT$CONF_DST..." — confirm intent before changing.
    mkdir -p "$ROOTCONF_DST$dir_config_orig"
    systemd_conf="$DESTDIR/risotto$name.conf"
    rm -f $systemd_conf
    # ** recursion requires globstar
    shopt -s globstar
    for src_file in $dir_config_orig/**; do
        # path as seen from inside the machine
        local_src=$(echo $src_file|sed "s@$ROOT@@g")
        # corresponding location in the factory tree
        dest_file="$ROOT$CONF_DST$local_src"
        if [[ "$src_file" =~ $EXCLUDES ]]; then
            echo "$src_file: exclude" >&2
        elif [[ -L "$src_file" ]]; then
            # existing symlink: reproduce it via an "L+" entry
            calc_symlink_in_tmpfiles "$src_file" "$local_src" >> $systemd_conf
        elif [[ "$src_file" =~ $FORCE_LINKS ]]; then
            # regular file that must become a symlink to its factory copy
            symlink_in_tmpfiles "$src_file" "$dest_file" >> $systemd_conf
        elif [[ -d "$src_file" ]]; then
            # directory: recreate with its original mode/owner at boot
            file_dir_in_tmpfiles 'd' "$src_file" >> $systemd_conf
            [[ ! -d "$dest_file" ]] && mkdir -p "$dest_file"
            #echo "$src_file: directory ok"
        else
            if [[ ! "$src_file" =~ $ONLY_COPY ]]; then
                # "C": copy from the factory tree at boot
                file_dir_in_tmpfiles "C" "$src_file" >> $systemd_conf
            fi
            [[ -e "$dest_file" ]] && rm -f "$dest_file"
            # not a symlink... a hardlink (file content shared with factory)
            ln "$src_file" "$dest_file"
            #echo "$src_file: file ok"
        fi
    done
}
|
||||
# entry point: snapshot the directory given on the command line (e.g. /etc)
main "$1"
echo "fin"
exit 0
|
79
sbin/update_images
Normal file
79
sbin/update_images
Normal file
|
@ -0,0 +1,79 @@
|
|||
#!/bin/bash -e
# update_images: rebuild every image, redeploy the machines whose image
# changed, restart them all and report machines in "degraded" state.

# root dir configuration
RISOTTO_DIR="/var/lib/risotto"
RISOTTO_IMAGE_DIR="$RISOTTO_DIR/images"
# image configuration
IMAGE_BASE_RISOTTO_BASE_DIR="$RISOTTO_IMAGE_DIR/image_bases"

# clear the per-run base-image locks so build_image rebuilds each base once
rm -f $IMAGE_BASE_RISOTTO_BASE_DIR*.build

# rebuild every image that has a files directory
ls /var/lib/risotto/images_files/ | while read image; do
    if [ -d /var/lib/risotto/images_files/"$image" ]; then
        echo
        echo "Install image $image"
        /usr/local/sbin/build_image "$image"
    fi
done
rm -f $IMAGE_BASE_RISOTTO_BASE_DIR*.build

# redeploy each declared machine whose image checksum changed
MACHINES=""
for nspawn in $(ls /etc/systemd/nspawn/*.nspawn); do
    nspawn_file=$(basename $nspawn)
    # machine name = nspawn file name without extension
    machine=${nspawn_file%.*}
    MACHINES="$MACHINES$machine "
    MACHINE_MACHINES_DIR="/var/lib/machines/$machine"
    echo "Install machine $machine"
    SHA_MACHINE="$RISOTTO_DIR/configurations/sha/$machine".sha
    # the .sha file format is "<checksum> <image tarball path>": keep the path
    content=$(cat $SHA_MACHINE)
    IMAGE_NAME_RISOTTO_IMAGE_NAME=${content##* }
    # only redeploy when the deployed checksum differs from the image's
    diff -q "$IMAGE_NAME_RISOTTO_IMAGE_NAME".sha "$SHA_MACHINE" || (
        machinectl stop $machine
        # machinectl stop is asynchronous: poll until the machine is gone
        while true; do
            machinectl status "$machine" > /dev/null 2>&1 || break
            sleep 1
        done
        rm -rf "$MACHINE_MACHINES_DIR"
        mkdir "$MACHINE_MACHINES_DIR"
        cd "$MACHINE_MACHINES_DIR"
        tar xf "$IMAGE_NAME_RISOTTO_IMAGE_NAME"
        # record the deployed checksum for the next run's comparison
        cp -a "$IMAGE_NAME_RISOTTO_IMAGE_NAME".sha "$SHA_MACHINE"
    )
done
machinectl start $MACHINES
# poll every machine until systemd reports "running" or "degraded"
# (give up after ~60 iterations of 2 seconds)
STARTED=""
DEGRADED=""
found=true
idx=0
while [ $found = true ]; do
    found=false
    echo "tentative $idx"
    for machine in $MACHINES; do
        if ! echo $STARTED | grep -q " $machine "; then
            # || true: is-system-running exits non-zero while not yet running
            status=$(machinectl -q shell $machine /usr/bin/systemctl is-system-running || true)
            if echo "$status" | grep -q degraded; then
                STARTED="$STARTED $machine "
                DEGRADED="$DEGRADED $machine"
            elif echo "$status" | grep -q running; then
                STARTED="$STARTED $machine "
            else
                found=true
                echo "status actuel de $machine : $status"
            fi
        fi
    done
    sleep 2
    idx=$((idx+1))
    if [ $idx = 60 ]; then
        break
    fi
done
# list the failed units of every degraded machine; exit non-zero if any
retcode=0
for machine in $DEGRADED; do
    echo
    echo "========= $machine"
    machinectl -q shell $machine /usr/bin/systemctl --state=failed --no-legend --no-pager
    retcode=1
done

exit $retcode
|
|
@ -21,6 +21,7 @@ class ModuleCfg():
|
|||
self.templates_dir = []
|
||||
self.extra_dictionaries = {}
|
||||
self.servers = []
|
||||
self.depends = []
|
||||
|
||||
def __repr__(self):
|
||||
return str(vars(self))
|
||||
|
@ -128,7 +129,6 @@ def load_applicationservice_cfg(appname: str,
|
|||
|
||||
|
||||
def load_applicationservice(appname: str,
|
||||
added: list,
|
||||
install_dir: str,
|
||||
cfg: ModuleCfg,
|
||||
applications: dict,
|
||||
|
@ -150,7 +150,7 @@ def load_applicationservice(appname: str,
|
|||
copy_manual_dir,
|
||||
copy_tests,
|
||||
)
|
||||
added.append(appname)
|
||||
cfg.depends.append(appname)
|
||||
with open(applicationservice_file) as yaml:
|
||||
app = yaml_load(yaml, Loader=SafeLoader)
|
||||
provider = app.get('provider')
|
||||
|
@ -163,19 +163,27 @@ def load_applicationservice(appname: str,
|
|||
suppliers.setdefault(supplier, [])
|
||||
if appname not in suppliers[supplier]:
|
||||
suppliers[supplier].append(appname)
|
||||
if 'distribution' in app:
|
||||
distribution = app['distribution']
|
||||
else:
|
||||
distribution = None
|
||||
for xml in app.get('depends', []):
|
||||
if xml in added:
|
||||
if xml in cfg.depends:
|
||||
continue
|
||||
load_applicationservice(xml,
|
||||
added,
|
||||
install_dir,
|
||||
cfg,
|
||||
applications,
|
||||
copy_manual_dir,
|
||||
copy_tests,
|
||||
providers,
|
||||
suppliers,
|
||||
)
|
||||
ret = load_applicationservice(xml,
|
||||
install_dir,
|
||||
cfg,
|
||||
applications,
|
||||
copy_manual_dir,
|
||||
copy_tests,
|
||||
providers,
|
||||
suppliers,
|
||||
)
|
||||
if ret:
|
||||
if distribution:
|
||||
raise Exception(f'duplicate distribution for {cfg.module_name} ({distribution} and {ret})')
|
||||
distribution = ret
|
||||
return distribution
|
||||
|
||||
|
||||
def load_image_informations(module_name: str,
|
||||
|
@ -188,18 +196,23 @@ def load_image_informations(module_name: str,
|
|||
suppliers: dict,
|
||||
) -> ModuleCfg:
|
||||
cfg = ModuleCfg(module_name)
|
||||
added = []
|
||||
distribution = None
|
||||
for applicationservice in datas['applicationservices']:
|
||||
load_applicationservice(applicationservice,
|
||||
added,
|
||||
install_dir,
|
||||
cfg,
|
||||
applications,
|
||||
copy_manual_dir,
|
||||
copy_tests,
|
||||
providers,
|
||||
suppliers,
|
||||
)
|
||||
ret = load_applicationservice(applicationservice,
|
||||
install_dir,
|
||||
cfg,
|
||||
applications,
|
||||
copy_manual_dir,
|
||||
copy_tests,
|
||||
providers,
|
||||
suppliers,
|
||||
)
|
||||
if ret:
|
||||
if distribution:
|
||||
raise Exception(f'duplicate distribution for {cfg.module_name} ({distribution} and {ret})')
|
||||
distribution = ret
|
||||
if module_name != 'host' and not distribution:
|
||||
raise Exception(f'cannot found any linux distribution for {module_name}')
|
||||
return cfg
|
||||
|
||||
|
||||
|
|
|
@ -2,7 +2,8 @@ from .utils import SERVERS, SERVERS_JSON, MULTI_FUNCTIONS, load_domains
|
|||
from .image import load_config, valid_mandatories # , load_modules_rougail_config
|
||||
from rougail import RougailConfig, RougailConvert
|
||||
from .rougail.annotator import calc_providers, calc_providers_global, calc_providers_dynamic, calc_providers_dynamic_follower, calc_providers_follower
|
||||
from os import makedirs
|
||||
from os.path import isfile
|
||||
from json import dump as json_dump, load as json_load
|
||||
#
|
||||
from tiramisu import Config
|
||||
from .utils import value_pprint
|
||||
|
@ -10,6 +11,18 @@ from rougail.utils import normalize_family
|
|||
from rougail import RougailSystemdTemplate
|
||||
#
|
||||
#
|
||||
def tiramisu_display_name(kls,
|
||||
dyn_name: 'Base'=None,
|
||||
suffix: str=None,
|
||||
) -> str:
|
||||
# FIXME
|
||||
if dyn_name is not None:
|
||||
name = kls.impl_getpath() + str(suffix)
|
||||
else:
|
||||
name = kls.impl_getpath()
|
||||
return name
|
||||
|
||||
|
||||
async def set_values(server_name, config, datas):
|
||||
if 'values' not in datas:
|
||||
return
|
||||
|
@ -50,7 +63,6 @@ FUNCTIONS = {'get_ip_from_domain': get_ip_from_domain,
|
|||
async def templates(server_name,
|
||||
config,
|
||||
templates_informations,
|
||||
srv=False,
|
||||
just_copy=False,
|
||||
):
|
||||
engine = RougailSystemdTemplate(config, templates_informations)
|
||||
|
@ -71,22 +83,20 @@ async def templates(server_name,
|
|||
await value_pprint(values, config)
|
||||
print(err)
|
||||
print(await config.option('general.nginx.nginx_default_http').value.get())
|
||||
exit(1)
|
||||
#raise err from err
|
||||
raise err from err
|
||||
if just_copy:
|
||||
for eng, old_engine in ori_engines.items():
|
||||
engine.engines[eng] = old_engine
|
||||
if srv:
|
||||
makedirs(srv)
|
||||
|
||||
|
||||
|
||||
async def load(display_name=None,
|
||||
async def load(cache_file,
|
||||
cache_values,
|
||||
clean_directories=False,
|
||||
copy_manual_dir=False,
|
||||
copy_tests=False,
|
||||
hide_secret=False,
|
||||
):
|
||||
display_name=tiramisu_display_name
|
||||
#load_zones()
|
||||
# # load images
|
||||
#FIXME useful
|
||||
|
@ -109,29 +119,34 @@ async def load(display_name=None,
|
|||
module_info = module_infos[datas['module']]
|
||||
functions_files |= set(module_info['infos'].functions_file)
|
||||
cfg['functions_file'] = list(functions_files)
|
||||
eolobj = RougailConvert(cfg)
|
||||
cfg['risotto_globals'] = {}
|
||||
for server_name, datas in SERVERS.items():
|
||||
module_info = module_infos[datas['module']]
|
||||
cfg['dictionaries_dir'] = module_info['infos'].dictionaries_dir
|
||||
cfg['extra_dictionaries'] = module_info['infos'].extra_dictionaries
|
||||
informations = SERVERS_JSON['servers'][server_name].get('informations')
|
||||
if informations:
|
||||
cfg['risotto_globals'][server_name] = {'global:server_name': server_name,
|
||||
'global:zones_name': informations['zones_name'],
|
||||
'global:zones_list': list(range(len(informations['zones_name']))),
|
||||
}
|
||||
values = []
|
||||
for s_idx in cfg['risotto_globals'][server_name]['global:zones_list']:
|
||||
if not s_idx:
|
||||
values.append(server_name)
|
||||
else:
|
||||
values.append(informations['extra_domainnames'][s_idx - 1])
|
||||
cfg['risotto_globals'][server_name]['global:server_names'] = values
|
||||
else:
|
||||
cfg['risotto_globals'][server_name] = {'global:server_name': server_name}
|
||||
eolobj.load_dictionaries(path_prefix=server_name)
|
||||
tiram_obj = eolobj.save(None)
|
||||
if not isfile(cache_file):
|
||||
eolobj = RougailConvert(cfg)
|
||||
cfg['risotto_globals'] = {}
|
||||
for server_name, datas in SERVERS.items():
|
||||
module_info = module_infos[datas['module']]
|
||||
cfg['dictionaries_dir'] = module_info['infos'].dictionaries_dir
|
||||
cfg['extra_dictionaries'] = module_info['infos'].extra_dictionaries
|
||||
informations = SERVERS_JSON['servers'][server_name].get('informations')
|
||||
if informations:
|
||||
cfg['risotto_globals'][server_name] = {'global:server_name': server_name,
|
||||
'global:zones_name': informations['zones_name'],
|
||||
'global:zones_list': list(range(len(informations['zones_name']))),
|
||||
}
|
||||
values = []
|
||||
for s_idx in cfg['risotto_globals'][server_name]['global:zones_list']:
|
||||
if not s_idx:
|
||||
values.append(server_name)
|
||||
else:
|
||||
values.append(informations['extra_domainnames'][s_idx - 1])
|
||||
cfg['risotto_globals'][server_name]['global:server_names'] = values
|
||||
else:
|
||||
cfg['risotto_globals'][server_name] = {'global:server_name': server_name}
|
||||
cfg['risotto_globals'][server_name]['global:module_name'] = datas['module']
|
||||
eolobj.load_dictionaries(path_prefix=server_name)
|
||||
tiram_obj = eolobj.save(cache_file)
|
||||
else:
|
||||
with open(cache_file) as fh:
|
||||
tiram_obj = fh.read()
|
||||
optiondescription = FUNCTIONS.copy()
|
||||
try:
|
||||
exec(tiram_obj, None, optiondescription)
|
||||
|
@ -141,11 +156,18 @@ async def load(display_name=None,
|
|||
config = await Config(optiondescription['option_0'],
|
||||
display_name=display_name,
|
||||
)
|
||||
await config.property.pop('validator')
|
||||
await config.property.pop('cache')
|
||||
for server_name, datas in SERVERS.items():
|
||||
await set_values(server_name, config, datas)
|
||||
await config.property.read_only()
|
||||
await config.property.add('cache')
|
||||
await valid_mandatories(config)
|
||||
if not isfile(cache_values):
|
||||
await config.property.pop('validator')
|
||||
await config.property.pop('cache')
|
||||
for server_name, datas in SERVERS.items():
|
||||
await set_values(server_name, config, datas)
|
||||
await config.property.read_only()
|
||||
await config.property.add('cache')
|
||||
await valid_mandatories(config)
|
||||
with open(cache_values, 'w') as fh:
|
||||
json_dump(await config.value.exportation(), fh)
|
||||
else:
|
||||
with open(cache_values, 'r') as fh:
|
||||
await config.value.importation(json_load(fh))
|
||||
await config.property.read_only()
|
||||
return module_infos, cfg, config
|
||||
|
|
Loading…
Reference in a new issue