Compare commits

..

No commits in common. "97e5b8e02ecb0719948bcfc605c3faf943fed8a6" and "57c108aea0975da51987afe741c559743b776d99" have entirely different histories.

23 changed files with 91 additions and 824 deletions

View file

@ -1,7 +1,7 @@
format: '0.1'
description: Gitea
depends:
- base-fedora-36
- base-fedora-35
- postgresql-client
- reverse-proxy-client
- relay-mail-client

View file

@ -10,3 +10,4 @@ CREATE DATABASE IF NOT EXISTS %%name CHARACTER SET utf8;
GRANT ALL PRIVILEGES ON %%name.* TO '%%name'@'%%server' IDENTIFIED BY '%%password';
%end for
FLUSH PRIVILEGES;

View file

@ -5,5 +5,5 @@ Before=network.target
[Service]
Type=oneshot
Environment=PGPASSFILE=/usr/local/lib/secrets/postgresql.pass
ExecStart=/usr/bin/timeout 300 bash -c 'while ! 3<> /dev/tcp/%%pg_client_server_domainname/5432; do sleep 1; done; echo "POSTGRESQL STARTED"'
ExecStart=/usr/bin/timeout 90 bash -c 'while ! 3<> /dev/tcp/%%pg_client_server_domainname/5432; do sleep 1; done; echo "POSTGRESQL STARTED"'
ExecStart=/usr/bin/timeout 90 bash -c 'while ! /usr/bin/psql --set=sslmode=verify-full -h %%pg_client_server_domainname -U %%pg_client_username %%pg_client_database -c "\l"; do sleep 1; done; echo "POSTGRESQL READY"'

View file

@ -2,5 +2,5 @@ format: '0.1'
description: Postgresql
depends:
- server
- base-fedora-36
- base-fedora-35
provider: Postgresql

View file

@ -2,7 +2,7 @@
<rougail version="0.10">
<services>
<service name="postgresql" target="multi-user">
<override/>
<override engine="none"/>
<ip ip_type='variable'>accounts.remote_.remote_ip_</ip>
<file>/etc/postgresql/postgresql.conf</file>
<file>/etc/postgresql/pg_hba.conf</file>
@ -10,11 +10,9 @@
<file engine="none">/etc/postgresql/pg_ident.conf</file>
<file engine="none" mode="755">/bin/postgresql_init</file>
<file engine="none" source="sysuser-postgresql.conf">/sysusers.d/0postgresql.conf</file>
<file engine="none" source="tmpfiles.postgresql.conf">/tmpfiles.d/0postgresql.conf</file>
<file>/etc/pki/ca-trust/source/anchors/ca_PostgreSQL.crt</file>
<file>/etc/pki/tls/certs/postgresql.crt</file>
<file owner="root" group="postgres" mode="440">/etc/pki/tls/private/postgresql.key</file>
<file>/tests/postgresql.yml</file>
</service>
</services>
<variables>

View file

@ -1,3 +1 @@
PKG="$PKG postgresql-server postgresql-contrib"
# for postgresql-setup
PKG="$PKG util-linux postgresql-upgrade"

View file

@ -88,7 +88,6 @@ local all postgres ident map=pg_map
# IPv4 local connections:
#>GNUNUX
# host all all 127.0.0.1/32 ident
hostssl rougail_test rougail_test %%gateway_eth0/32 md5
%for %%server in %%accounts.remotes
hostssl %%normalize_family(%%server) %%normalize_family(%%server) %%server md5
%end for

View file

@ -47,6 +47,9 @@ directiveStartToken = §
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#>GNUNUX
data_directory = '/srv/postgresql'
#<GNUNUX
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#>GNUNUX
@ -113,7 +116,7 @@ unix_socket_directories = '/var/run/postgresql'
#>GNUNUX
authentication_timeout = §§{pg_authentication_timeout}s
#<GNUNUX
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
@ -126,7 +129,6 @@ authentication_timeout = §§{pg_authentication_timeout}s
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
##ssl_crl_dir = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
@ -154,8 +156,6 @@ shared_buffers = 128MB # min 128kB
shared_buffers = §§{pg_shared_buffers}§§pg_shared_buffers_unit
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
@ -184,7 +184,6 @@ dynamic_shared_memory_type = posix # the default is the first option
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
@ -200,7 +199,7 @@ dynamic_shared_memory_type = posix # the default is the first option
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
@ -213,17 +212,17 @@ dynamic_shared_memory_type = posix # the default is the first option
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
@ -247,15 +246,13 @@ dynamic_shared_memory_type = posix # the default is the first option
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
#>GNUNUX
wal_buffers = §§pg_wal_buffers
#<GNUNUX
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
@ -267,14 +264,14 @@ wal_buffers = §§pg_wal_buffers
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
#>GNUNUX
#max_wal_size = 1GB
max_wal_size = §§{pg_max_wal_size}§§pg_max_wal_size_unit
#<GNUNUX
min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
# - Archiving -
@ -295,6 +292,7 @@ min_wal_size = 80MB
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
@ -329,19 +327,20 @@ min_wal_size = 80MB
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# - Master Server -
# These settings are ignored on a standby server.
@ -353,7 +352,7 @@ min_wal_size = 80MB
# - Standby Servers -
# These settings are ignored on a primary server.
# These settings are ignored on a master server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
@ -373,7 +372,7 @@ min_wal_size = 80MB
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
@ -394,26 +393,23 @@ min_wal_size = 80MB
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_incremental_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
@ -424,12 +420,6 @@ min_wal_size = 80MB
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#>GNUNUX
effective_cache_size = §§{pg_effective_cache_size}§§pg_effective_cache_size_unit
#<GNUNUX
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
@ -440,6 +430,10 @@ effective_cache_size = §§{pg_effective_cache_size}§§pg_effective_cache_size_
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
effective_cache_size = §§{pg_effective_cache_size}§§pg_effective_cache_size_unit
# - Genetic Query Optimizer -
@ -457,9 +451,10 @@ effective_cache_size = §§{pg_effective_cache_size}§§pg_effective_cache_size_
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
@ -470,24 +465,27 @@ effective_cache_size = §§{pg_effective_cache_size}§§pg_effective_cache_size_
# - Where to Log -
#>GNUNUX
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
log_destination = 'syslog'
#<GNUNUX
# This is used when logging to stderr:
#GNUNUX: logging_collector = on # Enable capturing of stderr and csvlog
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
#log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
#GNUNUX: log_filename = 'postgresql-%a.log' # log file name pattern,
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#GNUNUX: log_truncate_on_rotation = on # If on, an existing log file with the
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
@ -495,14 +493,11 @@ effective_cache_size = §§{pg_effective_cache_size}§§pg_effective_cache_size_
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#GNUNUX: log_rotation_age = 1d # Automatic rotation of logfiles will
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#GNUNUX: log_rotation_size = 0 # Automatic rotation of logfiles will
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#>GNUNUX
log_destination = 'syslog'
#<GNUNUX
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
@ -510,7 +505,7 @@ log_destination = 'syslog'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
@ -570,11 +565,6 @@ log_destination = 'syslog'
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
@ -589,11 +579,9 @@ log_destination = 'syslog'
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
@ -606,8 +594,6 @@ log_destination = 'syslog'
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
@ -622,7 +608,6 @@ log_destination = 'syslog'
#FIXME en dure ?
log_timezone = 'Europe/Paris'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
@ -639,21 +624,19 @@ log_timezone = 'Europe/Paris'
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
@ -669,6 +652,10 @@ autovacuum = on
autovacuum = off
§end if
#<GNUNUX
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
@ -714,11 +701,10 @@ autovacuum = off
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
@ -727,16 +713,17 @@ autovacuum = off
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
@ -770,15 +757,14 @@ default_text_search_config = 'pg_catalog.french'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
@ -806,6 +792,7 @@ default_text_search_config = 'pg_catalog.french'
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
@ -824,7 +811,6 @@ default_text_search_config = 'pg_catalog.french'
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------

View file

@ -1,38 +1,11 @@
[Service]
Environment=PGDATA=/srv/postgresql/postgresql
ExecStartPre=
ExecStartPre=+/usr/local/lib/bin/postgresql_init
ExecStartPre=/usr/libexec/postgresql-check-db-dir %N
Environment=PGDATA=/srv/postgresql
Environment=PG_CONF=/etc/postgresql/postgresql.conf
Environment=PG_HBA=/etc/postgresql/pg_hba.conf
Environment=PG_IDENT=/etc/postgresql/pg_ident.conf
Environment=LC_ALL=fr_FR.UTF-8
ExecStartPre=
ExecStartPre=+/usr/local/lib/bin/postgresql_init
# if upgrade needed, do it
ExecStartPre=/bin/bash -c '%slurp
/usr/libexec/postgresql-check-db-dir %N || (%slurp
echo "UPGRADE" &&%slurp
# directory creation must have 700 rights
umask 0077 &&%slurp
# pg_upgrade do not like ssl activation
/bin/grep -v "ssl " ${PG_CONF} > /tmp/postgresql.conf &&%slurp
mv -f /tmp/postgresql.conf ${PGDATA}/postgresql.conf &&%slurp
# pg_upgrade modify pg_hba.conf so copy it
/bin/rm ${PGDATA}/pg_hba.conf &&%slurp
/bin/cp -af ${PG_HBA} ${PGDATA} &&%slurp
# do upgrade
/usr/bin/postgresql-setup --upgrade &&%slurp
# re do link
ln -sf ${PG_HBA} ${PGDATA}/ &&%slurp
ln -sf ${PG_CONF} ${PGDATA}/ &&%slurp
# remove old cluster
/srv/postgresql/postgresql/delete_old_cluster.sh &&%slurp
rm -f /srv/postgresql/postgresql/delete_old_cluster.sh &&%slurp
# force index (see later)
touch /srv/postgresql/risotto_upgrade.lock%slurp
)'
# recheck db
ExecStartPre=/usr/libexec/postgresql-check-db-dir %N
ExecStart=
ExecStart=/usr/bin/postmaster -D ${PGDATA} -c config_file=${PG_CONF} -c hba_file=${PG_HBA} -c ident_file=${PG_IDENT}
ExecStartPost=-/usr/bin/psql -f /etc/postgresql/postgresql.sql
# if lock do reindex
ExecStartPost=/bin/bash -c 'if [ -f /srv/postgresql/risotto_upgrade.lock ];then echo REINDEX; /usr/bin/reindexdb && rm -f /srv/postgresql/risotto_upgrade.lock; fi'

View file

@ -1,12 +1,7 @@
%set %%new_accounts = [('rougail_test', %%get_password(server_name=%%domain_name_eth0, username='rougail_test', description="remote", type="cleartext", hide=%%hide_secret, temporary=True))]
%for %%server in %%accounts.remotes
%set %%name = %%normalize_family(%%server)
%set %%password = %%accounts["remote_" + %%name]["password_" + %%name]
%%new_accounts.append((%%name, %%password))%slurp
%end for
%for %%name, %%password in %%new_accounts
CREATE DATABASE "%%name";
CREATE ROLE "%%name" WITH LOGIN ENCRYPTED PASSWORD '%%password';
ALTER USER "%%name" PASSWORD '%%password';
CREATE ROLE "%%name" WITH LOGIN ENCRYPTED PASSWORD '%%accounts["remote_" + %%name]["password_" + %%name]';
ALTER USER "%%name" PASSWORD '%%accounts["remote_" + %%name]["password_" + %%name]';
GRANT ALL PRIVILEGES ON DATABASE "%%name" TO "%%name";
%end for

View file

@ -1,4 +0,0 @@
address: %%ip_eth0
user: rougail_test
password: %%get_password(server_name=%%domain_name_eth0, username='rougail_test', description="remote", type="cleartext", hide=%%hide_secret, temporary=True)
dbname: rougail_test

View file

@ -1,22 +1,14 @@
#!/bin/bash -e
# Initialize the PostgreSQL data directory under /srv/postgresql/postgresql,
# migrating a legacy flat /srv/postgresql layout when one is found.
if [ ! -d "/srv/postgresql" ]; then
# Fresh machine: create the nested data directory and run initdb.
/bin/mkdir -p /srv/postgresql/postgresql
/bin/chown -R postgres: /srv/postgresql
/usr/bin/postgresql-setup --initdb
#/bin/rm /srv/postgresql/postgresql.conf
#/bin/rm /srv/postgresql/pg_hba.conf
#/bin/rm /srv/postgresql/pg_ident.conf
elif [ ! -d "/srv/postgresql/postgresql" ]; then
# migrate /srv/postgresql to /srv/postgresql/postgresql
# needed for upgrade...
# NOTE: `|| true` tolerates the partial move of the directory into itself.
mkdir /srv/postgresql/postgresql
mv /srv/postgresql/* /srv/postgresql/postgresql || true
chown postgres: /srv/postgresql/postgresql
chmod 700 /srv/postgresql/postgresql
fi
# for postgresql-setup...
# Point the live config files at the copies managed under /etc/postgresql.
/bin/ln -sf /etc/postgresql/postgresql.conf /srv/postgresql/postgresql/postgresql.conf
/bin/ln -sf /etc/postgresql/pg_hba.conf /srv/postgresql/postgresql/pg_hba.conf
/bin/ln -sf /etc/postgresql/pg_ident.conf /srv/postgresql/postgresql/pg_ident.conf
# One-shot initialization: nothing to do when the data directory already exists.
[ -d "/srv/postgresql" ] && exit 0 || true
# Create the data directory owned by the postgres system user.
/bin/mkdir /srv/postgresql
/bin/chown postgres: /srv/postgresql
# /var/lib/pgsql is expected by postgresql-setup.
mkdir /var/lib/pgsql
/bin/chown postgres: /var/lib/pgsql
/usr/bin/postgresql-setup --initdb
# Drop the generated configs; the managed copies in /etc/postgresql are used instead.
/bin/rm /srv/postgresql/postgresql.conf
/bin/rm /srv/postgresql/pg_hba.conf
/bin/rm /srv/postgresql/pg_ident.conf
exit 0

View file

@ -1,3 +1,3 @@
g postgres 26 -
u postgres 26:26 "PostgreSQL Server" /srv/postgresql/postgresql /bin/bash
u postgres 26:26 "PostgreSQL Server" /srv/postgresql /bin/bash

View file

@ -1,2 +0,0 @@
# for postgresql-setup only...
d /var/lib/pgsql/ 0750 postgres postgres -

View file

@ -1,79 +0,0 @@
from yaml import load, SafeLoader
from os import environ
from pytest import raises
from psycopg2 import connect, OperationalError
def test_postgresql_wrong_password():
    """A connection attempt with a bad password must raise OperationalError."""
    settings_path = f'{environ["MACHINE_TEST_DIR"]}/postgresql.yml'
    with open(settings_path) as stream:
        settings = load(stream, Loader=SafeLoader)
    # Authentication must be enforced: a wrong password may not connect.
    with raises(OperationalError):
        connect(
            host=settings['address'],
            user=settings['user'],
            password='a',
            database=settings['dbname'],
        )
def test_postgresql_connection():
    """Connecting with the provisioned credentials must succeed."""
    settings_path = f'{environ["MACHINE_TEST_DIR"]}/postgresql.yml'
    with open(settings_path) as stream:
        settings = load(stream, Loader=SafeLoader)
    connection = connect(
        host=settings['address'],
        user=settings['user'],
        password=settings['password'],
        database=settings['dbname'],
    )
    connection.close()
def test_postgresql_migration():
    """Data written on the first run must survive a machine upgrade.

    When FIRST_RUN is set, create the table and insert one row; on every
    run (including post-upgrade re-runs) verify exactly that row exists.
    """
    conf_file = f'{environ["MACHINE_TEST_DIR"]}/postgresql.yml'
    with open(conf_file) as yaml:
        data = load(yaml, Loader=SafeLoader)
    db = connect(host=data['address'], user=data['user'], password=data['password'], database=data['dbname'])
    cursor = db.cursor()
    if 'FIRST_RUN' in environ:
        cursor.execute("""CREATE TABLE test (col CHAR(20) NOT NULL)""")
        cursor.execute("""INSERT INTO test (col) VALUES ('test')""")
        db.commit()
    cursor.execute("""SELECT * FROM test""")
    results = cursor.fetchall()
    assert len(results) == 1
    # BUG FIX: the comparison was a bare expression and never asserted.
    # rstrip() tolerates CHAR(20) blank padding in the returned value.
    assert results[0][0].rstrip() == 'test'
    cursor.close()
    db.close()
def test_postgresql_insert():
    """An inserted row must be visible after commit."""
    conf_file = f'{environ["MACHINE_TEST_DIR"]}/postgresql.yml'
    with open(conf_file) as yaml:
        data = load(yaml, Loader=SafeLoader)
    db = connect(host=data['address'], user=data['user'], password=data['password'], database=data['dbname'])
    cursor = db.cursor()
    cursor.execute("""INSERT INTO test (col) VALUES ('test2')""")
    db.commit()
    #
    cursor.execute("""SELECT * FROM test WHERE col = 'test2'""")
    results = cursor.fetchall()
    assert len(results) == 1
    # BUG FIX: the comparison was a bare expression and never asserted.
    # rstrip() tolerates CHAR(20) blank padding in the returned value.
    assert results[0][0].rstrip() == 'test2'
    cursor.close()
    db.close()
def test_postgresql_delete():
    """Rows removed in one transaction must be gone on re-query."""
    settings_path = f'{environ["MACHINE_TEST_DIR"]}/postgresql.yml'
    with open(settings_path) as stream:
        settings = load(stream, Loader=SafeLoader)
    db = connect(
        host=settings['address'],
        user=settings['user'],
        password=settings['password'],
        database=settings['dbname'],
    )
    cursor = db.cursor()
    cursor.execute("DELETE FROM test WHERE col = 'test2'")
    db.commit()
    # The deleted row must no longer be returned.
    cursor.execute("SELECT * FROM test WHERE col = 'test2'")
    assert cursor.fetchall() == []
    cursor.close()
    db.close()

View file

@ -34,7 +34,7 @@ class Authentication:
code = ret.status_code
content = ret.content
assert code == 200
assert b'<title trspan="authPortal">Authentication portal</title>' in content, f'cannot find LemonLdap title: {content}'
assert b'<title trspan="authPortal">Authentication portal</title>' in content
def auth_lemonldap(self,
req,

View file

@ -1,7 +1,7 @@
format: '0.1'
description: Vaultwarden
depends:
- base-fedora-36
- base-fedora-35
- postgresql-client
- relay-mail-client
- reverse-proxy-client

View file

@ -6,7 +6,6 @@
<file>/etc/pki/ca-trust/source/anchors/ca_InternalReverseProxy.crt</file>
<file engine="none" source="tmpfile-vaultwarden.conf">/tmpfiles.d/0vaultwarden.conf</file>
<file source="vaultwarden_config.env">/etc/vaultwarden/config.env</file>
<file>/tests/vaultwarden.yml</file>
</service>
</services>
<variables>
@ -28,13 +27,13 @@
</variable>
<variable name="vaultwarden_admin_email" type="mail" description="Adresse courriel de l'utilisateur Risotto" mandatory="True"/>
<variable name="vaultwarden_admin_password" type="password" description="Mot de passe de l'utilisateur Risotto" auto_save="False" hidden="True"/>
<variable name="vaultwarden_device_identifier" description="Identifiant de l'appareil se connectant" auto_save="False" hidden="True"/>
<variable name="vaultwarden_length" type="number" description="Taille par défaut du mot de passe">
<value>20</value>
</variable>
<variable name="vaultwarden_org_name" description="Nom de l'organisation lors de l'envoi des invitations" mandatory="True">
<value>Vaultwarden</value>
</variable>
<variable name="vaultwarden_test_device_identifier" description="Identifiant de test de l'appareil se connectant" hidden="True"/>
</family>
<family name="postgresql" description="PostgreSQL">
<variable name="pg_client_key_owner" redefine="True">
@ -51,9 +50,8 @@
<target>vaultwarden_admin_password</target>
<param name="hide" type="variable">hide_secret</param>
</fill>
<fill name="get_uuid">
<param name="server_name" type="variable">domain_name_eth0</param>
<target>vaultwarden_test_device_identifier</target>
<fill name="gen_uuid">
<target>vaultwarden_device_identifier</target>
</fill>
<fill name="calc_value">
<param type="boolean">True</param>

View file

@ -0,0 +1,6 @@
from uuid import uuid4 as _uuid4
def gen_uuid():
    """Return a freshly generated random UUID (version 4) as a string."""
    return f'{_uuid4()}'

View file

@ -1,22 +1,6 @@
import __main__
from os.path import dirname as _dirname, abspath as _abspath, join as _join, isfile as _isfile, isdir as _isdir
from os import makedirs as _makedirs
from uuid import uuid4 as _uuid4
_HERE = _dirname(_abspath(__main__.__file__))
_PASSWORD_DIR = _join(_HERE, 'password')
def gen_uuid():
    """Return a new random version-4 UUID, rendered as a string."""
    return '{}'.format(_uuid4())
def get_uuid(server_name: str) -> str:
    """Return the persistent UUID for *server_name*, creating it on first use.

    The UUID is stored under the per-server password directory so repeated
    calls (and later runs) always return the same value.
    """
    server_dir = _join(_PASSWORD_DIR, server_name)
    if not _isdir(server_dir):
        _makedirs(server_dir)
    uuid_path = _join(server_dir, 'uuid')
    if not _isfile(uuid_path):
        # First use: persist a freshly generated UUID.
        with open(uuid_path, 'w') as fh:
            fh.write(str(_uuid4()))
    # Always read back from disk so every caller sees the stored value.
    with open(uuid_path, 'r') as fh:
        return fh.read().strip()

View file

@ -1,7 +0,0 @@
url: https://%%revprox_client_external_domainname%%{revprox_client_location[0]}
%set %%username='rougail_test@silique.fr'
username: %%username
password: %%get_password(server_name=%%domain_name_eth0, username=%%username, description='test', type="cleartext", hide=%%hide_secret, temporary=False)
privkey: %%srv_dir/vaultwarden/rsa_key.pem
uuid: %%vaultwarden_test_device_identifier
revprox_ip: %%revprox_client_server_ip

View file

@ -1,40 +0,0 @@
from yaml import load, SafeLoader
from os import environ
from mookdns import MookDns
from vaultwarden import VaultWarden
def test_vaultwarden_login():
    """Register (first run only) and then log in against the Vaultwarden portal."""
    settings_path = f'{environ["MACHINE_TEST_DIR"]}/vaultwarden.yml'
    with open(settings_path) as stream:
        settings = load(stream, Loader=SafeLoader)
    with MookDns(settings['revprox_ip']):
        client = VaultWarden(settings['url'], settings['username'], settings['uuid'], settings['privkey'])
        if 'FIRST_RUN' in environ:
            client.register(settings['password'])
        client.login(settings['password'])
        client.load_organizations()
def test_vaultwarden_collection():
    """Create (first run) and verify an organization and its collections."""
    settings_path = f'{environ["MACHINE_TEST_DIR"]}/vaultwarden.yml'
    with open(settings_path) as stream:
        settings = load(stream, Loader=SafeLoader)
    with MookDns(settings['revprox_ip']):
        client = VaultWarden(settings['url'], settings['username'], settings['uuid'], settings['privkey'])
        client.login(settings['password'])
        client.load_organizations()
        if 'FIRST_RUN' in environ:
            org_id = client.create_organization(settings['username'],
                                                'test_organization',
                                                )
            client.create_collection(org_id,
                                     'test_collection',
                                     )
        organizations = client.vaultwarden_organizations
        # One entry is keyed None (the personal vault); the real organization
        # must carry the expected name and both collections.
        assert len(organizations) == 2
        for org_key in organizations:
            if org_key is None:
                continue
            assert organizations[org_key]['name'] == 'test_organization'
            assert len(organizations[org_key]['collections']) == 2
            assert set(organizations[org_key]['collections']) == {'test_organization', 'test_collection'}

View file

@ -1,531 +0,0 @@
from typing import Union, Tuple, Optional
#python3-crypto
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import AES, PKCS1_OAEP
from hmac import new as hmac_new
from secrets import token_bytes
from time import time
from json import dumps
from hashlib import pbkdf2_hmac, sha256
#from aiohttp import ClientSession
from requests import session
from base64 import b64encode, b64decode
from hkdf import hkdf_expand
from collections import namedtuple
from os.path import isfile
from jwt import encode as jwt_encode, decode as jwt_decode
#BITWARDEN_PRIVATE_KEY = '/var/lib/vaultwarden_rs/rsa_key.der'
# Maps each CipherString field to an extractor over the parsed components
# of a serialized cipher string.
cipher_string_fields = {
    'enc_type': lambda enc_type, iv, mac, ct: int(enc_type),
    'iv': lambda enc_type, iv, mac, ct: iv,
    'mac': lambda enc_type, iv, mac, ct: mac,
    'ct': lambda enc_type, iv, mac, ct: ct,
}
CipherString = namedtuple('CipherString', cipher_string_fields.keys())


def cipher_string_from_str(cipher_string: str) -> CipherString:
    """Parse a serialized VaultWarden cipher string into a CipherString.

    Type-2 strings look like "2.<iv>|<ct>|<mac>" (base64 components);
    any other type carries a single base64 payload placed in the ct field
    with iv and mac left as None.
    """
    enc_type, payload = cipher_string.split('.', 1)
    if enc_type == '2':
        iv, ct, mac = (b64decode(part) for part in payload.split('|', 2))
    else:
        iv = mac = None
        ct = b64decode(payload)
    fields = {name: extract(enc_type, iv, mac, ct)
              for name, extract in cipher_string_fields.items()}
    return CipherString(**fields)
class VaultWarden:
def __init__(self,
             url: str,
             email: str,
             uuid: str,
             vaultwarden_key: str,
             ) -> None:
    """Store the connection settings; no network traffic happens here.

    url: base URL of the Vaultwarden instance.
    email: account e-mail, normalized to lower case.
    uuid: device identifier presented on login.
    vaultwarden_key: path to an RSA private key file (read by register()
        to sign the e-mail verification token).
    """
    self.vaultwarden_url = url
    self.vaultwarden_email = email.lower()
    self.vaultwarden_uuid = uuid
    self.vaultwarden_key = vaultwarden_key
    # Filled in later by login() and load_organizations() respectively.
    self.vaultwarden_login = None
    self.vaultwarden_organizations = None
def register(self,
password: str,
valid: bool=True,
) -> None:
iterations = self.get_iterations()
master_key, hash_password = self.hash_password(password,
iterations,
)
# generate symmetric key
token = token_bytes(64)
enc, mac = self._get_enc_mac(master_key)
key = self.encrypt_symmetric(token,
enc=enc,
mac=mac,
)
# generate asymmetric key
asym_key = RSA.generate(2048)
enc_private_key = self.encrypt_symmetric(asym_key.exportKey('DER', pkcs=8),
enc=token[:32],
mac=token[32:],
)
public_key = b64encode(asym_key.publickey().exportKey('DER')).decode()
data = {'name': self.vaultwarden_email.split('@')[0],
'email': self.vaultwarden_email,
'masterPasswordHash': hash_password,
'masterPasswordHint': None,
'key': key,
'kdf': 0,
'kdfIterations': iterations,
'referenceId': None,
'keys': {
'publicKey': public_key,
'encryptedPrivateKey': enc_private_key
}
}
register = self._post('api/accounts/register',
dumps(data),
)
if 'Object' in register and register['Object'] == 'error':
if register["ErrorModel"]['Message'] == 'User already exists':
return
raise Exception(register["ErrorModel"]["Message"])
if valid and isfile(self.vaultwarden_key):
self.login(password)
# values = self.get('/api/sync')
# user_id = values['Profile']['Id']
user_id = jwt_decode(self.vaultwarden_login['access_token'],
algorithm="RS256",
#pyjwt 1
verify=False,
#pyjwt 2
options={"verify_signature": False},
)['sub']
now = int(time())
url = self.vaultwarden_url
if url[-1] == '/':
url = url[:-1]
data = {'nbf': now,
'exp': now + 432000,
'iss': f'{url}|verifyemail',
'sub': user_id,
}
with open(self.vaultwarden_key, 'rb') as private_key_fh:
private_key = RSA.importKey(private_key_fh.read()).exportKey('PEM')
token = jwt_encode(data, private_key, algorithm="RS256")
if isinstance(token, bytes):
tocken = token.decode()
data = {'userId': user_id,
'token': token,
}
self._post('api/accounts/verify-email-token', dumps(data))
def login(self,
password: str,
) -> None:
iterations = self.get_iterations()
master_key, hash_password = self.hash_password(password,
iterations,
)
data = {'grant_type': 'password',
'username': self.vaultwarden_email,
'password': hash_password,
'scope': 'api offline_access',
'client_id': 'desktop',
'device_type': 7,
'device_identifier': self.vaultwarden_uuid,
'device_name': 'risotto',
}
vaultwarden_login = self._post('identity/connect/token', data)
if 'Object' in vaultwarden_login and vaultwarden_login['Object'] == 'error':
raise Exception(f'unable to log to VaultWarden: {vaultwarden_login["ErrorModel"]["Message"]}')
self.vaultwarden_login = vaultwarden_login
self.vaultwarden_login['master_key'] = master_key
self.vaultwarden_login['hash_password'] = hash_password
def get_iterations(self):
data = self._post('api/accounts/prelogin', dumps({'email': self.vaultwarden_email}))
return data['KdfIterations']
def hash_password(self,
password: str,
iterations: int,
) -> str:
master_key = pbkdf2_hmac('sha256',
password.encode(),
self.vaultwarden_email.encode(),
iterations,
)
passwd = pbkdf2_hmac('sha256',
master_key,
password.encode(),
1,
)
return master_key, b64encode(passwd).decode()
def decrypt(self,
cipher_string: str,
organization_id: str=None,
) -> None:
cipher = cipher_string_from_str(cipher_string)
if cipher.enc_type == 2:
return self.decrypt_symmetric(cipher,
organization_id,
)
elif cipher.enc_type == 4:
if organization_id:
raise Exception('cipher type {cipher.enc_type} cannot have organization_id')
return self.decrypt_asymmetric(cipher)
raise Exception(f'Unknown cipher type {cipher.enc_type}')
    def decrypt_symmetric(self,
                          cipher: str,
                          organization_id: str=None,
                          enc: str=None,
                          mac: str=None,
                          ) -> bytes:
        """Decrypt a type-2 CipherString (AES-CBC-256 + HMAC-SHA256).

        `cipher` is a parsed CipherString namedtuple (despite the ``str``
        annotation).  When `enc` is not given, the 64-byte key of
        `organization_id` is split into its encryption half (first 32
        bytes) and MAC half (last 32 bytes).  Returns the plaintext bytes.
        """
        # i.e: AesCbc256_HmacSha256_B64 (jslib/src/enums/encryptionType.ts)
        assert cipher.enc_type == 2
        if enc is None:
            enc = self.vaultwarden_organizations[organization_id]['key'][:32]
            mac = self.vaultwarden_organizations[organization_id]['key'][32:]
        # verify the MAC over iv||ciphertext before decrypting
        cmac = hmac_new(mac,
                        cipher.iv + cipher.ct,
                        sha256,
                        )
        assert cipher.mac == cmac.digest()
        # decrypt the content
        c = AES.new(enc,
                    AES.MODE_CBC,
                    cipher.iv,
                    )
        plaintext = c.decrypt(cipher.ct)
        # remove PKCS#7 padding from payload, see RFC 5652
        # https://tools.ietf.org/html/rfc5652#section-6.3
        pad_len = plaintext[-1]
        padding = bytes([pad_len] * pad_len)
        if plaintext[-pad_len:] == padding:
            plaintext = plaintext[:-pad_len]
        # NOTE(review): malformed padding is silently left in place rather
        # than rejected — confirm this leniency is intended
        return plaintext
def decrypt_asymmetric(self,
cipher: str,
) -> str:
private_key = self.decrypt(self.vaultwarden_login['PrivateKey'])
c = PKCS1_OAEP.new(RSA.importKey(private_key))
return c.decrypt(cipher.ct)
    def encrypt_symmetric(self,
                          content: bytes,
                          organization_id: str=None,
                          enc: bytes=None,
                          mac: bytes=None,
                          ) -> str:
        """Encrypt `content` as a type-2 cipher string ("2.<iv>|<ct>|<mac>").

        When `enc` is not given, the 64-byte key of `organization_id` is
        split into its encryption half (first 32 bytes) and MAC half (last
        32 bytes).
        """
        # fresh random IV for every encryption
        iv = token_bytes(16)
        if enc is None:
            enc = self.vaultwarden_organizations[organization_id]['key'][:32]
            mac = self.vaultwarden_organizations[organization_id]['key'][32:]
        c = AES.new(enc,
                    AES.MODE_CBC,
                    iv,
                    )
        # PKCS#7 padding up to the AES block size (RFC 5652 section 6.3)
        pad_len = 16 - len(content) % 16
        padding = bytes([ pad_len ] * pad_len)
        ct = c.encrypt(content + padding)
        # MAC over iv||ciphertext (encrypt-then-MAC)
        cmac = hmac_new(mac,
                        iv + ct,
                        sha256,
                        )
        return f"2.{b64encode(iv).decode()}|{b64encode(ct).decode()}|{b64encode(cmac.digest()).decode()}"
def encrypt_asymmetric(self,
plaintext: str,
key: str,
) -> str:
rsa_key = RSA.importKey(key)
cipher = PKCS1_OAEP.new(rsa_key).encrypt(plaintext)
b64_cipher = b64encode(cipher).decode()
return f"4.{b64_cipher}"
def get(self,
url: str,
) -> None:
with session() as req:
resp = req.get(self.vaultwarden_url + url, headers=self._get_headers())
assert resp.status_code == 200
try:
response = resp.json()
except:
response = resp.text
return response
def _post(self,
url: str,
data: dict,
) -> None:
with session() as req:
resp = req.post(self.vaultwarden_url + url,
data=data,
headers=self._get_headers(),
)
assert resp.status_code == 200, f'unable to post to url {self.vaultwarden_url}{url} with data {data}: {resp.text}'
try:
response = resp.json()
except:
response = resp.text
return response
def _put(self,
url: str,
data: dict,
) -> None:
with session() as req:
resp = req.put(self.vaultwarden_url + url,
data=data,
headers=self._get_headers(),
)
try:
response = resp.json()
except:
response = resp.text
return response
def _get_headers(self,
) -> None:
if self.vaultwarden_login == None:
return None
return {'Authorization': f'Bearer {self.vaultwarden_login["access_token"]}'}
    def load_organizations(self,
                           only_default: bool=False,
                           ) -> None:
        """Populate self.vaultwarden_organizations from /api/sync.

        The cache maps organization id -> {'key', 'name', 'collections'};
        the entry under the key None holds the account's own 64-byte user
        key as a 'default' pseudo-organization.  Requires a prior login().
        """
        values = self.get('/api/sync')
        enc, mac = self._get_enc_mac(self.vaultwarden_login['master_key'])
        # 'decrypt' the user_key to produce the actual keys
        cipher = cipher_string_from_str(self.vaultwarden_login['Key'])
        plaintext_userkey = self.decrypt_symmetric(cipher,
                                                   enc=enc,
                                                   mac=mac,
                                                   )
        # the user key is 32 bytes encryption key + 32 bytes MAC key
        assert len(plaintext_userkey) == 64
        self.vaultwarden_organizations = {None: {'key': plaintext_userkey, 'name': 'default', 'collections': {}}}
        if not only_default:
            # each organization key is RSA-encrypted for this account
            for organization in values['Profile']['Organizations']:
                plaintext = self.decrypt(organization['Key'])
                self._add_organization(plaintext,
                                       organization,
                                       )
            # collection names are encrypted with their organization key
            for collection in values['Collections']:
                name = self.decrypt(collection['Name'],
                                    collection['OrganizationId'],
                                    ).decode()
                self.vaultwarden_organizations[collection['OrganizationId']]['collections'][name] = collection['Id']
def _get_enc_mac(self,
master_key: str,
) -> tuple:
enc = hkdf_expand(master_key,
b'enc',
32,
sha256,
)
mac = hkdf_expand(master_key,
b'mac',
32,
sha256,
)
return enc, mac
def _add_organization(self,
plaintext: bytes,
organization: dict,
) -> None:
organization_id = organization['Id']
self.vaultwarden_organizations[organization_id] = {'name': organization['Name'], 'key': plaintext, 'collections': {}}
def try_to_confirm(self,
organization_id,
email,
) -> bool:
# user is now in organization
user = self.get_user_informations(organization_id,
email,
)
# if account exists now, confirm it
if user['public_key']:
key = self.encrypt_asymmetric(self.vaultwarden_organizations[organization_id]['key'],
user['public_key'],
)
data = {"key": key}
confirmed = self._post(f'api/organizations/{organization_id}/users/{user["user_id"]}/confirm',
dumps(data),
)
return user['user_id'], 'Object' not in confirmed or confirmed['Object'] != 'error'
return user['user_id'], False
def get_user_informations(self,
organization_id: str,
email: str,
) -> None:
users = self.get(f'/api/organizations/{organization_id}/users')
for user in users['Data']:
if user['Email'] == email:
user_public_key = self.get(f'/api/users/{user["UserId"]}/public-key')
if not user_public_key['PublicKey']:
public_key = None
else:
public_key = b64decode(user_public_key['PublicKey'])
return {'user_id': user['Id'],
'public_key': public_key,
}
raise Exception(f'unknow email {email} in organization id {organization_id}')
def create_organization(self,
email: str,
organization_name: str,
) -> None:
private_key = self.decrypt(self.vaultwarden_login['PrivateKey'])
token = token_bytes(64)
key = self.encrypt_asymmetric(token,
private_key,
)
# defaut collection_name is organization_name
data = {
"key": key,
"collectionName": self.encrypt_symmetric(organization_name.encode(),
enc=token[:32],
mac=token[32:],
),
"name": organization_name,
"billingEmail": email,
"planType": 0,
}
organization = self._post('api/organizations',
dumps(data),
)
self.load_organizations()
#self._add_organization(token,
# organization,
# )
return organization['Id']
    def invite(self,
               organization_id: str,
               email: str,
               ) -> None:
        """Invite `email` into the organization as a regular member with
        read-only access to every collection currently known locally.

        Returns nothing (the previous ``-> bool`` annotation was wrong).
        """
        data = {'emails': [email],
                'collections': [],
                'accessAll': False,
                'type': 2,
                }
        for collection_id in self.vaultwarden_organizations[organization_id]['collections'].values():
            data['collections'].append({'id': collection_id,
                                        'readOnly': True,
                                        'hidePasswords': False,
                                        })
        self._post(f'api/organizations/{organization_id}/users/invite',
                   dumps(data),
                   )
def create_collection(self,
organization_id: str,
collection_name: str,
user_id: str=None,
) -> None:
data = {"groups": [],
"name": self.encrypt_symmetric(collection_name.encode(),
organization_id,
),
}
collection = self._post(f'api/organizations/{organization_id}/collections',
dumps(data),
)
self.vaultwarden_organizations[organization_id]['collections'][collection_name] = collection['Id']
if user_id:
self.inscript_collection(organization_id,
collection['Id'],
user_id,
)
return collection['Id']
def inscript_collection(self,
organization_id: str,
collection_id: str,
user_id: str,
) -> None:
data = [{'id': user_id,
'readOnly': True,
'hidePasswords': False,
}]
self._put(f'api/organizations/{organization_id}/collections/{collection_id}/users',
dumps(data),
)
    def store_password(self,
                       organization_id: str,
                       collection_id: str,
                       name: str,
                       username: str,
                       password: str,
                       uris: list=None,
                       ) -> None:
        """Create a login cipher and store it in a shared collection.

        `name`, `username` and `password` are encrypted with the
        organization key before being sent; `uris` is passed through as-is.
        """
        # FIXME uris are encoded
        data = {"cipher": {
                    "type": 1,  # 1 == login cipher
                    "folderId": None,
                    "organizationId": organization_id,
                    "name": self.encrypt_symmetric(name.encode(),
                                                   organization_id,
                                                   ),
                    "notes": None,
                    "favorite": False,
                    "login":
                        {"response": None,
                         "uris": uris,
                         "username": self.encrypt_symmetric(username.encode(),
                                                            organization_id,
                                                            ),
                         "password": self.encrypt_symmetric(password.encode(),
                                                            organization_id,
                                                            ),
                         "passwordRevisionDate": None,
                         "totp": None,
                         }
                    },
                "collectionIds": [collection_id],
                }
        self._post('api/ciphers/admin',
                   dumps(data),
                   )
    def get_password(self,
                     organization_id: str,
                     collection_name: str,
                     name: str,
                     username: str,
                     ) -> list:
        """Find a stored password by collection, cipher name and username.

        The visible paths return the decrypted password string, or None
        when the collection is unknown — the ``-> list`` annotation looks
        wrong; TODO confirm against the rest of the method.
        """
        if not collection_name in self.vaultwarden_organizations[organization_id]['collections']:
            return
        collection_id = self.vaultwarden_organizations[organization_id]['collections'][collection_name]
        # fetch every cipher of the organization and match locally
        ciphers = self.get(f'api/ciphers/organization-details?organizationId={organization_id}')
        for cipher in ciphers['Data']:
            if collection_id in cipher['CollectionIds'] and \
                    self.decrypt(cipher['Data']['Name'], organization_id).decode() == name and \
                    self.decrypt(cipher['Data']['Username'], organization_id).decode() == username:
                return self.decrypt(cipher['Data']['Password'], organization_id).decode()