Archive abandoned project

This commit is contained in:
Benoit 2025-02-15 00:56:26 +09:00
parent bc8862d90b
commit 65be894048
501 changed files with 24305 additions and 0 deletions

6
cinc-repo/.chef-repo.txt Normal file
View file

@ -0,0 +1,6 @@
.chef-repo.txt
==============
This file gives the Chef CLI's generators a hint that you are using a Chef Infra
Repo and this is the root directory of your Chef Infra Repo. Chef CLI's generators
use this to generate code that is designed to work with the Chef Repo workflow.
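As an illustration of the mechanism (a hypothetical sketch, not Chef's actual implementation), a generator could locate the repo root by walking up from the current directory until it finds this marker file:

```
# Hypothetical sketch only -- not Chef's real code.
# Walk upward from the current directory until a .chef-repo.txt marker is
# found, and treat that directory as the Chef Infra Repo root.
require 'pathname'

def find_repo_root(start = Dir.pwd)
  dir = Pathname.new(start).expand_path
  until dir.root?
    return dir.to_s if (dir + '.chef-repo.txt').file?
    dir = dir.parent
  end
  nil
end
```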

129
cinc-repo/.gitignore vendored Normal file
View file

@ -0,0 +1,129 @@
## Below are examples of common git excludes.
## Please note that the /cookbooks folder is ignored. This allows users to
## clone individual cookbooks into the /cookbooks folder of the chef repo
## and work on them in parallel. This pattern also allows for the chef-workstation
## pattern, where the base repo also builds out a dynamic chef workstation.
## Examples of workstation cookbooks:
## https://github.com/mwrock/chef_workstation
## https://github.com/Nordstrom/chefdk_bootstrap
## Ignore Chef related files and secrets
.chef
.chef/*.pem
.chef/encrypted_data_bag_secret
## Ignore Chef-Zero files
clients
nodes
## Ignore Policy lock
exported-policies
policyfiles/*.lock.json
# ## OS junk files
# [Tt]humbs.db
# *.DS_Store
# ## Example of the workstation pattern.
# !/cookbooks/chef_workstation/files/default/bundler/Gemfile
# !/cookbooks/chef_workstation/files/default/bundler/Gemfile.lock
# cookbooks/*
# !cookbooks/chef_workstation
# ##Chef
# .kitchen/
# .vagrant
# nodes
# metadata.json
# ##ruby
# *.gem
# Gemfile
# Gemfile.lock
.rake_test_cache
# ## Rails Heroku and other bits to ignore
# *.log
# *.sqlite3
# db/*.sqlite3
# .bundle
# log/*
# tmp/*
# public/system/*
# ## nodejs
# node_modules
# ## Nuget (exclude all exes except for the one in the global build folder)
# nuget.exe
# !build/nuget/nuget.exe
# *.nupkg
# # NuGet packages (based on default naming convention)
# [Bb]uild/[Pp]ackages/
# ## Build System # common build output folders
# build-common/
# output/
# ## VM images
# *.vhd
# *.vhdx
# ## Pester Test summary
# Test.xml
# ## JetBrains files
# *.idea
# .idea
# .idea/
# ## Mono files
# *.pidb
# *.userprefs
# ## Visual Studio files
# *.docstates
# *.[Oo]bj
# *.dat
# *.crc
# *.dbmdl
# *.pdb
# *.user
# *.aps
# *.pch
# *.vspscc
# *.vssscc
# *_i.c
# *_p.c
# *.ncb
# *.suo
# *.tlb
# *.tlh
# *.bak
# *.[Cc]ache
# *.ilk
# *.log
# *.lib
# *.sbr
# *.schemaview
# ipch/
# [Oo]bj/
# [Bb]in/*
# [Dd]ebug*/
# [Rr]elease*/
# Ankh.NoLoad
syntaxcache
# ## Tooling
# _ReSharper*/
# *.[Rr]e[Ss]harper
# [Tt]est[Rr]esult*
# .[Jj]ust[Cc]ode
# *ncrunch*
# ## Subversion files
# .svn
# ## Office Temp Files
# ~$*

201
cinc-repo/LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

20
cinc-repo/README.md Normal file
View file

@ -0,0 +1,20 @@
# Overview
Every Chef Infra installation needs a Chef Repository. This is the place where cookbooks, policyfiles, config files and other artifacts for managing systems with Chef Infra will live. We strongly recommend storing this repository in a version control system such as Git and treating it like source code.
## Repository Directories
This repository contains several directories, and each directory contains a README file that describes what it is for in greater detail, and how to use it for managing your systems with Chef.
- `cookbooks/` - Cookbooks you download or create.
- `data_bags/` - Store data bags and items in .json in the repository.
- `roles/` - Store roles in .rb or .json in the repository.
- `environments/` - Store environments in .rb or .json in the repository.
## Configuration
The config file, `.chef/config.rb`, is a repository-specific configuration file for the knife command-line tool. If you're using the Hosted Chef platform, you can download one for your organization from the management console. You can also generate a new config.rb by running `knife configure`. For more information about configuring Knife, see the Knife documentation at https://docs.chef.io/workstation/knife/
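As a rough sketch (values and paths are illustrative; this repo's actual settings live in `knife.rb` elsewhere in this commit), a minimal `.chef/config.rb` for local-mode use could look like:

```
# Illustrative .chef/config.rb sketch -- adjust names and paths to your setup.
current_dir = File.dirname(__FILE__)

node_name      'workstation'
chef_repo_path File.expand_path('..', current_dir)
cookbook_path  [File.expand_path('../cookbooks', current_dir)]
local_mode     true
```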
## Next Steps
Read the README file in each of the subdirectories for more information about what goes in those directories.

115
cinc-repo/chefignore Normal file
View file

@ -0,0 +1,115 @@
# Put files/directories that should be ignored in this file when uploading
# to a Chef Infra Server or Supermarket.
# Lines that start with '# ' are comments.
# OS generated files #
######################
.DS_Store
ehthumbs.db
Icon?
nohup.out
Thumbs.db
.envrc
# EDITORS #
###########
.#*
.project
.settings
*_flymake
*_flymake.*
*.bak
*.sw[a-z]
*.tmproj
*~
\#*
REVISION
TAGS*
tmtags
.vscode
.editorconfig
## COMPILED ##
##############
*.class
*.com
*.dll
*.exe
*.o
*.pyc
*.so
*/rdoc/
a.out
mkmf.log
# Testing #
###########
.circleci/*
.codeclimate.yml
.delivery/*
.foodcritic
.kitchen*
.mdlrc
.overcommit.yml
.rspec
.rubocop.yml
.travis.yml
.watchr
.yamllint
azure-pipelines.yml
Dangerfile
examples/*
features/*
Guardfile
kitchen.yml*
mlc_config.json
Procfile
Rakefile
spec/*
test/*
# SCM #
#######
.git
.gitattributes
.gitconfig
.github/*
.gitignore
.gitkeep
.gitmodules
.svn
*/.bzr/*
*/.git
*/.hg/*
*/.svn/*
# Berkshelf #
#############
Berksfile
Berksfile.lock
cookbooks/*
tmp
# Bundler #
###########
vendor/*
Gemfile
Gemfile.lock
# Policyfile #
##############
Policyfile.rb
Policyfile.lock.json
# Documentation #
#############
CODE_OF_CONDUCT*
CONTRIBUTING*
documentation/*
TESTING*
UPGRADING*
# Vagrant #
###########
.vagrant
Vagrantfile

View file

@ -0,0 +1 @@
../../esh_adguard/

View file

@ -0,0 +1 @@
../../esh_archivebox

View file

@ -0,0 +1 @@
../../esh_borgmatic

View file

@ -0,0 +1 @@
../../esh_cinc

View file

@ -0,0 +1 @@
../../esh_cloudflared

View file

@ -0,0 +1 @@
../../esh_docker/

View file

@ -0,0 +1 @@
../../esh_forgejo

View file

@ -0,0 +1 @@
../../esh_go_mmproxy/

View file

@ -0,0 +1 @@
../../esh_haproxy/

View file

@ -0,0 +1 @@
../../esh_kanboard/

View file

@ -0,0 +1 @@
../../esh_laminar

View file

@ -0,0 +1 @@
../../esh_letsencrypt/

1
cinc-repo/cookbooks/esh_lxd Symbolic link
View file

@ -0,0 +1 @@
../../esh_lxd

View file

@ -0,0 +1 @@
../../esh_mailcow/

View file

@ -0,0 +1 @@
../../esh_miniflux/

View file

@ -0,0 +1 @@
../../esh_mkdocs

View file

@ -0,0 +1 @@
../../esh_netplan/

View file

@ -0,0 +1 @@
../../esh_nginx/

View file

@ -0,0 +1 @@
../../esh_nitter/

View file

@ -0,0 +1 @@
../../esh_photoprism/

View file

@ -0,0 +1 @@
../../esh_piped

View file

@ -0,0 +1 @@
../../esh_syncthing

View file

@ -0,0 +1 @@
../../esh_system/

View file

@ -0,0 +1 @@
../../esh_systemd

1
cinc-repo/cookbooks/esh_ufw Symbolic link
View file

@ -0,0 +1 @@
../../esh_ufw/

View file

@ -0,0 +1 @@
../../esh_undocker

View file

@ -0,0 +1 @@
../../esh_vaultwarden/

View file

@ -0,0 +1 @@
../../esh_webhook

View file

@ -0,0 +1 @@
../../esh_wireguard/

View file

@ -0,0 +1 @@
../../esh_writefreely/

1
cinc-repo/cookbooks/esh_zfs Symbolic link
View file

@ -0,0 +1 @@
../../esh_zfs

56
cinc-repo/data_bags/README.md Normal file
View file

@ -0,0 +1,56 @@
# Data Bags
This directory contains directories of the various data bags you create for your infrastructure. Each subdirectory corresponds to a data bag on the Chef Infra Server, and contains JSON files of the items that go in the bag.
For example, in this directory, you'll find an example data bag directory called `example`, which contains an item definition called `example_item.json`.
Before uploading this item to the server, we must first create the data bag on the Chef Infra Server.
knife data bag create example
Then we can upload the items in the data bag's directory to the Chef Infra Server.
knife data bag from file example example_item.json
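Once uploaded, a cookbook recipe can read the item back with the standard `data_bag_item` DSL call; a minimal sketch (the `log` resource is only for illustration):

```
# Sketch: read the plain-text item created above from within a recipe.
item = data_bag_item('example', 'example_item')

log 'show_example_item' do
  message "example_item key = #{item['key']}"
end
```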
For more information on data bags, see the Chef Infra docs site:
https://docs.chef.io/data_bags/
# Encrypted Data Bags
Encrypted data bags allow you to encrypt the contents of your data bags. The content of attributes will no longer be searchable. To use encrypted data bags, first you must have or create a secret key.
openssl rand -base64 512 > secret_key
You may use this secret_key to add items to a data bag during a create.
knife data bag create --secret-file secret_key passwords mysql
You may also use it when adding items from files:
knife data bag create passwords
knife data bag from file passwords data_bags/passwords/mysql.json --secret-file secret_key
The JSON for the item must contain a key named "id" whose value matches the item name; the contents will be encrypted when uploaded. For example,
{
"id": "mysql",
"password": "abc123"
}
Without the secret_key, the contents are shown encrypted.
knife data bag show passwords mysql
id: mysql
password: 2I0XUUve1TXEojEyeGsjhw==
Use the secret_key to view the contents.
knife data bag show passwords mysql --secret-file secret_key
id: mysql
password: abc123
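To consume the encrypted item from a recipe, a sketch assuming the secret file has been distributed to the node (the `/etc/chef/encrypted_data_bag_secret` path is illustrative):

```
# Sketch: decrypt passwords/mysql in a recipe using the shared secret file.
secret = Chef::EncryptedDataBagItem.load_secret('/etc/chef/encrypted_data_bag_secret')
mysql  = data_bag_item('passwords', 'mysql', secret)
# mysql['password'] now contains the decrypted value ("abc123" in the example above).
```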
For more information on encrypted data bags, see the Chef Infra docs site:
https://docs.chef.io/data_bags/

4
cinc-repo/data_bags/example/example_item.json Normal file
View file

@ -0,0 +1,4 @@
{
"id": "example_item",
"key": "value"
}

33
cinc-repo/knife.rb Normal file
View file

@ -0,0 +1,33 @@
local_mode true
chef_repo_path File.expand_path('../', __FILE__)
knife[:ssh_attribute] = "knife_zero.host"
knife[:use_sudo] = true
## Use a specific key file to connect to the server instead of ssh-agent (using ssh-agent is enabled by default).
# knife[:identity_file] = "~/.ssh/id_rsa"
# knife[:ssh_identity_file] = 'PATH_TO_YOUR_PRIVATE_KEY' # Newer than Chef 14
## Attributes of node objects will be saved to a JSON file.
## The automatic_attribute_whitelist option limits which attributes are saved.
knife[:automatic_attribute_whitelist] = %w[
fqdn
os
os_version
hostname
ipaddress
roles
recipes
platform
platform_version
cloud
cloud_v2
chef_packages
]
use_policyfile true
versioned_cookbooks true
policy_document_native_api false
chef_server_url "http://localhost:8889" # for `chef push`

24
cinc-repo/policyfiles/README.md Normal file
View file

@ -0,0 +1,24 @@
Create Policyfiles here. When using a chef-repo, give your Policyfiles
the same filename as the name set in the policyfile itself, and use the
`.rb` file extension.
Compile the policy with a command like this:
```
chef install policyfiles/my-app-frontend.rb
```
This will create a lockfile `policyfiles/my-app-frontend.lock.json`.
To update locked dependencies, run `chef update` like this:
```
chef update policyfiles/my-app-frontend.rb
```
You can upload the policy (with associated cookbooks) to the server
using a command like:
```
chef push staging policyfiles/my-app-frontend.rb
```
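For reference, a minimal Policyfile following the conventions above might look like this (the cookbook, run list, and attribute names are placeholders):

```
# policyfiles/my-app-frontend.rb -- illustrative sketch only
name 'my-app-frontend'

# Resolve cookbooks from the local repo first, then the public Supermarket.
default_source :chef_repo, '../cookbooks'
default_source :supermarket, 'https://supermarket.chef.io'

run_list %w(
  my_cookbook::default
)

default['my_cookbook']['listen_port'] = '8080'
```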

33
cinc-repo/policyfiles/archive.rb Normal file
View file

@ -0,0 +1,33 @@
name 'archive'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_docker::service
esh_archivebox::system
esh_archivebox::compose
)
###
# Attributes
###
###
# esh_archivebox
###
default['esh']['archivebox']['username'] = 'benpro'
default['esh']['archivebox']['email'] = 'archivebox@benpro.fr'
default['esh']['archivebox']['password'] = ''

40
cinc-repo/policyfiles/blog.rb Normal file
View file

@ -0,0 +1,40 @@
name 'blog'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_writefreely::install
esh_writefreely::mariadb
esh_writefreely::service
)
###
# Attributes
###
###
# esh_writefreely
###
version = '0.13.1'
default['esh']['writefreely']['version'] = version
default['esh']['writefreely']['url'] = "https://github.com/writefreely/writefreely/releases/download/v#{version}/writefreely_#{version}_linux_amd64.tar.gz"
default['esh']['writefreely']['mariadb']['password'] = ''
default['esh']['writefreely']['app']['site_name'] = 'blog.benpro.fr'
default['esh']['writefreely']['app']['host'] = 'https://blog.benpro.fr'
default['esh']['writefreely']['app']['single_user'] = 'true'
default['esh']['writefreely']['nginx']['ip_addr'] = '127.0.0.1'
default['esh']['writefreely']['nginx']['port'] = '8080'

433
cinc-repo/policyfiles/dns.rb Normal file
View file

@ -0,0 +1,433 @@
name 'dns'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_go_mmproxy::default
esh_adguard::default
)
###
# Attributes
###
###
# esh_go_mmproxy
###
# Mapping of destination ('to') port to listen port
default['esh']['go_mmproxy']['proxies'] = {
'853': '10853',
}
default['esh']['go_mmproxy']['prefixes'] = <<~EOT
10.0.0.0/8
EOT
###
# esh_adguard
###
default['esh']['adguard']['cert_pub'] = 'http://10.10.10.1:8898/dns.benoit.jp.net/fullchain.pem'
default['esh']['adguard']['cert_priv'] = 'http://10.10.10.1:8898/dns.benoit.jp.net/privkey.pem'
default['esh']['adguard']['cert_auth'] = ''
default['esh']['adguard']['version'] = '0.107.55'
default['esh']['adguard']['config'] = <<~EOT
http:
pprof:
port: 6060
enabled: false
address: 0.0.0.0:80
session_ttl: 720h
users:
- name: benoit
password:
auth_attempts: 5
block_auth_min: 15
http_proxy: ""
language: en
theme: dark
dns:
bind_hosts:
- 0.0.0.0
port: 1053
anonymize_client_ip: false
ratelimit: 100
ratelimit_subnet_len_ipv4: 24
ratelimit_subnet_len_ipv6: 56
ratelimit_whitelist: []
refuse_any: true
upstream_dns:
- '# Quad9'
- https://dns11.quad9.net/dns-query
- '# CloudFlare'
- tls://1dot1dot1dot1.cloudflare-dns.com
- https://dns.cloudflare.com/dns-query
- '# IIJ'
- tls://public.dns.iij.jp
- https://public.dns.iij.jp/dns-query
- '# NextDNS'
- tls://dns.nextdns.io
- https://dns.nextdns.io
- https://anycast.dns.nextdns.io
- tls://anycast.dns.nextdns.io
- '# AdGuard'
- https://unfiltered.adguard-dns.com/dns-query
- tls://unfiltered.adguard-dns.com
- quic://unfiltered.adguard-dns.com
- '# Cisco OpenDNS'
- https://doh.opendns.com/dns-query
- '# Google'
- https://dns.google/dns-query
- tls://dns.google
- '# Tailscale'
- '[/taile088c7.ts.net/]100.100.100.100'
upstream_dns_file: ""
bootstrap_dns:
- 94.140.14.140
fallback_dns: []
upstream_mode: load_balance
fastest_timeout: 1s
allowed_clients:
- chiisai-firefox
- bluejay
- chiisai-chromium
- tangorpro
- reven
- lavie-firefox
- lavie-chromium
- caiman
disallowed_clients: []
blocked_hosts:
- version.bind
- id.server
- hostname.bind
trusted_proxies:
- 127.0.0.0/8
- ::1/128
- 10.0.0.0/8
cache_size: 4194304
cache_ttl_min: 0
cache_ttl_max: 0
cache_optimistic: true
bogus_nxdomain: []
aaaa_disabled: false
enable_dnssec: true
edns_client_subnet:
custom_ip: ""
enabled: false
use_custom: false
max_goroutines: 50
handle_ddr: true
ipset: []
ipset_file: ""
bootstrap_prefer_ipv6: false
upstream_timeout: 10s
private_networks: []
use_private_ptr_resolvers: true
local_ptr_upstreams: []
use_dns64: false
dns64_prefixes: []
serve_http3: false
use_http3_upstreams: false
serve_plain_dns: false
hostsfile_enabled: true
tls:
enabled: true
server_name: dns.benoit.jp.net
force_https: true
port_https: 443
port_dns_over_tls: 853
port_dns_over_quic: 784
port_dnscrypt: 0
dnscrypt_config_file: ""
allow_unencrypted_doh: false
certificate_chain: ""
private_key: ""
certificate_path: /etc/adguard/fullchain.pem
private_key_path: /etc/adguard/privkey.pem
strict_sni_check: false
querylog:
dir_path: ""
ignored: []
interval: 2160h
size_memory: 1000
enabled: true
file_enabled: true
statistics:
dir_path: ""
ignored: []
interval: 2160h
enabled: true
filters:
- enabled: true
url: https://logroid.github.io/adaway-hosts/hosts.txt
name: AdAway Blocking Hosts File for Japan
id: 1598087715
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_32.txt
name: The NoTracking blocklist
id: 1686439100
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_1.txt
name: AdGuard DNS filter
id: 1686439101
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_2.txt
name: AdAway Default Blocklist
id: 1686439102
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_23.txt
name: WindowsSpyBlocker - Hosts spy rules
id: 1686439103
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_6.txt
name: Dandelion Sprout's Game Console Adblock List
id: 1686439104
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_7.txt
name: Perflyst and Dandelion Sprout's Smart-TV Blocklist
id: 1686439105
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_11.txt
name: Malicious URL Blocklist (URLHaus)
id: 1686439106
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_9.txt
name: The Big List of Hacked Malware Web Sites
id: 1686439107
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_31.txt
name: Stalkerware Indicators List
id: 1686439108
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_42.txt
name: ShadowWhisperer's Malware List
id: 1686439109
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_10.txt
name: Scam Blocklist by DurableNapkin
id: 1686439110
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_8.txt
name: NoCoin Filter List
id: 1686439111
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_12.txt
name: Dandelion Sprout's Anti-Malware List
id: 1686439112
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_30.txt
name: Phishing URL Blocklist (PhishTank and OpenPhish)
id: 1686439113
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_24.txt
name: 1Hosts (Lite)
id: 1686439114
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_4.txt
name: Dan Pollock's List
id: 1686439115
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_27.txt
name: OISD Blocklist Big
id: 1686439117
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_3.txt
name: Peter Lowe's Blocklist
id: 1686439118
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_33.txt
name: Steven Black's List
id: 1686439119
whitelist_filters: []
user_rules:
- '@@||rd.rakuten.co.jp^$important'
- '@@||fls-fe.amazon.co.jp^$important'
- '@@||searchapi.agoda.com^$important'
- '@@||auth.split.io^$important'
- '@@||sdk.split.io^$important'
- ""
dhcp:
enabled: false
interface_name: ""
local_domain_name: lan
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
filtering:
blocking_ipv4: ""
blocking_ipv6: ""
blocked_services:
schedule:
time_zone: Local
ids:
- facebook
- twitter
- snapchat
- origin
- epic_games
- vk
- mail_ru
- discord
- ok
- tiktok
- 9gag
- hulu
- whatsapp
- wechat
- tinder
- skype
- pinterest
- disneyplus
- qq
- weibo
- telegram
- roblox
- icloud_private_relay
- zhihu
- minecraft
- douban
- deezer
- bilibili
- instagram
- iqiyi
- lazada
- riot_games
- tidal
- twitch
- voot
- xboxlive
- rakuten_viki
- leagueoflegends
- kakaotalk
- hbomax
- crunchyroll
- kik
- onlyfans
- shopee
- soundcloud
- valorant
- shein
- temu
- yy
- xiaohongshu
- wargaming
- ubisoft
- wizz
- samsung_tv_plus
- nebula
- lionsgateplus
- fifa
- dropbox
- discoveryplus
- coolapk
- claro
- bluesky
- betfair
- apple_streaming
- 500px
- amino
- betano
- bigo_live
- blizzard_entertainment
- canais_globo
- clubhouse
- electronic_arts
- globoplay
- linkedin
- paramountplus
- plenty_of_fish
- privacy
- rockstar_games
- pluto_tv
- mercado_libre
- looke
- kook
- iheartradio
- espn
- directvgo
- box
- blaze
- betway
- battle_net
- activision_blizzard
- 4chan
- ebay
- olvid
- peacock_tv
- slack
- spotify
- tumblr
protection_disabled_until: null
safe_search:
enabled: false
bing: true
duckduckgo: true
ecosia: true
google: true
pixabay: true
yandex: true
youtube: true
blocking_mode: null_ip
parental_block_host: family-block.dns.adguard.com
safebrowsing_block_host: standard-block.dns.adguard.com
rewrites: []
safe_fs_patterns:
- /var/lib/adguard/data/userfilters/*
safebrowsing_cache_size: 1048576
safesearch_cache_size: 1048576
parental_cache_size: 1048576
cache_time: 30
filters_update_interval: 24
blocked_response_ttl: 10
filtering_enabled: true
parental_enabled: true
safebrowsing_enabled: true
protection_enabled: true
clients:
runtime_sources:
whois: true
arp: true
rdns: true
dhcp: true
hosts: true
persistent: []
log:
enabled: true
file: ""
max_backups: 0
max_size: 100
max_age: 3
compress: false
local_time: false
verbose: false
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 29
EOT

39
cinc-repo/policyfiles/flux.rb Normal file
View file

@ -0,0 +1,39 @@
name 'flux'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_miniflux::default
)
###
# Attributes
###
###
# esh_miniflux
###
default['esh']['miniflux']['base_url'] = 'https://flux.benoit.jp.net'
default['esh']['miniflux']['postgresql']['password'] = ''
default['esh']['miniflux']['configfile'] = <<~EOT
# See https://miniflux.app/docs/configuration.html
RUN_MIGRATIONS=1
PROXY_IMAGES=all
DATABASE_URL=user=miniflux password=#{default['esh']['miniflux']['postgresql']['password']} dbname=miniflux
BASE_URL=#{default['esh']['miniflux']['base_url']}
LISTEN_ADDR=0.0.0.0:8080
EOT

127
cinc-repo/policyfiles/gallery.rb Normal file
View file

@ -0,0 +1,127 @@
name 'gallery'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_photoprism::mariadb
esh_photoprism::system
esh_photoprism::undocker
)
###
# Attributes
###
###
# esh_photoprism
###
default['esh']['photoprism']['nginx']['ip_addr'] = '127.0.0.1'
default['esh']['photoprism']['nginx']['port'] = '2342'
default['esh']['photoprism']['docker']['url'] = 'docker.io/photoprism'
default['esh']['photoprism']['docker']['image'] = 'photoprism'
default['esh']['photoprism']['docker']['tag'] = '221118-jammy'
default['esh']['photoprism']['docker']['network'] = 'host'
PHOTOPRISM_ADMIN_USER = 'benoit'.freeze
PHOTOPRISM_ADMIN_PASSWORD = ''.freeze
PHOTOPRISM_AUTH_MODE = 'password'.freeze
PHOTOPRISM_SITE_URL = 'https://gallery.benpro.fr'.freeze
PHOTOPRISM_ORIGINALS_LIMIT = 5000
PHOTOPRISM_HTTP_COMPRESSION = 'gzip'.freeze
PHOTOPRISM_LOG_LEVEL = 'info'.freeze
PHOTOPRISM_READONLY = 'true'.freeze
PHOTOPRISM_EXPERIMENTAL = 'false'.freeze
PHOTOPRISM_DISABLE_CHOWN = 'true'.freeze
PHOTOPRISM_DISABLE_WEBDAV = 'true'.freeze
PHOTOPRISM_DISABLE_SETTINGS = 'false'.freeze
PHOTOPRISM_DISABLE_TENSORFLOW = 'false'.freeze
PHOTOPRISM_DISABLE_FACES = 'false'.freeze
PHOTOPRISM_DISABLE_CLASSIFICATION = 'false'.freeze
PHOTOPRISM_DISABLE_RAW = 'false'.freeze
PHOTOPRISM_RAW_PRESETS = 'false'.freeze
PHOTOPRISM_JPEG_QUALITY = 85
PHOTOPRISM_DETECT_NSFW = 'false'.freeze
PHOTOPRISM_UPLOAD_NSFW = 'true'.freeze
PHOTOPRISM_DATABASE_DRIVER = 'mysql'.freeze
PHOTOPRISM_DATABASE_SERVER = 'localhost:3306'.freeze
PHOTOPRISM_DATABASE_NAME = 'photoprism'.freeze
PHOTOPRISM_DATABASE_USER = 'photoprism'.freeze
PHOTOPRISM_DATABASE_PASSWORD = 'Enrage-Spring-Refill1'.freeze
PHOTOPRISM_SITE_CAPTION = 'Benpro Gallery'.freeze
PHOTOPRISM_SITE_DESCRIPTION = 'Benpro photos'.freeze
PHOTOPRISM_SITE_AUTHOR = 'benpro.fr'.freeze
PHOTOPRISM_INIT = 'tensorflow'.freeze
PHOTOPRISM_UID = 998
PHOTOPRISM_GID = 998
default['esh']['photoprism']['mariadb']['password'] = PHOTOPRISM_DATABASE_PASSWORD
default['esh']['photoprism']['docker']['env'] = [
"PHOTOPRISM_ADMIN_USER=#{PHOTOPRISM_ADMIN_USER}",
"PHOTOPRISM_ADMIN_PASSWORD=#{PHOTOPRISM_ADMIN_PASSWORD}",
"PHOTOPRISM_AUTH_MODE=#{PHOTOPRISM_AUTH_MODE}",
"PHOTOPRISM_SITE_URL=#{PHOTOPRISM_SITE_URL}",
"PHOTOPRISM_ORIGINALS_LIMIT=#{PHOTOPRISM_ORIGINALS_LIMIT}",
"PHOTOPRISM_HTTP_COMPRESSION=#{PHOTOPRISM_HTTP_COMPRESSION}",
"PHOTOPRISM_LOG_LEVEL=#{PHOTOPRISM_LOG_LEVEL}",
"PHOTOPRISM_READONLY=#{PHOTOPRISM_READONLY}",
"PHOTOPRISM_EXPERIMENTAL=#{PHOTOPRISM_EXPERIMENTAL}",
"PHOTOPRISM_DISABLE_CHOWN=#{PHOTOPRISM_DISABLE_CHOWN}",
"PHOTOPRISM_DISABLE_WEBDAV=#{PHOTOPRISM_DISABLE_WEBDAV}",
"PHOTOPRISM_DISABLE_SETTINGS=#{PHOTOPRISM_DISABLE_SETTINGS}",
"PHOTOPRISM_DISABLE_TENSORFLOW=#{PHOTOPRISM_DISABLE_TENSORFLOW}",
"PHOTOPRISM_DISABLE_FACES=#{PHOTOPRISM_DISABLE_FACES}",
"PHOTOPRISM_DISABLE_CLASSIFICATION=#{PHOTOPRISM_DISABLE_CLASSIFICATION}",
"PHOTOPRISM_DISABLE_RAW=#{PHOTOPRISM_DISABLE_RAW}",
"PHOTOPRISM_RAW_PRESETS=#{PHOTOPRISM_RAW_PRESETS}",
"PHOTOPRISM_JPEG_QUALITY=#{PHOTOPRISM_JPEG_QUALITY}",
"PHOTOPRISM_DETECT_NSFW=#{PHOTOPRISM_DETECT_NSFW}",
"PHOTOPRISM_UPLOAD_NSFW=#{PHOTOPRISM_UPLOAD_NSFW}",
"PHOTOPRISM_DATABASE_DRIVER=#{PHOTOPRISM_DATABASE_DRIVER}",
"PHOTOPRISM_DATABASE_SERVER=#{PHOTOPRISM_DATABASE_SERVER}",
"PHOTOPRISM_DATABASE_NAME=#{PHOTOPRISM_DATABASE_NAME}",
"PHOTOPRISM_DATABASE_USER=#{PHOTOPRISM_DATABASE_USER}",
"PHOTOPRISM_DATABASE_PASSWORD=#{PHOTOPRISM_DATABASE_PASSWORD}",
"PHOTOPRISM_SITE_CAPTION=#{PHOTOPRISM_SITE_CAPTION}",
"PHOTOPRISM_SITE_DESCRIPTION=#{PHOTOPRISM_SITE_DESCRIPTION}",
"PHOTOPRISM_SITE_AUTHOR=#{PHOTOPRISM_SITE_AUTHOR}",
"PHOTOPRISM_INIT=#{PHOTOPRISM_INIT}",
"PHOTOPRISM_UID=#{PHOTOPRISM_UID}",
"PHOTOPRISM_GID=#{PHOTOPRISM_GID}",
]
default['esh']['photoprism']['docker']['service'] = <<~EOT
[Unit]
Description=ESH Undockerized photoprism Service
After=network.target mariadb.service
Requires=mariadb.service
[Service]
Type=simple
ExecStart=/usr/bin/systemd-nspawn \
--oci-bundle=/var/lib/machines/photoprism \
--machine photoprism \
--hostname photoprism \
--bind=/var/lib/gallery-originals:/photoprism/originals \
--bind=/var/lib/gallery-storage:/photoprism/storage \
--resolv-conf=bind-stub \
--capability=CAP_SETUID
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOT

122
cinc-repo/policyfiles/git.rb Normal file
View file

@ -0,0 +1,122 @@
name 'git'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_forgejo::system
esh_forgejo::mariadb
esh_forgejo::service
)
###
# Attributes
###
###
# esh_forgejo
###
default['esh']['forgejo']['mariadb']['password'] = ''
default['esh']['forgejo']['service']['binary'] = 'https://codeberg.org/forgejo/forgejo/releases/download/v8.0.1/forgejo-8.0.1-linux-amd64'
default['esh']['forgejo']['service']['asc'] = 'https://codeberg.org/forgejo/forgejo/releases/download/v8.0.1/forgejo-8.0.1-linux-amd64.asc'
default['esh']['forgejo']['service']['load_config'] = true
default['esh']['forgejo']['service']['config'] = <<~EOT
APP_NAME = Benoit's git
RUN_USER = git
RUN_MODE = prod
WORK_PATH = /var/lib/gitea
[database]
DB_TYPE = mysql
HOST = 127.0.0.1:3306
NAME = git
USER = git
PASSWD =
SCHEMA =
SSL_MODE = disable
CHARSET = utf8
PATH = /var/lib/gitea/data/forgejo.db
LOG_SQL = false
[repository]
ROOT = /var/lib/gitea/data/forgejo-repositories
[server]
SSH_DOMAIN = git.benoit.jp.net
DOMAIN = git.benoit.jp.net
HTTP_PORT = 3000
ROOT_URL = https://git.benoit.jp.net/
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 22
SSH_LISTEN_HOST = 10.78.127.119
SSH_LISTEN_PORT = 10022
SSH_SERVER_USE_PROXY_PROTOCOL = true
LFS_START_SERVER = true
LFS_JWT_SECRET =
OFFLINE_MODE = false
[lfs]
PATH = /var/lib/gitea/data/lfs
[mailer]
ENABLED = true
FROM = git-no-reply@benoit.jp.net
MAILER_TYPE = smtp
SMTP_ADDR = mail.benoit.jp.net
SMTP_PORT = 465
IS_TLS_ENABLED = true
USER = git-no-reply@benoit.jp.net
PASSWD =
[service]
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
DISABLE_REGISTRATION = true
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = false
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.benoit.jp.net
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
[session]
PROVIDER = file
[log]
MODE = console
LEVEL = info
ROOT_PATH = /var/lib/gitea/log
ROUTER = console
[repository.pull-request]
DEFAULT_MERGE_STYLE = merge
[repository.signing]
DEFAULT_TRUST_MODEL = committer
[security]
INSTALL_LOCK = true
INTERNAL_TOKEN =
PASSWORD_HASH_ALGO = argon2
[oauth2]
JWT_SECRET =
EOT

265
cinc-repo/policyfiles/gtw.rb Normal file
View file

@ -0,0 +1,265 @@
name 'gtw'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_system::hostname
esh_system::sshd
esh_ufw::rules
esh_wireguard::server
esh_haproxy::config
esh_letsencrypt::snap
esh_letsencrypt::certs
esh_letsencrypt::serve
)
###
# Attributes
###
###
# esh_system
###
default['esh']['system']['hostname']['fqdn'] = 'gtw.benoit.jp.net'
default['esh']['system']['sshd']['port'] = '28'
default['esh']['system']['sshd']['permitrootlogin'] = 'prohibit-password'
default['esh']['system']['sshd']['passwordauthentication'] = 'no'
default['esh']['system']['sshd']['maxauthtries'] = '3'
default['esh']['system']['sshd']['maxsessions'] = '5'
default['esh']['system']['sshd']['otp'] = false
###
# esh_ufw
###
default['esh']['ufw']['rules']['list'] = [
'limit from any to any port 22',
'limit from any to any port 28',
'allow from any to any port 25',
'allow from any to any port 80',
'allow from any to any port 443',
'allow from any to any port 465',
'allow from any to any port 853',
'allow from any to any port 993',
'allow from any to any port 4190',
'allow from 10.10.10.3 to 10.10.10.1 port 8898',
'allow from any to any port 51820',
]
###
# esh_wireguard
###
default['esh']['wireguard']['server']['privkey'] = '='
default['esh']['wireguard']['server']['pubkey'] = '3JJ00aMP/1mPJeUW0sci4dIK4S4XBiTWWaBgZgq+LCQ='
default['esh']['wireguard']['server']['address'] = '10.10.10.1/24, fdaf:345d:a5fc::1/64'
default['esh']['wireguard']['server']['listenport'] = '51820'
default['esh']['wireguard']['server']['pubint'] = 'enp1s0'
default['esh']['wireguard']['server']['routing'] = true
default['esh']['wireguard']['server']['peers'] = {
'3HNAZfx02qnpw2Tglrjs7KEnO3lUz1SZ/xUZUYGV6mo=': '10.10.10.3,fdaf:345d:a5fc::3,10.78.127.0/24,fd42:d7a4:755b:7893::/64',
'agIabJemiFUD+u8BCNmyO2PIgg2SGjQX573AIIkgExs=': '10.10.10.6,fdaf:345d:a5fc::6,10.121.231.1/24,fd42:4a26:3578:a318::1/64',
}
#'8j2fzeFgxk33a+cDemZluPAxlRN21bdmTMHVpayIhQg=': '10.10.10.4,fdaf:345d:a5fc::4,10.78.127.0/24,fd42:d7a4:755b:7893::/64',
#'2o41xCeNiUsfRMFg+fvbRIqTdAWjdPptMu8aRnZ3zyk=': '10.10.10.5'
###
# esh_letsencrypt
###
default['esh']['letsencrypt']['certs']['email'] = 'certbot@benpro.fr'
default['esh']['letsencrypt']['certs']['list'] = [
]
default['esh']['letsencrypt']['serve']['auth'] = ''
default['esh']['letsencrypt']['serve']['miniserve_url'] = 'https://github.com/svenstaro/miniserve/releases/download/v0.22.0/miniserve-0.22.0-x86_64-unknown-linux-gnu'
default['esh']['letsencrypt']['serve']['listen'] = '10.10.10.1'
###
# esh_haproxy
###
default['esh']['haproxy']['config']['stats_password'] = ''
default['esh']['haproxy']['config']['listen'] = {
'ssh': {
'bind': '22',
'mode': 'tcp',
'server': 'git-ssh 10.78.127.119:10022 send-proxy',
},
'smtp': {
'bind': '25',
'mode': 'tcp',
'server': 'mail 10.78.127.231:10025 send-proxy',
},
'smtps': {
'bind': '465',
'mode': 'tcp',
'server': 'mail 10.78.127.231:10465 send-proxy',
},
'imaps': {
'bind': '993',
'mode': 'tcp',
'server': 'mail 10.78.127.231:10993 send-proxy',
},
'sieve': {
'bind': '4190',
'mode': 'tcp',
'server': 'mail 10.78.127.231:14190 send-proxy',
},
'adguard-dot': {
'bind': '853',
'mode': 'tcp',
'server': 'adguard 10.78.127.201:10853 send-proxy',
},
}
default['esh']['haproxy']['config']['acls'] = {
'mail': {
'hosts': [
'mail.benoit.jp.net',
],
'denies': [
'!JP !letsencrypt',
],
'backend': 'mail',
},
'archive': {
'hosts': [
'blog.benpro.fr.archive.benoit.jp.net',
'lekernelpanique.fr.archive.benoit.jp.net',
'sysadmin-bookmarks.archive.benoit.jp.net',
],
'denies': [],
'backend': 'archive',
},
'mkdocs': {
'hosts': [
'www.benoit.jp.net',
'benoit.jp.net',
],
'denies': [],
'backend': 'mkdocs',
},
'mkdocs-laminar': {
'hosts': [
'laminar.benoit.jp.net',
],
'denies': [],
'backend': 'mkdocs-laminar',
},
'mkdocs-webhook': {
'hosts': [
'webhook.benoit.jp.net',
],
'denies': [],
'backend': 'mkdocs-webhook',
},
'flux': {
'hosts': [
'flux.benoit.jp.net',
],
'denies': [
'!JP !letsencrypt'
],
'backend': 'flux',
},
'dns': {
'hosts': [
'dns.benoit.jp.net',
'tangorpro.dns.benoit.jp.net',
'bluejay.dns.benoit.jp.net',
],
'denies': [
'!JP !SG !letsencrypt'
],
'backend': 'dns',
},
'git': {
'hosts': [
'git.benoit.jp.net',
],
'denies': [],
'backend': 'git',
},
'photos': {
'hosts': [
'photos.benoit.jp.net',
],
'denies': [
'!JP !FR !letsencrypt',
],
'backend': 'photos',
},
'kb': {
'hosts': [
'kb.benoit.jp.net',
],
'denies': [
'!JP !letsencrypt',
],
'backend': 'kb',
},
'pwd': {
'hosts': [
'pwd.benoit.jp.net',
],
'denies': [
'!JP !letsencrypt',
],
'backend': 'pwd',
},
'risanokyoku': {
'hosts': [
'risanokyoku.benoit.jp.net',
],
'denies': [
'!JP !letsencrypt',
],
'backend': 'risanokyoku',
},
'ytb': {
'hosts': [
'ytb.benoit.jp.net',
'ytb-proxy.benoit.jp.net',
'ytb-api.benoit.jp.net',
],
'denies': [
'!JP !letsencrypt',
],
'backend': 'ytb',
},
}
default['esh']['haproxy']['config']['backends'] = {
'archive': 'archive 10.78.127.252:80 check',
'dns': 'dns 10.78.127.201:443 check ssl verify none',
'flux': 'flux 10.78.127.111:8080 check',
'git': 'git 10.78.127.119:3000 check',
'kb': 'kb 10.78.127.127:80 check',
'mail': 'mail 10.78.127.231:80 check',
'mkdocs': 'mkdocs 10.78.127.73:80 check',
'mkdocs-laminar': 'mkdocs-laminar 10.78.127.73:8080 check',
'mkdocs-webhook': 'mkdocs-webhook 10.78.127.73:9000 check',
'photos': 'photos 10.78.127.121:2342 check',
'pwd': 'pwd 10.78.127.195:80 check',
'risanokyoku': 'risanokyoku 10.121.231.3:4533 check',
'ytb': 'ytb 10.78.127.55:8080 check',
}
default['esh']['haproxy']['config']['maxmind_key'] = ''
default['esh']['haproxy']['config']['hc_url'] = 'https://hc-ping.com/'

View file

@ -0,0 +1,14 @@
dst := 'none'
policy := 'none'

oneshot: update && export rsync

update:
    cinc-cli update {{policy}}.rb

export:
    cinc-cli export {{policy}}.lock.json ../exported-policies/{{policy}} --force

rsync:
    rsync -a --delete --ignore-errors ../exported-policies/{{policy}} {{dst}}

316
cinc-repo/policyfiles/kb.rb Normal file
View file

@ -0,0 +1,316 @@
name 'kb'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_kanboard::default
)
###
# Attributes
###
###
# esh_kanboard
###
default['esh']['kanboard']['version'] = '1.2.34'
default['esh']['kanboard']['config'] = <<~EOT
<?php
/*******************************************************************/
/* Rename this file to config.php if you want to change the values */
/* */
/* Make sure all paths are absolute by using __DIR__ where needed */
/*******************************************************************/
// Data folder (must be writeable by the web server user and absolute)
define('DATA_DIR', __DIR__.DIRECTORY_SEPARATOR.'data');
// Enable/Disable debug
define('DEBUG', false);
// Available log drivers: syslog, stderr, stdout, system or file
define('LOG_DRIVER', 'system');
// Log filename if the log driver is "file"
define('LOG_FILE', DATA_DIR.DIRECTORY_SEPARATOR.'debug.log');
// Plugins directory
define('PLUGINS_DIR', __DIR__.DIRECTORY_SEPARATOR.'plugins');
// Plugins directory URL
define('PLUGIN_API_URL', 'https://kanboard.org/plugins.json');
// Enable/Disable plugin installer (Disabled by default for security reasons)
// There is no code review or any approval process to submit a plugin.
// This is up to the Kanboard instance owner to validate if a plugin is legit.
define('PLUGIN_INSTALLER', false);
// Available cache drivers are "file" and "memory"
define('CACHE_DRIVER', 'memory');
// Cache folder to use if cache driver is "file" (must be writeable by the web server user)
define('CACHE_DIR', DATA_DIR.DIRECTORY_SEPARATOR.'cache');
// Folder for uploaded files (must be writeable by the web server user)
define('FILES_DIR', DATA_DIR.DIRECTORY_SEPARATOR.'files');
// Enable/disable email configuration from the user interface
define('MAIL_CONFIGURATION', true);
// E-mail address used for the "From" header (notifications)
define('MAIL_FROM', 'kb-no-reply@kb.benoit.jp.net');
// E-mail address used for the "Bcc" header to send a copy of all notifications
define('MAIL_BCC', '');
// Mail transport available: "smtp", "sendmail", "mail" (PHP mail function)
define('MAIL_TRANSPORT', 'smtp');
// SMTP configuration to use when the "smtp" transport is chosen
define('MAIL_SMTP_HOSTNAME', 'mail.benoit.jp.net');
define('MAIL_SMTP_PORT', '465');
define('MAIL_SMTP_USERNAME', 'kb-no-reply@benoit.jp.net');
define('MAIL_SMTP_PASSWORD', '');
define('MAIL_SMTP_HELO_NAME', 'kb.benoit.jp.net'); // valid: null (default), or FQDN
define('MAIL_SMTP_ENCRYPTION', 'ssl'); // Valid values are null (not a string "null"), "ssl" or "tls"
// Sendmail command to use when the transport is "sendmail"
define('MAIL_SENDMAIL_COMMAND', '/usr/sbin/sendmail -bs');
// Run automatically database migrations
// If set to false, you will have to run manually the SQL migrations from the CLI during the next Kanboard upgrade
// Do not run the migrations from multiple processes at the same time (example: web page + background worker)
define('DB_RUN_MIGRATIONS', true);
// Database driver: sqlite, mysql, postgres, odbc, dblib, or mssql (sqlite by default)
define('DB_DRIVER', 'sqlite');
// Database username
define('DB_USERNAME', 'root');
// Database password
define('DB_PASSWORD', '');
// Database hostname
define('DB_HOSTNAME', 'localhost');
// Database database name
define('DB_NAME', 'kanboard');
// Database custom port (null = default port)
define('DB_PORT', null);
// Mysql SSL key
define('DB_SSL_KEY', null);
// Mysql SSL certificate
define('DB_SSL_CERT', null);
// Mysql SSL CA
define('DB_SSL_CA', null);
// Mysql SSL server verification, set to false if you don't want the Mysql driver to validate the certificate CN
define('DB_VERIFY_SERVER_CERT', null);
// Timeout value for PDO attribute
define('DB_TIMEOUT', null);
// ODBC DSN (default: kanboard)
define('DB_ODBC_DSN', 'kanboard');
// Enable LDAP authentication (false by default)
define('LDAP_AUTH', false);
// LDAP server protocol, hostname and port URL (ldap[s]://hostname:port)
define('LDAP_SERVER', '');
// By default, require certificate to be verified for ldaps:// style URL. Set to false to skip the verification
define('LDAP_SSL_VERIFY', true);
// Enable LDAP START_TLS
define('LDAP_START_TLS', false);
// By default Kanboard lowercase the ldap username to avoid duplicate users (the database is case sensitive)
// Set to true if you want to preserve the case
define('LDAP_USERNAME_CASE_SENSITIVE', false);
// LDAP bind type: "anonymous", "user" or "proxy"
define('LDAP_BIND_TYPE', 'anonymous');
// LDAP username to use with proxy mode
// LDAP username pattern to use with user mode
define('LDAP_USERNAME', null);
// LDAP password to use for proxy mode
define('LDAP_PASSWORD', null);
// LDAP DN for users
// Example for ActiveDirectory: CN=Users,DC=kanboard,DC=local
// Example for OpenLDAP: ou=People,dc=example,dc=com
define('LDAP_USER_BASE_DN', '');
// LDAP pattern to use when searching for a user account
// Example for ActiveDirectory: '(&(objectClass=user)(sAMAccountName=%s))'
// Example for OpenLDAP: 'uid=%s'
define('LDAP_USER_FILTER', '');
// LDAP attribute for username
// Example for ActiveDirectory: 'sAMAccountName'
// Example for OpenLDAP: 'uid'
define('LDAP_USER_ATTRIBUTE_USERNAME', 'uid');
// LDAP attribute for user full name
// Example for ActiveDirectory: 'displayname'
// Example for OpenLDAP: 'cn'
define('LDAP_USER_ATTRIBUTE_FULLNAME', 'cn');
// LDAP attribute for user email
define('LDAP_USER_ATTRIBUTE_EMAIL', 'mail');
// LDAP attribute to find groups in user profile
define('LDAP_USER_ATTRIBUTE_GROUPS', 'memberof');
// LDAP attribute for user avatar image: thumbnailPhoto or jpegPhoto
define('LDAP_USER_ATTRIBUTE_PHOTO', '');
// LDAP attribute for user language, example: 'preferredlanguage'
// Put an empty string to disable language sync
define('LDAP_USER_ATTRIBUTE_LANGUAGE', '');
// Automatically create a user profile when a user authenticates via LDAP.
// If set to false, only LDAP users can log in for whom a Kanboard profile already exists.
define('LDAP_USER_CREATION', true);
// Set new user as Manager
define('LDAP_USER_DEFAULT_ROLE_MANAGER', false);
// LDAP DN for administrators
// Example: CN=Kanboard-Admins,CN=Users,DC=kanboard,DC=local
define('LDAP_GROUP_ADMIN_DN', '');
// LDAP DN for managers
// Example: CN=Kanboard Managers,CN=Users,DC=kanboard,DC=local
define('LDAP_GROUP_MANAGER_DN', '');
// Enable LDAP group provider for project permissions
// The end-user will be able to browse LDAP groups from the user interface and allow access to specified projects
define('LDAP_GROUP_PROVIDER', false);
// LDAP Base DN for groups
define('LDAP_GROUP_BASE_DN', '');
// LDAP group filter
// Example for ActiveDirectory: (&(objectClass=group)(sAMAccountName=%s*))
define('LDAP_GROUP_FILTER', '');
// LDAP user group filter
// If this filter is configured, Kanboard will search user groups in LDAP_GROUP_BASE_DN with this filter
// Example for OpenLDAP: (&(objectClass=posixGroup)(memberUid=%s))
define('LDAP_GROUP_USER_FILTER', '');
// LDAP attribute for the user in the group filter
// 'username' or 'dn'
define('LDAP_GROUP_USER_ATTRIBUTE', 'username');
// LDAP attribute for the group name
define('LDAP_GROUP_ATTRIBUTE_NAME', 'cn');
// Enable/Disable groups synchronization when external authentication is used.
define('LDAP_GROUP_SYNC', true);
// Enable/disable the reverse proxy authentication
define('REVERSE_PROXY_AUTH', false);
// Header name to use for the username
define('REVERSE_PROXY_USER_HEADER', 'REMOTE_USER');
// Username of the admin, by default blank
define('REVERSE_PROXY_DEFAULT_ADMIN', '');
// Header name to use for the user email
define('REVERSE_PROXY_EMAIL_HEADER', 'REMOTE_EMAIL');
// Header name to use for the user full name
define('REVERSE_PROXY_FULLNAME_HEADER', 'REMOTE_NAME');
// Default domain to use for setting the email address
define('REVERSE_PROXY_DEFAULT_DOMAIN', '');
// Enable/disable remember me authentication
define('REMEMBER_ME_AUTH', true);
// Enable or disable "Strict-Transport-Security" HTTP header
define('ENABLE_HSTS', true);
// Enable or disable "X-Frame-Options: DENY" HTTP header
define('ENABLE_XFRAME', true);
// Escape html inside markdown text
define('MARKDOWN_ESCAPE_HTML', true);
// API alternative authentication header, the default is HTTP Basic Authentication defined in RFC2617
define('API_AUTHENTICATION_HEADER', '');
// Enable/disable url rewrite
define('ENABLE_URL_REWRITE', false);
// Hide login form, useful if all your users use Google/Github/ReverseProxy authentication
define('HIDE_LOGIN_FORM', false);
// Disabling logout (useful for external SSO authentication)
define('DISABLE_LOGOUT', false);
// Enable captcha after 3 authentication failure
define('BRUTEFORCE_CAPTCHA', 3);
// Lock the account after 6 authentication failure
define('BRUTEFORCE_LOCKDOWN', 6);
// Lock account duration in minute
define('BRUTEFORCE_LOCKDOWN_DURATION', 15);
// Session duration in second (0 = until the browser is closed)
// See http://php.net/manual/en/session.configuration.php#ini.session.cookie-lifetime
define('SESSION_DURATION', 0);
// Session handler: db or php
define('SESSION_HANDLER', 'db');
// HTTP client proxy
define('HTTP_PROXY_HOSTNAME', '');
define('HTTP_PROXY_PORT', '3128');
define('HTTP_PROXY_USERNAME', '');
define('HTTP_PROXY_PASSWORD', '');
define('HTTP_PROXY_EXCLUDE', 'localhost');
// Set to false to allow self-signed certificates
define('HTTP_VERIFY_SSL_CERTIFICATE', true);
// TOTP (2FA) issuer name
define('TOTP_ISSUER', 'Kanboard');
// Comma separated list of fields to not synchronize when using external authentication providers
define('EXTERNAL_AUTH_EXCLUDE_FIELDS', 'username');
// Enable or disable displaying group-memberships in userlist (true by default)
define('SHOW_GROUP_MEMBERSHIPS_IN_USERLIST', true);
// Limit number of groups to display in userlist (The full list of group-memberships is always shown, ...
// ... when hovering the mouse over the group-icon of a given user!)
// If set to 0 ALL group-memberships will be listed (7 by default)
define('SHOW_GROUP_MEMBERSHIPS_IN_USERLIST_WITH_LIMIT', 7);
EOT

253
cinc-repo/policyfiles/lxd101.rb Normal file
View file

@ -0,0 +1,253 @@
name 'lxd101'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_system::hostname
esh_system::postfix
esh_wireguard::peer
esh_zfs::package
esh_zfs::pool
esh_zfs::scrub
esh_lxd::setup
esh_lxd::containers
esh_lxd::resolved
esh_zfs::autobackup
esh_borgmatic::setup
)
###
# Attributes
###
###
# esh_system
###
default['esh']['system']['hostname']['fqdn'] = 'lxd101.home.arpa'
###
# esh_wireguard
###
default['esh']['wireguard']['peer']['privkey'] = '='
default['esh']['wireguard']['peer']['pubkey'] = '3HNAZfx02qnpw2Tglrjs7KEnO3lUz1SZ/xUZUYGV6mo='
default['esh']['wireguard']['server']['pubkey'] = '3JJ00aMP/1mPJeUW0sci4dIK4S4XBiTWWaBgZgq+LCQ='
default['esh']['wireguard']['peer']['address'] = '10.10.10.3/24, fdaf:345d:a5fc::3/64'
#default['esh']['wireguard']['peer']['dns'] = '108.61.10.10 2001:19f0:300:1704::6'
default['esh']['wireguard']['peer']['allowedips'] = '0.0.0.0/0, ::0/0'
default['esh']['wireguard']['peer']['endpoint'] = 'gtw.benoit.jp.net:51820'
###
# esh_zfs
###
default['esh']['zfs']['pools'] = {
'nvme': {
'mount_point': 'none',
'ashift': 12,
'autotrim': 'on',
'lz4_compress': 'enabled',
'compression': 'on',
'dedup': 'on',
'target': '/dev/vdb',
},
'hdd': {
'mount_point': 'none',
'ashift': 12,
'autotrim': 'off',
'lz4_compress': 'enabled',
'compression': 'on',
'dedup': 'on',
'target': '/dev/vdc',
},
}
default['esh']['zfs']['scrub']['hc_url'] = 'https://hc-ping.com/'
default['esh']['zfs']['autobackup']['src'] = "#{default['esh']['zfs']['pools'].keys[0]}/custom"
default['esh']['zfs']['autobackup']['dst'] = "#{default['esh']['zfs']['pools'].keys[1]}/custom"
###
# esh_lxd
###
# The MTU needs to be set to 1340 when WireGuard is used on the host.
# Otherwise, the default value of 1500 is fine and does not need to be set.
default['esh']['lxd']['mtu'] = 1340
# mkdocs, need to rename laminar to mkdocs-laminar
default['esh']['lxd']['containers'] = {
'archive': {
'type': 'lxc',
'image': 'alpine/edge',
'volumes': {
'archive-www': {
'pool': 'nvme',
'path': '/var/www',
},
},
},
'git': {
'type': 'lxc',
'cookbook': 'esh_forgejo',
'image': 'ubuntu/jammy',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'git-gitea': {
'pool': 'nvme',
'path': '/var/lib/gitea',
},
'git-mysql': {
'pool': 'nvme',
'path': '/var/lib/mysql',
},
},
},
'kb': {
'type': 'lxc',
'cookbook': 'esh_kanboard',
'image': 'ubuntu/jammy',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'kb-www': {
'pool': 'nvme',
'path': '/var/www',
},
},
},
'pwd': {
'type': 'lxc',
'cookbook': 'esh_vaultwarden',
'image': 'ubuntu/jammy',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'pwd-vaultwarden': {
'pool': 'nvme',
'path': '/var/lib/vaultwarden',
},
},
},
'flux': {
'type': 'lxc',
'cookbook': 'esh_miniflux',
'image': 'ubuntu/jammy',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'flux-postgresql': {
'pool': 'nvme',
'path': '/var/lib/postgresql',
},
},
},
'dns': {
'type': 'lxc',
'cookbook': 'esh_adguard',
'image': 'ubuntu/jammy',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'flux-adguard': {
'pool': 'nvme',
'path': '/var/lib/adguard',
},
},
},
'mkdocs': {
'type': 'lxc',
'cookbook': 'esh_mkdocs',
'image': 'debian/11',
'cinc_flavor': 'debian/11',
'volumes': {
'laminar': {
'pool': 'nvme',
'path': '/var/lib/laminar',
},
},
},
'mail': {
'type': 'vm',
'mem': '4GiB',
'cpu': '4',
'disk': '15GiB',
'cookbook': 'esh_mailcow',
'image': 'ubuntu/jammy/cloud',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'mail-docker': {
'pool': 'nvme',
'path': '/var/lib/docker/volumes',
},
'mail-opt': {
'pool': 'nvme',
'path': '/opt',
},
},
},
'photos': {
'type': 'vm',
'mem': '4GiB',
'cpu': '4',
'disk': '10GiB',
'cookbook': 'esh_photoprism',
'image': 'ubuntu/jammy/cloud',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'photos-originals': {
'pool': 'nvme',
'path': '/var/lib/photos-originals',
},
'photos-storage': {
'pool': 'nvme',
'path': '/var/lib/photos-storage',
},
'photos-mysql': {
'pool': 'nvme',
'path': '/var/lib/mysql',
},
},
},
'ytb': {
'type': 'vm',
'mem': '2GiB',
'cpu': '4',
'disk': '10GiB',
'cookbook': 'esh_piped',
'image': 'ubuntu/jammy/cloud',
'cinc_flavor': 'ubuntu/22.04',
'volumes': {
'ytb-postgresql': {
'pool': 'nvme',
'path': '/var/lib/postgresql',
},
},
},
}
###
# esh_borgmatic
###
default['esh']['borgmatic']['ssh_priv'] = <<~EOT
EOT
default['esh']['borgmatic']['ssh_pub'] = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEqqdXPFhbZovVg3iBxBjqp+us461HIbKaduldRs587z borgmatic@lxd101'
default['esh']['borgmatic']['config']['repo_passphrase'] = ''
default['esh']['borgmatic']['config']['repo'] = 'iu7hbr8r@iu7hbr8r.repo.borgbase.com:repo'
default['esh']['borgmatic']['config']['location_src'] = "#{default['esh']['zfs']['pools'].keys[1]}/#{default['esh']['zfs']['pools'].keys[0]}/custom"
default['esh']['borgmatic']['config']['healthchecks'] = 'https://hc-ping.com/'
default['esh']['borgmatic']['config']['before_backup'] = [
'lxc exec photos -- sh -c "docker exec -u 999 -t photoprism-mariadb-1 sh -c \'mysqldump -u photoprism --password= --opt --force --hex-blob photoprism > /var/lib/mysql/photoprism.sql\'"',
'lxc exec ytb -- sh -c "docker exec -u 999 -t postgres sh -c \'pg_dump -U piped piped > /var/lib/postgresql/data/piped.sql\'"',
]
default['esh']['borgmatic']['timer'] = '*-*-* 21:00:00'
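The `containers` hash above is the single source of truth for what `esh_lxd::containers` builds: each key is an instance name with its image, optional VM sizing, per-container volumes (pool and mount path) and the cookbook used to provision it from inside. A minimal sketch of how such a hash can be iterated is shown below; this is illustrative only, not the actual esh_lxd recipe, and the `lxc` invocations are assumptions about a typical LXD CLI workflow.

```ruby
# Illustrative sketch only -- not the actual esh_lxd::containers implementation.
# Assumes the attribute layout defined above:
#   name => { image, type, volumes => { volume_name => { pool, path } } }
node['esh']['lxd']['containers'].each do |name, cfg|
  vm_flag = cfg['type'] == 'vm' ? '--vm' : ''

  execute "launch #{name}" do
    command "lxc launch images:#{cfg['image']} #{name} #{vm_flag}".strip
    not_if "lxc info #{name}"
  end

  (cfg['volumes'] || {}).each do |volume, opts|
    execute "attach #{volume} to #{name}" do
      command "lxc storage volume attach #{opts['pool']} #{volume} #{name} #{opts['path']}"
      not_if "lxc config device show #{name} | grep -q '^#{volume}:'"
    end
  end
end
```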

View file

@ -0,0 +1,183 @@
name 'lxd2204'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_zfs::package
esh_zfs::pool
esh_zfs::scrub
esh_lxd::setup
esh_cloudflared::install
esh_cloudflared::cert
esh_lxd::containers
esh_lxd::resolved
)
###
# Attributes
###
###
# esh_zfs
###
default['esh']['zfs']['pools'] = {
'nvme': {
'mount_point': 'none',
'ashift': 12,
'autotrim': 'on',
'lz4_compress': 'enabled',
'compression': 'on',
'dedup': 'on',
'target': '/dev/vdb',
},
'hdd': {
'mount_point': 'none',
'ashift': 12,
'autotrim': 'off',
'lz4_compress': 'enabled',
'compression': 'on',
'dedup': 'off',
'target': '/dev/vdc',
},
'backup': {
'mount_point': 'none',
'ashift': 12,
'autotrim': 'off',
'lz4_compress': 'enabled',
'compression': 'on',
'dedup': 'on',
'target': '/dev/vdd',
},
}
default['esh']['zfs']['scrub']['hc_url'] = 'https://hc-ping.com/'
###
# esh_lxd
###
default['esh']['lxd']['containers'] = {
'archive': {
'image': 'debian/11',
'volumes': {
'archive-data': {
'pool': 'nvme',
'path': '/var/lib/archive-data',
},
},
'cloudflared': {
'archive': {
'archive.benpro.fr': 'http://archive:80',
},
},
},
'arc': {
'cookbook': 'esh_archivebox',
'image': 'debian/11',
'cinc_flavor': 'debian/11',
'volumes': {
'arc-data': {
'pool': 'nvme',
'path': '/var/lib/arc-data',
},
},
'apparmor': 'unconfined',
'cloudflared': {
'arc': {
'arc.benpro.fr': 'http://arc:8000',
},
},
},
'ytb': {
'cookbook': 'esh_piped',
'image': 'debian/11',
'cinc_flavor': 'debian/11',
'volumes': {
'ytb-postgresql': {
'pool': 'nvme',
'path': '/var/lib/postgresql',
},
},
'apparmor': 'unconfined',
'cloudflared': {
'ytb': {
'ytb.benpro.fr': 'http://ytb:80',
'ytb-proxy.benpro.fr': 'http://ytb:80',
'ytb-api.benpro.fr': 'http://ytb:80',
},
},
},
'blog': {
'cookbook': 'esh_writefreely',
'image': 'debian/11',
'cinc_flavor': 'debian/11',
'volumes': {
'blog-mysql': {
'pool': 'nvme',
'path': '/var/lib/mysql',
},
},
'cloudflared': {
'blog': {
'blog.benpro.fr': 'http://blog:80',
},
},
},
'twt': {
'cookbook': 'esh_nitter',
'image': 'debian/11',
'cinc_flavor': 'debian/11',
'volumes': {},
'cloudflared': {
'twt': {
'twt.benpro.fr': 'http://twt:80',
},
},
},
'gallery': {
'cookbook': 'esh_photoprism',
'image': 'debian/11',
'cinc_flavor': 'debian/11',
'volumes': {
'gallery-originals': {
'pool': 'hdd',
'path': '/var/lib/gallery-originals',
},
'gallery-storage': {
'pool': 'nvme',
'path': '/var/lib/gallery-storage',
},
'gallery-mysql': {
'pool': 'nvme',
'path': '/var/lib/mysql',
},
},
'apparmor': 'unconfined',
'cloudflared': {
'gallery': {
'gallery.benpro.fr': 'http://gallery:80',
'gallery-sync.benpro.fr': 'http://gallery:8384',
},
},
},
}
###
# esh_cloudflared
###
default['esh']['cloudflared']['cert'] = <<~EOT
EOT

View file

@ -0,0 +1,54 @@
name 'mail'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
#esh_system::hostname
run_list %w(
esh_system::postfix
esh_docker::service
esh_mailcow::install
)
###
# Attributes
###
###
# esh_system
###
default['esh']['system']['hostname']['fqdn'] = 'mail.home.arpa'
###
# esh_mailcow
###
default['esh']['mailcow']['install']['fqdn'] = 'mail.benoit.jp.net'
default['esh']['mailcow']['install']['timezone'] = 'Asia/Tokyo'
# Set to 1 for stable updates
# Set to 2 for unstable updates, testing
default['esh']['mailcow']['install']['branch'] = '1'
default['esh']['mailcow']['install']['haproxy'] = true
default['esh']['mailcow']['install']['haproxy_trusted_networks'] = '10.10.10.0/24'
default['esh']['mailcow']['install']['postfix_myhostname'] = 'gtw.benoit.jp.net'
default['esh']['mailcow']['install']['cert_pub'] = 'http://10.10.10.1:8898/mail.benoit.jp.net/fullchain.pem'
default['esh']['mailcow']['install']['cert_priv'] = 'http://10.10.10.1:8898/mail.benoit.jp.net/privkey.pem'
default['esh']['mailcow']['install']['cert_auth'] = ''
default['esh']['mailcow']['install']['clamd'] = false

View file

@ -0,0 +1,39 @@
name 'mkdocs'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_laminar::service
esh_mkdocs::install
esh_webhook::system
esh_webhook::service
)
###
# Attributes
###
###
# esh_laminar
###
default['esh']['laminar']['service']['package'] = 'https://github.com/ohwgiles/laminar/releases/download/1.2/laminar_1.2-1.upstream-debian11_amd64.deb'
###
# esh_webhook
###
default['esh']['webhook']['service']['version'] = '2.8.0'
default['esh']['webhook']['service']['hook_secret'] = ''

View file

@ -0,0 +1,118 @@
name 'photos'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_docker::service
esh_photoprism::system
esh_photoprism::compose
esh_syncthing::service
)
###
# Attributes
###
###
# esh_photoprism
###
PHOTOPRISM_ADMIN_USER = 'benoit'.freeze
PHOTOPRISM_ADMIN_PASSWORD = ''.freeze
PHOTOPRISM_AUTH_MODE = 'password'.freeze
PHOTOPRISM_SITE_URL = 'https://photos.benoit.jp.net'.freeze
PHOTOPRISM_DISABLE_TLS = 'false'.freeze
PHOTOPRISM_DEFAULT_TLS = 'false'.freeze
PHOTOPRISM_ORIGINALS_LIMIT = 5000
PHOTOPRISM_HTTP_COMPRESSION = 'gzip'.freeze
PHOTOPRISM_LOG_LEVEL = 'info'.freeze
PHOTOPRISM_READONLY = 'true'.freeze
PHOTOPRISM_EXPERIMENTAL = 'false'.freeze
PHOTOPRISM_DISABLE_CHOWN = 'true'.freeze
PHOTOPRISM_DISABLE_WEBDAV = 'true'.freeze
PHOTOPRISM_DISABLE_SETTINGS = 'false'.freeze
PHOTOPRISM_DISABLE_TENSORFLOW = 'false'.freeze
PHOTOPRISM_DISABLE_FACES = 'false'.freeze
PHOTOPRISM_DISABLE_CLASSIFICATION = 'false'.freeze
PHOTOPRISM_DISABLE_VECTORS = 'false'.freeze
PHOTOPRISM_DISABLE_RAW = 'false'.freeze
PHOTOPRISM_RAW_PRESETS = 'false'.freeze
PHOTOPRISM_JPEG_QUALITY = 85
PHOTOPRISM_DETECT_NSFW = 'false'.freeze
PHOTOPRISM_UPLOAD_NSFW = 'true'.freeze
PHOTOPRISM_DATABASE_DRIVER = 'mysql'.freeze
PHOTOPRISM_DATABASE_SERVER = 'mariadb:3306'.freeze
PHOTOPRISM_DATABASE_NAME = 'photoprism'.freeze
PHOTOPRISM_DATABASE_USER = 'photoprism'.freeze
PHOTOPRISM_DATABASE_PASSWORD = 'Enrage-Spring-Refill1'.freeze
PHOTOPRISM_SITE_CAPTION = 'Photos by Benoit'.freeze
PHOTOPRISM_SITE_DESCRIPTION = 'Photos by Benoit'.freeze
PHOTOPRISM_SITE_AUTHOR = 'benoit.jp.net'.freeze
PHOTOPRISM_INIT = 'tensorflow'.freeze
PHOTOPRISM_UID = 998
PHOTOPRISM_GID = 998
default['esh']['photoprism']['environment'] = [
"PHOTOPRISM_ADMIN_USER: #{PHOTOPRISM_ADMIN_USER}",
"PHOTOPRISM_ADMIN_PASSWORD: #{PHOTOPRISM_ADMIN_PASSWORD}",
"PHOTOPRISM_AUTH_MODE: #{PHOTOPRISM_AUTH_MODE}",
"PHOTOPRISM_SITE_URL: #{PHOTOPRISM_SITE_URL}",
"PHOTOPRISM_DISABLE_TLS: #{PHOTOPRISM_DISABLE_TLS}",
"PHOTOPRISM_DEFAULT_TLS: #{PHOTOPRISM_DEFAULT_TLS}",
"PHOTOPRISM_ORIGINALS_LIMIT: #{PHOTOPRISM_ORIGINALS_LIMIT}",
"PHOTOPRISM_HTTP_COMPRESSION: #{PHOTOPRISM_HTTP_COMPRESSION}",
"PHOTOPRISM_LOG_LEVEL: #{PHOTOPRISM_LOG_LEVEL}",
"PHOTOPRISM_READONLY: #{PHOTOPRISM_READONLY}",
"PHOTOPRISM_EXPERIMENTAL: #{PHOTOPRISM_EXPERIMENTAL}",
"PHOTOPRISM_DISABLE_CHOWN: #{PHOTOPRISM_DISABLE_CHOWN}",
"PHOTOPRISM_DISABLE_WEBDAV: #{PHOTOPRISM_DISABLE_WEBDAV}",
"PHOTOPRISM_DISABLE_SETTINGS: #{PHOTOPRISM_DISABLE_SETTINGS}",
"PHOTOPRISM_DISABLE_TENSORFLOW: #{PHOTOPRISM_DISABLE_TENSORFLOW}",
"PHOTOPRISM_DISABLE_FACES: #{PHOTOPRISM_DISABLE_FACES}",
"PHOTOPRISM_DISABLE_CLASSIFICATION: #{PHOTOPRISM_DISABLE_CLASSIFICATION}",
"PHOTOPRISM_DISABLE_VECTORS: #{PHOTOPRISM_DISABLE_VECTORS}",
"PHOTOPRISM_DISABLE_RAW: #{PHOTOPRISM_DISABLE_RAW}",
"PHOTOPRISM_RAW_PRESETS: #{PHOTOPRISM_RAW_PRESETS}",
"PHOTOPRISM_JPEG_QUALITY: #{PHOTOPRISM_JPEG_QUALITY}",
"PHOTOPRISM_DETECT_NSFW: #{PHOTOPRISM_DETECT_NSFW}",
"PHOTOPRISM_UPLOAD_NSFW: #{PHOTOPRISM_UPLOAD_NSFW}",
"PHOTOPRISM_DATABASE_DRIVER: #{PHOTOPRISM_DATABASE_DRIVER}",
"PHOTOPRISM_DATABASE_SERVER: #{PHOTOPRISM_DATABASE_SERVER}",
"PHOTOPRISM_DATABASE_NAME: #{PHOTOPRISM_DATABASE_NAME}",
"PHOTOPRISM_DATABASE_USER: #{PHOTOPRISM_DATABASE_USER}",
"PHOTOPRISM_DATABASE_PASSWORD: #{PHOTOPRISM_DATABASE_PASSWORD}",
"PHOTOPRISM_SITE_CAPTION: #{PHOTOPRISM_SITE_CAPTION}",
"PHOTOPRISM_SITE_DESCRIPTION: #{PHOTOPRISM_SITE_DESCRIPTION}",
"PHOTOPRISM_SITE_AUTHOR: #{PHOTOPRISM_SITE_AUTHOR}",
"PHOTOPRISM_INIT: #{PHOTOPRISM_INIT}",
"PHOTOPRISM_UID: #{PHOTOPRISM_UID}",
"PHOTOPRISM_GID: #{PHOTOPRISM_GID}",
]
default['esh']['photoprism']['volumes'] = %w(
/var/lib/photos-originals:/photoprism/originals
/var/lib/photos-storage:/photoprism/storage
)
default['esh']['photoprism']['mariadb']['volumes'] = %w(
/var/lib/mysql:/var/lib/mysql
)
default['esh']['photoprism']['mariadb']['password'] = PHOTOPRISM_DATABASE_PASSWORD
default['esh']['photoprism']['mariadb']['root_password'] = ''
###
# esh_syncthing
###
default['esh']['syncthing']['service']['user'] = 'photoprism'
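The PhotoPrism settings above are collected as a flat list of `KEY: value` strings rather than a hash, so a compose template only has to indent and join them. A hedged sketch of that rendering step follows; the target path and resource are placeholders, not the actual esh_photoprism::compose template.

```ruby
# Placeholder sketch -- assumes each entry is a "KEY: value" string as defined above.
env_lines = node['esh']['photoprism']['environment'].map { |line| "      #{line}" }

file '/opt/photoprism/environment.yml' do
  content(['    environment:', *env_lines].join("\n") + "\n")
end
```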

View file

@ -0,0 +1,590 @@
name 'pwd'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_vaultwarden::service
)
###
# Attributes
###
###
# esh_vaultwarden
###
default['esh']['vaultwarden']['docker']['image'] = 'docker.io/vaultwarden/server:1.32.4'
default['esh']['vaultwarden']['service']['config'] = <<~EOT
# shellcheck disable=SC2034,SC2148
## Vaultwarden Configuration File
## Uncomment any of the following lines to change the defaults
##
## Be aware that most of these settings will be overridden if they were changed
## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
##
## By default, Vaultwarden expects for this file to be named ".env" and located
## in the current working directory. If this is not the case, the environment
## variable ENV_FILE can be set to the location of this file prior to starting
## Vaultwarden.
####################
### Data folders ###
####################
## Main data folder
DATA_FOLDER=/var/lib/vaultwarden
## Individual folders, these override %DATA_FOLDER%
# RSA_KEY_FILENAME=data/rsa_key
# ICON_CACHE_FOLDER=data/icon_cache
# ATTACHMENTS_FOLDER=data/attachments
# SENDS_FOLDER=data/sends
# TMP_FOLDER=data/tmp
## Templates data folder, by default uses embedded templates
## Check source code to see the format
# TEMPLATES_FOLDER=data/templates
## Automatically reload the templates for every request, slow, use only for development
# RELOAD_TEMPLATES=false
## Web vault settings
WEB_VAULT_FOLDER=/opt/undocker/vaultwarden/server/rootfs/web-vault
WEB_VAULT_ENABLED=true
#########################
### Database settings ###
#########################
## Database URL
## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
# DATABASE_URL=data/db.sqlite3
## When using MySQL, specify an appropriate connection URI.
## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
# DATABASE_URL=mysql://user:password@host[:port]/database_name
## When using PostgreSQL, specify an appropriate connection URI (recommended)
## or keyword/value connection string.
## Details:
## - https://docs.diesel.rs/2.1.x/diesel/pg/struct.PgConnection.html
## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
# DATABASE_URL=postgresql://user:password@host[:port]/database_name
## Enable WAL for the DB
## Set to false to avoid enabling WAL during startup.
## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
## this setting only prevents Vaultwarden from automatically enabling it on start.
## Please read project wiki page about this setting first before changing the value as it can
## cause performance degradation or might render the service unable to start.
# ENABLE_DB_WAL=true
## Database connection retries
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15
## Database timeout
## Timeout when acquiring database connection
# DATABASE_TIMEOUT=30
## Database max connections
## Define the size of the connection pool used for connecting to the database.
# DATABASE_MAX_CONNS=10
## Database connection initialization
## Allows SQL statements to be run whenever a new database connection is created.
## This is mainly useful for connection-scoped pragmas.
## If empty, a database-specific default is used:
## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
## - MySQL: ""
## - PostgreSQL: ""
# DATABASE_CONN_INIT=""
#################
### WebSocket ###
#################
## Enable websocket notifications
ENABLE_WEBSOCKET=true
##########################
### Push notifications ###
##########################
## Enables push notifications (requires key and id from https://bitwarden.com/host)
## Details about mobile client push notification:
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-Mobile-Client-push-notification
# PUSH_ENABLED=false
# PUSH_INSTALLATION_ID=CHANGEME
# PUSH_INSTALLATION_KEY=CHANGEME
# WARNING: Do not modify the following settings unless you fully understand their implications!
# Default Push Relay and Identity URIs
# PUSH_RELAY_URI=https://push.bitwarden.com
# PUSH_IDENTITY_URI=https://identity.bitwarden.com
# European Union Data Region Settings
# If you have selected "European Union" as your data region, use the following URIs instead.
# PUSH_RELAY_URI=https://api.bitwarden.eu
# PUSH_IDENTITY_URI=https://identity.bitwarden.eu
#####################
### Schedule jobs ###
#####################
## Job scheduler settings
##
## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
## and are always in terms of UTC time (regardless of your local time zone settings).
##
## The schedule format is a bit different from crontab as crontab does not contain seconds.
## You can test the format here: https://crontab.guru, but remove the first digit!
## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK
## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
## "0 30 * * * * "
## "0 30 1 * * * "
##
## How often (in ms) the job scheduler thread checks for jobs that need running.
## Set to 0 to globally disable scheduled jobs.
# JOB_POLL_INTERVAL_MS=30000
##
## Cron schedule of the job that checks for Sends past their deletion date.
## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
# SEND_PURGE_SCHEDULE="0 5 * * * *"
##
## Cron schedule of the job that checks for trashed items to delete permanently.
## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
##
## Cron schedule of the job that checks for incomplete 2FA logins.
## Defaults to once every minute. Set blank to disable this job.
# INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
##
## Cron schedule of the job that sends expiration reminders to emergency access grantors.
## Defaults to hourly (3 minutes after the hour). Set blank to disable this job.
# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 3 * * * *"
##
## Cron schedule of the job that grants emergency access requests that have met the required wait time.
## Defaults to hourly (7 minutes after the hour). Set blank to disable this job.
# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 7 * * * *"
##
## Cron schedule of the job that cleans old events from the event table.
## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
## Number of days to retain events stored in the database.
## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
# EVENTS_DAYS_RETAIN=
##
## Cron schedule of the job that cleans old auth requests from the auth request.
## Defaults to every minute. Set blank to disable this job.
# AUTH_REQUEST_PURGE_SCHEDULE="30 * * * * *"
##
## Cron schedule of the job that cleans expired Duo contexts from the database. Does nothing if Duo MFA is disabled or set to use the legacy iframe prompt.
## Defaults to every minute. Set blank to disable this job.
# DUO_CONTEXT_PURGE_SCHEDULE="30 * * * * *"
########################
### General settings ###
########################
## Domain settings
## The domain must match the address from where you access the server
## It's recommended to configure this value, otherwise certain functionality might not work,
## like attachment downloads, email links and U2F.
## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
## To use HTTPS, the recommended way is to put Vaultwarden behind a reverse proxy
## Details:
## - https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS
## - https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples
## For development
# DOMAIN=http://localhost
## For public server
DOMAIN=https://pwd.benoit.jp.net
## For public server (URL with port number)
# DOMAIN=https://vw.domain.tld:8443
## For public server (URL with path)
# DOMAIN=https://domain.tld/vw
## Controls whether users are allowed to create Bitwarden Sends.
## This setting applies globally to all users.
## To control this on a per-org basis instead, use the "Disable Send" org policy.
# SENDS_ALLOWED=true
## HIBP Api Key
## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
# HIBP_API_KEY=
## Per-organization attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per organization.
## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
# ORG_ATTACHMENT_LIMIT=
## Per-user attachment storage limit (KB)
## Max kilobytes of attachment storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further attachments.
# USER_ATTACHMENT_LIMIT=
## Per-user send storage limit (KB)
## Max kilobytes of send storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further sends.
# USER_SEND_LIMIT=
## Number of days to wait before auto-deleting a trashed item.
## If unset (the default), trashed items are not auto-deleted.
## This setting applies globally, so make sure to inform all users of any changes to this setting.
# TRASH_AUTO_DELETE_DAYS=
## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
## resulting in an email notification. An incomplete 2FA login is one where the correct
## master password was provided but the required 2FA step was not completed, which
## potentially indicates a master password compromise. Set to 0 to disable this check.
## This setting applies globally to all users.
# INCOMPLETE_2FA_TIME_LIMIT=3
## Disable icon downloading
## Set to true to disable icon downloading in the internal icon service.
## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
## will be deleted eventually, but won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false
## Controls if new users can register
SIGNUPS_ALLOWED=false
## Controls if new users need to verify their email address upon registration
## Note that setting this option to true prevents logins until the email address has been verified!
## The welcome email will include a verification link, and login attempts will periodically
## trigger another verification email to be sent.
SIGNUPS_VERIFY=true
## If SIGNUPS_VERIFY is set to true, this limits how many seconds after the last time
## an email verification link has been sent another verification email will be sent
SIGNUPS_VERIFY_RESEND_TIME=600
## If SIGNUPS_VERIFY is set to true, this limits how many times an email verification
## email will be re-sent upon an attempted login.
# SIGNUPS_VERIFY_RESEND_LIMIT=6
## Controls if new users from a list of comma-separated domains can register
## even if SIGNUPS_ALLOWED is set to false
# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
## Controls whether event logging is enabled for organizations
## This setting applies to organizations.
## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
# ORG_EVENTS_ENABLED=false
## Controls which users can create new orgs.
## Blank or 'all' means all users can create orgs (this is the default):
# ORG_CREATION_USERS=
## 'none' means no users can create orgs:
# ORG_CREATION_USERS=none
## A comma-separated list means only those users can create orgs:
# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
## Allows org admins to invite users, even when signups are disabled
# INVITATIONS_ALLOWED=true
## Name shown in the invitation emails that don't come from a specific organization
# INVITATION_ORG_NAME=Vaultwarden
## The number of hours after which an organization invite token, emergency access invite token,
## email verification token and deletion request token will expire (must be at least 1)
# INVITATION_EXPIRATION_HOURS=120
## Controls whether users can enable emergency access to their accounts.
## This setting applies globally to all users.
# EMERGENCY_ACCESS_ALLOWED=true
## Controls whether users can change their email.
## This setting applies globally to all users
# EMAIL_CHANGE_ALLOWED=true
## Number of server-side passwords hashing iterations for the password hash.
## The default for new users. If changed, it will be updated during login for existing users.
# PASSWORD_ITERATIONS=600000
## Controls whether users can set password hints. This setting applies globally to all users.
# PASSWORD_HINTS_ALLOWED=true
## Controls whether a password hint should be shown directly in the web page if
## SMTP service is not configured. Not recommended for publicly-accessible instances
## as this provides unauthenticated access to potentially sensitive data.
# SHOW_PASSWORD_HINT=false
#########################
### Advanced settings ###
#########################
## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
IP_HEADER=X-Forwarded-For
## Icon service
## The predefined icon services are: internal, bitwarden, duckduckgo, google.
## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
##
## `internal` refers to Vaultwarden's built-in icon fetching implementation.
## If an external service is set, an icon request to Vaultwarden will return an HTTP
## redirect to the corresponding icon at the external service. An external service may
## be useful if your Vaultwarden instance has no external network connectivity, or if
## you are concerned that someone may probe your instance to try to detect whether icons
## for certain sites have been cached.
# ICON_SERVICE=internal
## Icon redirect code
## The HTTP status code to use for redirects to an external icon service.
## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
## Temporary redirects are useful while testing different icon services, but once a service
## has been decided on, consider using permanent redirects for cacheability. The legacy codes
## are currently better supported by the Bitwarden clients.
# ICON_REDIRECT_CODE=302
## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
## Default: 2592000 (30 days)
# ICON_CACHE_TTL=2592000
## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
## Default: 259200 (3 days)
# ICON_CACHE_NEGTTL=259200
## Icon download timeout
## Configure the timeout value when downloading the favicons.
## The default is 10 seconds, but this could be too low on slower network connections
# ICON_DOWNLOAD_TIMEOUT=10
## Block HTTP domains/IPs by Regex
## Any domains or IPs that match this regex won't be fetched by the internal HTTP client.
## Useful to hide other servers in the local network. Check the WIKI for more details
## NOTE: Always enclose this regex within single quotes!
# HTTP_REQUEST_BLOCK_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
## Enabling this will cause the internal HTTP client to refuse to connect to any non global IP address.
## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
# HTTP_REQUEST_BLOCK_NON_GLOBAL_IPS=true
## Client Settings
## Enable experimental feature flags for clients.
## This is a comma-separated list of flags, e.g. "flag1,flag2,flag3".
##
## The following flags are available:
## - "autofill-overlay": Add an overlay menu to form fields for quick access to credentials.
## - "autofill-v2": Use the new autofill implementation.
## - "browser-fileless-import": Directly import credentials from other providers without a file.
## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
## Require new device emails. When a user logs in an email is required to be sent.
## If sending the email fails the login attempt will fail!!
REQUIRE_DEVICE_EMAIL=true
## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true
## Timestamp format used in extended logging.
## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
## Logging to Syslog
## This requires extended logging
# USE_SYSLOG=false
## Logging to file
# LOG_FILE=/path/to/log
## Log level
## Change the verbosity of the log output
## Valid values are "trace", "debug", "info", "warn", "error" and "off"
## Setting it to "trace" or "debug" would also show logs for mounted routes and static file, websocket and alive requests
## For a specific module append a comma separated `path::to::module=log_level`
## For example, to only see debug logs for icons use: LOG_LEVEL="info,vaultwarden::api::icons=debug"
# LOG_LEVEL=info
## Token for the admin interface, preferably an Argon2 PHC string
## Vaultwarden has a built-in generator by calling `vaultwarden hash`
## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
## If not set, the admin panel is disabled
## New Argon2 PHC string
## Note that for some environments, like docker-compose you need to escape all the dollar signs `$` with an extra dollar sign like `$$`
## Also, use single quotes (') instead of double quotes (") to enclose the string when needed
## Enable this to bypass the admin panel security. This option is only
## meant to be used with the use of a separate auth layer in front
# DISABLE_ADMIN_TOKEN=false
## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
# ADMIN_RATELIMIT_SECONDS=300
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
# ADMIN_RATELIMIT_MAX_BURST=3
## Set the lifetime of admin sessions to this value (in minutes).
# ADMIN_SESSION_LIFETIME=20
## Allowed iframe ancestors (Know the risks!)
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=
## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
# LOGIN_RATELIMIT_SECONDS=60
## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
# LOGIN_RATELIMIT_MAX_BURST=10
## BETA FEATURE: Groups
## Controls whether group support is enabled for organizations
## This setting applies to organizations.
## Disabled by default because this is a beta feature, it contains known issues!
## KNOW WHAT YOU ARE DOING!
# ORG_GROUPS_ENABLED=false
## Increase secure note size limit (Know the risks!)
## Sets the secure note size limit to 100_000 instead of the default 10_000.
## WARNING: This could cause issues with clients. Also exports will not work on Bitwarden servers!
## KNOW WHAT YOU ARE DOING!
# INCREASE_NOTE_SIZE_LIMIT=false
########################
### MFA/2FA settings ###
########################
## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP
## You can generate it here: https://upgrade.yubico.com/getapikey/
## You can optionally specify a custom OTP server
# YUBICO_CLIENT_ID=11111
# YUBICO_SECRET_KEY=AAAAAAAAAAAAAAAAAAAAAAAA
# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
## Duo Settings
## You need to configure the DUO_IKEY, DUO_SKEY, and DUO_HOST options to enable global Duo support.
## Otherwise users will need to configure it themselves.
## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
## Then set the following options, based on the values obtained from the last step:
# DUO_IKEY=<Client ID>
# DUO_SKEY=<Client Secret>
# DUO_HOST=<API Hostname>
## After that, you should be able to follow the rest of the guide linked above,
## ignoring the fields that ask for the values that you already configured beforehand.
##
## If you want to attempt to use Duo's 'Traditional Prompt' (deprecated, iframe based) set DUO_USE_IFRAME to 'true'.
## Duo no longer supports this, but it still works for some integrations.
## If you aren't sure, leave this alone.
# DUO_USE_IFRAME=false
## Email 2FA settings
## Email token size
## Number of digits in an email 2FA token (min: 6, max: 255).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6
##
## Token expiration time
## Maximum time in seconds a token is valid. The time the user has to open the email client and copy the token.
# EMAIL_EXPIRATION_TIME=600
##
## Maximum attempts before an email token is reset and a new email will need to be sent.
# EMAIL_ATTEMPTS_LIMIT=3
##
## Setup email 2FA regardless of any organization policy
# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
## Automatically setup email 2FA as fallback provider when needed
# EMAIL_2FA_AUTO_FALLBACK=false
## Other MFA/2FA settings
## Disable 2FA remember
## Enabling this would force the users to use a second factor to login every time.
## Note that the checkbox would still be present, but ignored.
# DISABLE_2FA_REMEMBER=false
##
## Authenticator Settings
## Disable authenticator time drifted codes to be valid.
## TOTP codes of the previous and next 30 seconds will be invalid
##
## According to the RFC6238 (https://tools.ietf.org/html/rfc6238),
## we allow by default the TOTP code which was valid one step back and one in the future.
## This can however allow attackers to be a bit more lucky with their attempts because there are 3 valid codes.
## You can disable this, so that only the current TOTP Code is allowed.
## Keep in mind that when a server drifts out of time, valid codes could be marked as invalid.
## In any case, if a code has been used it cannot be used again, and codes which predate it will also be invalid.
# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
###########################
### SMTP Email settings ###
###########################
## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
SMTP_HOST=mail.benoit.jp.net
SMTP_FROM=pwd-no-reply@benoit.jp.net
SMTP_FROM_NAME=Vaultwarden
SMTP_USERNAME=pwd-no-reply@benoit.jp.net
SMTP_PASSWORD=
# SMTP_TIMEOUT=15
## Choose the type of secure connection for SMTP. The default is "starttls".
## The available options are:
## - "starttls": The default port is 587.
## - "force_tls": The default port is 465.
## - "off": The default port is 25.
## Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
SMTP_SECURITY=force_tls
SMTP_PORT=465
# Whether to send mail via the `sendmail` command
# USE_SENDMAIL=false
# Which sendmail command to use. The one found in the $PATH is used if not specified.
# SENDMAIL_COMMAND="/path/to/sendmail"
## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
## Possible values: ["Plain", "Login", "Xoauth2"].
## Multiple options need to be separated by a comma ','.
# SMTP_AUTH_MECHANISM=
## Server name sent during the SMTP HELO
## By default this value is the machine's hostname,
## but might need to be changed in case it trips some anti-spam filters
# HELO_NAME=
## Embed images as email attachments
# SMTP_EMBED_IMAGES=true
## SMTP debugging
## When set to true this will output very detailed SMTP messages.
## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
# SMTP_DEBUG=false
## Accept Invalid Certificates
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
# SMTP_ACCEPT_INVALID_CERTS=false
## Accept Invalid Hostnames
## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
## Only use this as a last resort if you are not able to use a valid certificate.
# SMTP_ACCEPT_INVALID_HOSTNAMES=false
#######################
### Rocket settings ###
#######################
## Rocket specific settings
## See https://rocket.rs/v0.5/guide/configuration/ for more details.
# ROCKET_ADDRESS=0.0.0.0
## The default port is 8000, unless running in a Docker container, in which case it is 80.
# ROCKET_PORT=8000
# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
# vim: syntax=ini
EOT

Binary file not shown.

View file

@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID2DCCA12gAwIBAgISBDuyzASZ8hbkXsQUfhgFG591MAoGCCqGSM49BAMDMDIx
CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF
MTAeFw0yMzEwMjEyMjIyMzhaFw0yNDAxMTkyMjIyMzdaMB0xGzAZBgNVBAMTEm1h
aWwuYmVub2l0LmpwLm5ldDB2MBAGByqGSM49AgEGBSuBBAAiA2IABEX8n92nsxSH
Oh+9cyN5VRfkQwN/kpvrZpNw9EwYw2BXkrdaG9X1fmFXPLipyv+SQiIrxSSQmlPt
dnzxKwm+90UYXFKTbcfD2eAC5NFa03+79tiAAvlrV4wCSawdGCp1zKOCAkkwggJF
MA4GA1UdDwEB/wQEAwIHgDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw
DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU0gLlNhQwJp2vdLEus/utgvV78YQwHwYD
VR0jBBgwFoAUWvPtK/w2wjd5uVIw6lRvz1XLLqwwVQYIKwYBBQUHAQEESTBHMCEG
CCsGAQUFBzABhhVodHRwOi8vZTEuby5sZW5jci5vcmcwIgYIKwYBBQUHMAKGFmh0
dHA6Ly9lMS5pLmxlbmNyLm9yZy8wUwYDVR0RBEwwSoIYYXV0b2NvbmZpZy5iZW5v
aXQuanAubmV0ghphdXRvZGlzY292ZXIuYmVub2l0LmpwLm5ldIISbWFpbC5iZW5v
aXQuanAubmV0MBMGA1UdIAQMMAowCAYGZ4EMAQIBMIIBAwYKKwYBBAHWeQIEAgSB
9ASB8QDvAHUAO1N3dT4tuYBOizBbBv5AO2fYT8P0x70ADS1yb+H61BcAAAGLVIw7
yAAABAMARjBEAiA9ew86LYJ4jJX7HN1nLqZqZWJpaTmYm3lP6WocRDDQZQIgJeye
vacTjmDGjR8Al8UBWwHAfzhv7OHfoQ9/zEmEjUMAdgDatr9rP7W2Ip+bwrtca+hw
kXFsu1GEhTS9pD0wSNf7qwAAAYtUjD3BAAAEAwBHMEUCIDsX27XHN1/GAdP657K4
hSbEdtoRET8nCZDg6w6BRqgiAiEA/E+BcBk7Yb0zTQs/vhgs1RkUlR6a+vd4c7P9
8fER7zIwCgYIKoZIzj0EAwMDaQAwZgIxAPYYy6DRGI+wabs+1Bt37DsaRKkmnUFE
0JaLvpCtkCIfmLDqMVm/HxgoVciHL1xuqwIxAPZxUikyyjm7TRLQ31WiNBIFHQE/
CNXCUXElEFWp9sO1xeNjhd5l1bNgurKkwpSMWA==
-----END CERTIFICATE-----

View file

@ -0,0 +1,39 @@
name 'twt'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_nitter::redis
esh_nitter::system
esh_nitter::install
esh_nitter::service
)
###
# Attributes
###
###
# esh_nitter
###
default['esh']['nitter']['config']['title'] = 'benpro.fr Nitter instance'
default['esh']['nitter']['config']['hostname'] = 'twt.benpro.fr'
default['esh']['nitter']['config']['hmac_key'] = ''
default['esh']['nitter']['config']['replace_twitter'] = 'twt.benpro.fr'
default['esh']['nitter']['config']['replace_youtube'] = 'ytb.benpro.fr'
default['esh']['nitter']['nginx']['ip_addr'] = '127.0.0.1'
default['esh']['nitter']['nginx']['port'] = '8080'

View file

@ -0,0 +1,34 @@
name 'ytb'
###
# Cookbooks location
###
# ESH
default_source :chef_repo, '../cookbooks'
# Community
default_source :supermarket, 'https://supermarket.chef.io'
###
# Run List
###
run_list %w(
esh_docker::service
esh_piped::compose
)
###
# Attributes
###
###
# esh_piped
###
default['esh']['piped']['config']['proxy_hostname'] = 'ytb-proxy.benoit.jp.net'
default['esh']['piped']['config']['captcha_api_key'] = 'INSERT_HERE'
default['esh']['piped']['config']['backend_hostname'] = 'ytb-api.benoit.jp.net'
default['esh']['piped']['config']['frontend_hostname'] = 'ytb.benoit.jp.net'
default['esh']['piped']['config']['postgresql_password'] = ''

25
esh_adguard/.gitignore vendored Normal file
View file

@ -0,0 +1,25 @@
.vagrant
*~
*#
.#*
\#*#
.*.sw[a-z]
*.un~
# Bundler
Gemfile.lock
gems.locked
bin/*
.bundle/*
# test kitchen
.kitchen/
kitchen.local.yml
# Chef Infra
Berksfile.lock
.zero-knife.rb
Policyfile.lock.json
.idea/

10
esh_adguard/CHANGELOG.md Normal file
View file

@ -0,0 +1,10 @@
# esh_adguard CHANGELOG
This file is used to list changes made in each version of the esh_adguard cookbook.
## 0.1.0
Initial release.
- change 0
- change 1

201
esh_adguard/LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

16
esh_adguard/Policyfile.rb Normal file
View file

@ -0,0 +1,16 @@
# Policyfile.rb - Describe how you want Chef Infra Client to build your system.
#
# For more information on the Policyfile feature, visit
# https://docs.chef.io/policyfile/
# A name that describes what the system you're building with Chef does.
name 'esh_adguard'
# Where to find external cookbooks:
default_source :supermarket
# run_list: chef-client will run these recipes in the order specified.
run_list 'esh_adguard::default'
# Specify a custom source for a single cookbook:
cookbook 'esh_adguard', path: '.'

4
esh_adguard/README.md Normal file
View file

@ -0,0 +1,4 @@
# esh_adguard
TODO: Enter the cookbook description here.

115
esh_adguard/chefignore Normal file
View file

@ -0,0 +1,115 @@
# Put files/directories that should be ignored in this file when uploading
# to a Chef Infra Server or Supermarket.
# Lines that start with '# ' are comments.
# OS generated files #
######################
.DS_Store
ehthumbs.db
Icon?
nohup.out
Thumbs.db
.envrc
# EDITORS #
###########
.#*
.project
.settings
*_flymake
*_flymake.*
*.bak
*.sw[a-z]
*.tmproj
*~
\#*
REVISION
TAGS*
tmtags
.vscode
.editorconfig
## COMPILED ##
##############
*.class
*.com
*.dll
*.exe
*.o
*.pyc
*.so
*/rdoc/
a.out
mkmf.log
# Testing #
###########
.circleci/*
.codeclimate.yml
.delivery/*
.foodcritic
.kitchen*
.mdlrc
.overcommit.yml
.rspec
.rubocop.yml
.travis.yml
.watchr
.yamllint
azure-pipelines.yml
Dangerfile
examples/*
features/*
Guardfile
kitchen.yml*
mlc_config.json
Procfile
Rakefile
spec/*
test/*
# SCM #
#######
.git
.gitattributes
.gitconfig
.github/*
.gitignore
.gitkeep
.gitmodules
.svn
*/.bzr/*
*/.git
*/.hg/*
*/.svn/*
# Berkshelf #
#############
Berksfile
Berksfile.lock
cookbooks/*
tmp
# Bundler #
###########
vendor/*
Gemfile
Gemfile.lock
# Policyfile #
##############
Policyfile.rb
Policyfile.lock.json
# Documentation #
#############
CODE_OF_CONDUCT*
CONTRIBUTING*
documentation/*
TESTING*
UPGRADING*
# Vagrant #
###########
.vagrant
Vagrantfile

View file

@ -0,0 +1,25 @@
# compliance
This directory contains Cinc Auditor profile, waiver and input objects which are used with the Cinc Infra Compliance Phase.
Detailed information on the Cinc Infra Compliance Phase can be found in the [Chef Documentation](https://docs.chef.io/chef_compliance_phase/).
```plain
./compliance
├── inputs
├── profiles
└── waivers
```
Use the `cinc generate` command from Cinc Workstation to create content for these directories:
```sh
# Generate a Cinc Auditor profile
cinc generate profile PROFILE_NAME
# Generate a Cinc Auditor waiver file
cinc generate waiver WAIVER_NAME
# Generate a Cinc Auditor input file
cinc generate input INPUT_NAME
```

31
esh_adguard/kitchen.yml Normal file
View file

@ -0,0 +1,31 @@
---
driver:
name: vagrant
## The forwarded_port port feature lets you connect to ports on the VM guest
## via localhost on the host.
## see also: https://www.vagrantup.com/docs/networking/forwarded_ports
# network:
# - ["forwarded_port", {guest: 80, host: 8080}]
provisioner:
name: chef_zero
## product_name and product_version specifies a specific Chef product and version to install.
## see the Chef documentation for more details: https://docs.chef.io/workstation/config_yml_kitchen/
# product_name: chef
# product_version: 17
verifier:
name: inspec
platforms:
- name: ubuntu-20.04
- name: centos-8
suites:
- name: default
verifier:
inspec_tests:
- test/integration/default

19
esh_adguard/metadata.rb Normal file
View file

@ -0,0 +1,19 @@
name 'esh_adguard'
maintainer 'https://easyself.host'
maintainer_email 'esh@benoit.jp.net'
license 'Apache-2.0'
description 'Installs/Configures esh_adguard'
version '0.1.0'
chef_version '>= 16.0'
# The `issues_url` points to the location where issues for this cookbook are
# tracked. A `View Issues` link will be displayed on this cookbook's page when
# uploaded to a Supermarket.
#
# issues_url 'https://github.com/<insert_org_here>/esh_adguard/issues'
# The `source_url` points to the development repository for this cookbook. A
# `View Source` link will be displayed on this cookbook's page when uploaded to
# a Supermarket.
#
# source_url 'https://github.com/<insert_org_here>/esh_adguard'

View file

@ -0,0 +1,142 @@
#
# Cookbook:: esh_adguard
# Recipe:: default
#
# Copyright:: 2023, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
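# Dedicated system user and group owning the AdGuardHome installation under /var/lib/adguard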
group 'adguard' do
system true
action :create
end
user 'adguard' do
comment 'adguard system user'
gid 'adguard'
home '/var/lib/adguard'
manage_home true
shell '/usr/bin/bash'
system true
action :create
end
directory '/etc/adguard' do
owner 'adguard'
group 'adguard'
mode '0750'
action :create
end
%w(/var/log/AdGuardHome.out /var/log/AdGuardHome.err).each do |log|
file log do
owner 'adguard'
group 'adguard'
mode '0640'
action :create
end
end
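# Download the pinned AdGuardHome release tarball and unpack it into /var/lib/adguard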
version = node['esh']['adguard']['version']
url = "https://github.com/AdguardTeam/AdGuardHome/releases/download/v#{version}/AdGuardHome_linux_amd64.tar.gz"
remote_file "adguard.#{version}.tar.gz" do
source url
path "#{Chef::Config[:file_cache_path]}/adguard.#{version}.tar.gz"
notifies :run, 'execute[extract adguard]', :immediately
end
execute 'extract adguard' do
command <<~EOT
tar -zxvf \
#{Chef::Config[:file_cache_path]}/adguard.#{version}.tar.gz \
-C /var/lib/adguard \
--strip-components=2 ./AdGuardHome
chown -R adguard: /var/lib/adguard
chmod 750 /var/lib/adguard/AdGuardHome
EOT
action :nothing
notifies :restart, 'service[AdGuardHome]', :delayed
end
username = node['esh']['adguard']['cert_auth'].split(':')[0]
password = node['esh']['adguard']['cert_auth'].split(':')[1]
auth_string = Base64.strict_encode64("#{username}:#{password}")
remote_file '/etc/adguard/fullchain.pem' do
source node['esh']['adguard']['cert_pub']
headers({ 'Authorization' => "Basic #{auth_string}" })
owner 'adguard'
group 'adguard'
mode '0400'
action :create
end
remote_file '/etc/adguard/privkey.pem' do
source node['esh']['adguard']['cert_priv']
headers({ 'Authorization' => "Basic #{auth_string}" })
owner 'adguard'
group 'adguard'
mode '0400'
action :create
end
execute 'setcap AdGuardHome' do
command "setcap 'CAP_NET_BIND_SERVICE=+eip CAP_NET_RAW=+eip' /var/lib/adguard/AdGuardHome"
not_if 'getcap /var/lib/adguard/AdGuardHome | grep -q cap_net_bind_service,cap_net_raw=eip'
action :run
end
execute 'adguard service' do
command '/var/lib/adguard/AdGuardHome -s install'
not_if { ::File.exist?('/etc/systemd/system/AdGuardHome.service') }
action :run
end
directory '/etc/systemd/system/AdGuardHome.service.d' do
owner 'root'
group 'root'
mode '0755'
action :create
end
file '/etc/systemd/system/AdGuardHome.service.d/override.conf' do
content <<~EOT
[Service]
User=adguard
Group=adguard
EOT
owner 'root'
group 'root'
mode '0644'
action :create
notifies :run, 'execute[systemctl daemon-reload]', :immediately
end
execute 'systemctl daemon-reload' do
command 'systemctl daemon-reload'
action :nothing
end
file '/var/lib/adguard/AdGuardHome.yaml' do
content node['esh']['adguard']['config']
owner 'adguard'
group 'adguard'
mode '0640'
action :create
notifies :restart, 'service[AdGuardHome]', :immediately
end
service 'AdGuardHome' do
action :nothing
end
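
Everything in the recipe above is driven by attributes under `node['esh']['adguard']`: the AdGuardHome release to download, a `user:password` pair for HTTP Basic auth on the certificate URLs, the two certificate URLs themselves, and the full `AdGuardHome.yaml` contents. A minimal sketch of an attributes file; the attribute names come from the recipe, while every value shown here is a placeholder:
```ruby
# Hypothetical attributes/default.rb sketch for esh_adguard; all values are placeholders.
default['esh']['adguard']['version']   = '0.107.0'        # AdGuardHome release to download
default['esh']['adguard']['cert_auth'] = 'user:password'  # HTTP Basic auth, split on ':' by the recipe
default['esh']['adguard']['cert_pub']  = 'https://certs.example.org/fullchain.pem'
default['esh']['adguard']['cert_priv'] = 'https://certs.example.org/privkey.pem'
default['esh']['adguard']['config']    = <<~YAML          # written verbatim to AdGuardHome.yaml
  # full AdGuardHome.yaml content goes here
YAML
```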

View file

@ -0,0 +1,16 @@
# Chef InSpec test for recipe esh_adguard::default
# The Chef InSpec reference, with examples and extensive documentation, can be
# found at https://docs.chef.io/inspec/resources/
unless os.windows?
# This is an example test, replace with your own test.
describe user('root'), :skip do
it { should exist }
end
end
# This is an example test, replace it with your own test.
describe port(80), :skip do
it { should_not be_listening }
end
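
The generated placeholders above could be replaced with checks against what the default recipe actually manages. A sketch, with resource names and paths taken from that recipe (none of this is in the original test file):
```ruby
# Hypothetical test/integration checks for esh_adguard, based on the default recipe.
describe user('adguard') do
  it { should exist }
end

describe directory('/etc/adguard') do
  its('mode') { should cmp '0750' }
  its('owner') { should eq 'adguard' }
end

describe file('/var/lib/adguard/AdGuardHome') do
  it { should exist }
end

# Whether the service ends up enabled depends on what `AdGuardHome -s install` does on the platform.
describe service('AdGuardHome') do
  it { should be_installed }
end
```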

25
esh_archivebox/.gitignore vendored Normal file
View file

@ -0,0 +1,25 @@
.vagrant
*~
*#
.#*
\#*#
.*.sw[a-z]
*.un~
# Bundler
Gemfile.lock
gems.locked
bin/*
.bundle/*
# test kitchen
.kitchen/
kitchen.local.yml
# Chef Infra
Berksfile.lock
.zero-knife.rb
Policyfile.lock.json
.idea/

View file

@ -0,0 +1,10 @@
# esh_archivebox CHANGELOG
This file is used to list changes made in each version of the esh_archivebox cookbook.
## 0.1.0
Initial release.
- change 0
- change 1

201
esh_archivebox/LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,16 @@
# Policyfile.rb - Describe how you want Chef Infra Client to build your system.
#
# For more information on the Policyfile feature, visit
# https://docs.chef.io/policyfile/
# A name that describes what the system you're building with Chef does.
name 'esh_archivebox'
# Where to find external cookbooks:
default_source :supermarket
# run_list: chef-client will run these recipes in the order specified.
run_list 'esh_archivebox::default'
# Specify a custom source for a single cookbook:
cookbook 'esh_archivebox', path: '.'

5
esh_archivebox/README.md Normal file
View file

@ -0,0 +1,5 @@
# esh_archivebox
- [Upstream](https://hub.docker.com/r/archivebox/archivebox/tags)
This cookbook targets upstream image tag `0.6.3`.
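
The recipes later in this diff read their settings from `node['esh']['archivebox']`: an admin username/email/password fed to `archivebox init --setup`, and a `docker` sub-hash handed to the `esh_undocker_*` resources. A minimal sketch with attribute names taken from those recipes; all values are placeholders, and the exact shape of `url`, `env` and `service` depends on the `esh_undocker` cookbook, which is not shown here:
```ruby
# Hypothetical attributes sketch for esh_archivebox; every value is a placeholder.
default['esh']['archivebox']['username'] = 'admin'
default['esh']['archivebox']['email']    = 'admin@example.org'
default['esh']['archivebox']['password'] = 'changeme'

default['esh']['archivebox']['docker']['image']   = 'archivebox/archivebox'
default['esh']['archivebox']['docker']['tag']     = '0.6.3' # tag this cookbook targets (see above)
default['esh']['archivebox']['docker']['url']     = 'https://registry.example.org' # placeholder; whatever esh_undocker_download expects
default['esh']['archivebox']['docker']['network'] = ''      # placeholder; passed to esh_undocker_extract
default['esh']['archivebox']['docker']['env']     = []      # placeholder; passed to esh_undocker_extract
default['esh']['archivebox']['docker']['service'] = ''      # placeholder; unit content for esh_undocker_service
```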

115
esh_archivebox/chefignore Normal file
View file

@ -0,0 +1,115 @@
# Put files/directories that should be ignored in this file when uploading
# to a Chef Infra Server or Supermarket.
# Lines that start with '# ' are comments.
# OS generated files #
######################
.DS_Store
ehthumbs.db
Icon?
nohup.out
Thumbs.db
.envrc
# EDITORS #
###########
.#*
.project
.settings
*_flymake
*_flymake.*
*.bak
*.sw[a-z]
*.tmproj
*~
\#*
REVISION
TAGS*
tmtags
.vscode
.editorconfig
## COMPILED ##
##############
*.class
*.com
*.dll
*.exe
*.o
*.pyc
*.so
*/rdoc/
a.out
mkmf.log
# Testing #
###########
.circleci/*
.codeclimate.yml
.delivery/*
.foodcritic
.kitchen*
.mdlrc
.overcommit.yml
.rspec
.rubocop.yml
.travis.yml
.watchr
.yamllint
azure-pipelines.yml
Dangerfile
examples/*
features/*
Guardfile
kitchen.yml*
mlc_config.json
Procfile
Rakefile
spec/*
test/*
# SCM #
#######
.git
.gitattributes
.gitconfig
.github/*
.gitignore
.gitkeep
.gitmodules
.svn
*/.bzr/*
*/.git
*/.hg/*
*/.svn/*
# Berkshelf #
#############
Berksfile
Berksfile.lock
cookbooks/*
tmp
# Bundler #
###########
vendor/*
Gemfile
Gemfile.lock
# Policyfile #
##############
Policyfile.rb
Policyfile.lock.json
# Documentation #
#############
CODE_OF_CONDUCT*
CONTRIBUTING*
documentation/*
TESTING*
UPGRADING*
# Vagrant #
###########
.vagrant
Vagrantfile

View file

@ -0,0 +1,25 @@
# compliance
This directory contains Chef InSpec profile, waiver and input objects which are used with the Chef Infra Compliance Phase.
Detailed information on the Chef Infra Compliance Phase can be found in the [Chef Documentation](https://docs.chef.io/chef_compliance_phase/).
```plain
./compliance
├── inputs
├── profiles
└── waivers
```
Use the `chef generate` command from Chef Workstation to create content for these directories:
```sh
# Generate a Chef InSpec profile
chef generate profile PROFILE_NAME
# Generate a Chef InSpec waiver file
chef generate waiver WAIVER_NAME
# Generate a Chef InSpec input file
chef generate input INPUT_NAME
```

View file

@ -0,0 +1,31 @@
---
driver:
name: vagrant
## The forwarded_port port feature lets you connect to ports on the VM guest
## via localhost on the host.
## see also: https://www.vagrantup.com/docs/networking/forwarded_ports
# network:
# - ["forwarded_port", {guest: 80, host: 8080}]
provisioner:
name: chef_zero
## product_name and product_version specify a specific Chef product and version to install.
## see the Chef documentation for more details: https://docs.chef.io/workstation/config_yml_kitchen/
# product_name: chef
# product_version: 17
verifier:
name: inspec
platforms:
- name: ubuntu-20.04
- name: centos-8
suites:
- name: default
verifier:
inspec_tests:
- test/integration/default

View file

@ -0,0 +1,21 @@
name 'esh_archivebox'
maintainer 'https://easyself.host'
maintainer_email 'esh@benpro.fr'
license 'Apache-2.0'
description 'Installs/Configures esh_archivebox'
version '0.1.0'
chef_version '>= 16.0'
supports 'debian', '= 11.0'
depends 'esh_undocker'
# The `issues_url` points to the location where issues for this cookbook are
# tracked. A `View Issues` link will be displayed on this cookbook's page when
# uploaded to a Supermarket.
#
# issues_url 'https://github.com/<insert_org_here>/esh_archivebox/issues'
# The `source_url` points to the development repository for this cookbook. A
# `View Source` link will be displayed on this cookbook's page when uploaded to
# a Supermarket.
#
# source_url 'https://github.com/<insert_org_here>/esh_archivebox'

View file

@ -0,0 +1,104 @@
#
# Cookbook:: esh_archivebox
# Recipe:: compose
#
# Copyright:: 2022, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
directory '/opt/archivebox' do
owner 'root'
group 'root'
mode '0700'
action :create
end
template '/opt/archivebox/docker-compose.yml' do
owner 'root'
group 'root'
mode '0400'
variables volume_data: "/var/lib/#{node['hostname']}-data"
action :create
end
execute 'docker compose pull' do
command 'docker compose pull'
cwd '/opt/archivebox'
live_stream true
action :run
end
apt_package 'expect'
file '/tmp/archivebox-init.expect' do
content <<~EOT
#!/usr/bin/expect -f
set timeout -1
cd /opt/archivebox
spawn /usr/bin/docker compose run archivebox init --setup
expect "Username"
send -- "#{node['esh']['archivebox']['username']}\\r"
expect "Email address:"
send -- "#{node['esh']['archivebox']['email']}\\r"
expect "Password:"
send -- "#{node['esh']['archivebox']['password']}\\r"
expect "Password (again):"
send -- "#{node['esh']['archivebox']['password']}\\r"
expect eof
EOT
owner 'root'
group 'root'
mode '0400'
not_if { ::File.exist?("/var/lib/#{node['hostname']}-data/index.sqlite3")}
notifies :run, 'execute[init archivebox configuration]', :immediately
action :create
end
execute 'init archivebox configuration' do
command 'expect -f /tmp/archivebox-init.expect'
live_stream true
action :nothing
end
systemd_unit 'archivebox.service' do
content <<~EOU
[Unit]
Description=archivebox via docker compose
Requires=docker.service
After=docker.service
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/archivebox
ExecStart=/usr/bin/docker compose up -d
ExecStop=/usr/bin/docker compose down
[Install]
WantedBy=multi-user.target
EOU
action [:create, :enable]
subscribes :restart, 'template[/opt/archivebox/docker-compose.yml]', :delayed
end
service 'archivebox' do
action :nothing
subscribes :start, 'execute[docker compose pull]', :delayed
end

View file

@ -0,0 +1,17 @@
#
# Cookbook:: esh_archivebox
# Recipe:: default
#
# Copyright:: 2022, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,61 @@
#
# Cookbook:: esh_archivebox
# Recipe:: init
#
# Copyright:: 2022, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apt_package %w(systemd-container expect)
service 'archivebox' do
action :stop
not_if { ::File.exist?("/var/lib/#{node['hostname']}-data/index.sqlite3")}
end
file '/tmp/archivebox-init.expect' do
content <<~EOT
#!/usr/bin/expect -f
set timeout -1
spawn systemd-nspawn --oci-bundle=/var/lib/machines/archivebox/ --machine archivebox --hostname archivebox --bind /var/lib/#{node['hostname']}-data:/data --capability=CAP_CHOWN -- /app/bin/docker_entrypoint.sh init --setup
expect "Username (leave blank to use 'root'):"
send -- "#{node['esh']['archivebox']['username']}\\r"
expect "Email address:"
send -- "#{node['esh']['archivebox']['email']}\\r"
expect "Password:"
send -- "#{node['esh']['archivebox']['password']}\\r"
expect "Password (again):"
send -- "#{node['esh']['archivebox']['password']}\\r"
expect eof
EOT
owner 'root'
group 'root'
mode '0400'
not_if { ::File.exist?("/var/lib/#{node['hostname']}-data/index.sqlite3")}
notifies :run, 'execute[init archivebox configuration]', :immediately
action :create
end
execute 'init archivebox configuration' do
command 'expect -f /tmp/archivebox-init.expect'
live_stream true
action :nothing
notifies :start, 'service[archivebox]', :immediately
end

View file

@ -0,0 +1,26 @@
#
# Cookbook:: esh_archivebox
# Recipe:: system
#
# Copyright:: 2022, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The app runs as UID/GID 999:999
directory "/var/lib/#{node['hostname']}-data" do
owner 999
group 999
mode '0750'
not_if { ::Dir.exist?("/var/lib/#{node['hostname']}-data") }
action :create
end

View file

@ -0,0 +1,39 @@
#
# Cookbook:: esh_archivebox
# Recipe:: undocker
#
# Copyright:: 2022, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
url = node['esh']['archivebox']['docker']['url']
image = node['esh']['archivebox']['docker']['image']
tag = node['esh']['archivebox']['docker']['tag']
network = node['esh']['archivebox']['docker']['network']
env = node['esh']['archivebox']['docker']['env']
service = node['esh']['archivebox']['docker']['service']
esh_undocker_download url do
image image
tag tag
end
esh_undocker_extract image do
tag tag
network network
env env
end
esh_undocker_service image do
content service
end

View file

@ -0,0 +1,91 @@
# Usage:
# docker-compose run archivebox init --setup
# docker-compose up
# echo "https://example.com" | docker-compose run archivebox archivebox add
# docker-compose run archivebox add --depth=1 https://example.com/some/feed.rss
# docker-compose run archivebox config --set PUBLIC_INDEX=True
# docker-compose run archivebox help
# Documentation:
# https://github.com/ArchiveBox/ArchiveBox/wiki/Docker#docker-compose
version: '2.4'
services:
archivebox:
# build: . # for developers working on archivebox
image: ${DOCKER_IMAGE:-archivebox/archivebox:master}
command: server --quick-init 0.0.0.0:8000
ports:
- 8000:8000
environment:
- ALLOWED_HOSTS=* # add any config options you want as env vars
- MEDIA_MAX_SIZE=750m
# - SEARCH_BACKEND_ENGINE=sonic # uncomment these if you enable sonic below
# - SEARCH_BACKEND_HOST_NAME=sonic
# - SEARCH_BACKEND_PASSWORD=SecretPassword
volumes:
- <%= @volume_data %>:/data
# - ./archivebox:/app/archivebox # for developers working on archivebox
# To run the Sonic full-text search backend, first download the config file to sonic.cfg
# curl -O https://raw.githubusercontent.com/ArchiveBox/ArchiveBox/master/etc/sonic.cfg
# after starting, backfill any existing Snapshots into the index: docker-compose run archivebox update --index-only
# sonic:
# image: valeriansaliou/sonic:v1.3.0
# expose:
# - 1491
# environment:
# - SEARCH_BACKEND_PASSWORD=SecretPassword
# volumes:
# - ./sonic.cfg:/etc/sonic.cfg:ro
# - ./data/sonic:/var/lib/sonic/store
### Optional Addons: tweak these examples as needed for your specific use case
# Example: Run scheduled imports in a docker instead of using cron on the
# host machine, add tasks and see more info with archivebox schedule --help
# scheduler:
# image: archivebox/archivebox:latest
# command: schedule --foreground --every=day --depth=1 'https://getpocket.com/users/USERNAME/feed/all'
# environment:
# - USE_COLOR=True
# - SHOW_PROGRESS=False
# volumes:
# - ./data:/data
# Example: Put Nginx in front of the ArchiveBox server for SSL termination
# nginx:
# image: nginx:alpine
# ports:
# - 443:443
# - 80:80
# volumes:
# - ./etc/nginx/nginx.conf:/etc/nginx/nginx.conf
# - ./data:/var/www
# Example: run all your ArchiveBox traffic through a WireGuard VPN tunnel
# wireguard:
# image: linuxserver/wireguard
# network_mode: 'service:archivebox'
# cap_add:
# - NET_ADMIN
# - SYS_MODULE
# sysctls:
# - net.ipv4.conf.all.rp_filter=2
# - net.ipv4.conf.all.src_valid_mark=1
# volumes:
# - /lib/modules:/lib/modules
# - ./wireguard.conf:/config/wg0.conf:ro
# Example: Run PYWB in parallel and auto-import WARCs from ArchiveBox
# pywb:
# image: webrecorder/pywb:latest
# entrypoint: /bin/sh 'wb-manager add default /archivebox/archive/*/warc/*.warc.gz; wayback --proxy;'
# environment:
# - INIT_COLLECTION=archivebox
# ports:
# - 8080:8080
# volumes:
#   - ./data:/archivebox
#   - ./data/wayback:/webarchive

View file

@ -0,0 +1,16 @@
# Chef InSpec test for recipe esh_archivebox::default
# The Chef InSpec reference, with examples and extensive documentation, can be
# found at https://docs.chef.io/inspec/resources/
unless os.windows?
# This is an example test, replace with your own test.
describe user('root'), :skip do
it { should exist }
end
end
# This is an example test, replace it with your own test.
describe port(80), :skip do
it { should_not be_listening }
end

View file

@ -0,0 +1,32 @@
# Delivery for Local Phases Execution
#
# This file allows you to execute test phases locally on a workstation or
# in a CI pipeline. The delivery-cli will read this file and execute the
# command(s) that are configured for each phase. You can customize them
# by just modifying the phase key on this file.
#
# By default these phases are configured for Cookbook Workflow only
#
[local_phases]
unit = "echo skipping unit phase."
lint = "chef exec cookstyle"
# foodcritic has been deprecated in favor of cookstyle so we skip the syntax
# phase now.
syntax = "echo skipping syntax phase. Use lint phase instead."
provision = "chef exec kitchen create"
deploy = "chef exec kitchen converge"
smoke = "chef exec kitchen verify"
# The functional phase is optional, you can define it by uncommenting
# the line below and running the command: `delivery local functional`
# functional = ""
cleanup = "chef exec kitchen destroy"
# Remote project.toml file
#
# Instead of the local phases above, you may specify a remote URI location for
# the `project.toml` file. This is useful for teams that wish to centrally
# manage the behavior of the `delivery local` command across many different
# projects.
#
# remote_file = "https://url/project.toml"

25
esh_borgmatic/.gitignore vendored Normal file
View file

@ -0,0 +1,25 @@
.vagrant
*~
*#
.#*
\#*#
.*.sw[a-z]
*.un~
# Bundler
Gemfile.lock
gems.locked
bin/*
.bundle/*
# test kitchen
.kitchen/
kitchen.local.yml
# Chef Infra
Berksfile.lock
.zero-knife.rb
Policyfile.lock.json
.idea/

View file

@ -0,0 +1,10 @@
# esh_borgmatic CHANGELOG
This file is used to list changes made in each version of the esh_borgmatic cookbook.
## 0.1.0
Initial release.
- change 0
- change 1

201
esh_borgmatic/LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,16 @@
# Policyfile.rb - Describe how you want Chef Infra Client to build your system.
#
# For more information on the Policyfile feature, visit
# https://docs.chef.io/policyfile/
# A name that describes what the system you're building with Chef does.
name 'esh_borgmatic'
# Where to find external cookbooks:
default_source :supermarket
# run_list: chef-client will run these recipes in the order specified.
run_list 'esh_borgmatic::default'
# Specify a custom source for a single cookbook:
cookbook 'esh_borgmatic', path: '.'

4
esh_borgmatic/README.md Normal file
View file

@ -0,0 +1,4 @@
# esh_borgmatic
TODO: Enter the cookbook description here.
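
The setup recipe later in this diff expects its configuration under `node['esh']['borgmatic']`: an SSH key pair for reaching the Borg repository, a systemd `OnCalendar` schedule, and a `config` hash rendered into `/etc/borgmatic/config.yaml`. A minimal sketch with attribute names taken from that recipe; every value is a placeholder:
```ruby
# Hypothetical attributes sketch for esh_borgmatic; every value is a placeholder.
default['esh']['borgmatic']['ssh_priv'] = '-----BEGIN OPENSSH PRIVATE KEY-----...'
default['esh']['borgmatic']['ssh_pub']  = 'ssh-ed25519 AAAA... borgmatic'
default['esh']['borgmatic']['timer']    = '*-*-* 03:00:00' # systemd OnCalendar expression

default['esh']['borgmatic']['config']['repo']            = 'borg@backup.example.org:repo' # user@host:path
default['esh']['borgmatic']['config']['repo_passphrase'] = 'changeme'
default['esh']['borgmatic']['config']['location_src']    = 'tank/data' # ZFS dataset to back up
default['esh']['borgmatic']['config']['before_backup']   = ['echo extra pre-backup step']
default['esh']['borgmatic']['config']['healthchecks']    = 'https://hc-ping.example.org/uuid'
```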

115
esh_borgmatic/chefignore Normal file
View file

@ -0,0 +1,115 @@
# Put files/directories that should be ignored in this file when uploading
# to a Chef Infra Server or Supermarket.
# Lines that start with '# ' are comments.
# OS generated files #
######################
.DS_Store
ehthumbs.db
Icon?
nohup.out
Thumbs.db
.envrc
# EDITORS #
###########
.#*
.project
.settings
*_flymake
*_flymake.*
*.bak
*.sw[a-z]
*.tmproj
*~
\#*
REVISION
TAGS*
tmtags
.vscode
.editorconfig
## COMPILED ##
##############
*.class
*.com
*.dll
*.exe
*.o
*.pyc
*.so
*/rdoc/
a.out
mkmf.log
# Testing #
###########
.circleci/*
.codeclimate.yml
.delivery/*
.foodcritic
.kitchen*
.mdlrc
.overcommit.yml
.rspec
.rubocop.yml
.travis.yml
.watchr
.yamllint
azure-pipelines.yml
Dangerfile
examples/*
features/*
Guardfile
kitchen.yml*
mlc_config.json
Procfile
Rakefile
spec/*
test/*
# SCM #
#######
.git
.gitattributes
.gitconfig
.github/*
.gitignore
.gitkeep
.gitmodules
.svn
*/.bzr/*
*/.git
*/.hg/*
*/.svn/*
# Berkshelf #
#############
Berksfile
Berksfile.lock
cookbooks/*
tmp
# Bundler #
###########
vendor/*
Gemfile
Gemfile.lock
# Policyfile #
##############
Policyfile.rb
Policyfile.lock.json
# Documentation #
#############
CODE_OF_CONDUCT*
CONTRIBUTING*
documentation/*
TESTING*
UPGRADING*
# Vagrant #
###########
.vagrant
Vagrantfile

31
esh_borgmatic/kitchen.yml Normal file
View file

@ -0,0 +1,31 @@
---
driver:
name: vagrant
## The forwarded_port port feature lets you connect to ports on the VM guest
## via localhost on the host.
## see also: https://www.vagrantup.com/docs/networking/forwarded_ports
# network:
# - ["forwarded_port", {guest: 80, host: 8080}]
provisioner:
name: chef_zero
## product_name and product_version specify a specific Chef product and version to install.
## see the Chef documentation for more details: https://docs.chef.io/workstation/config_yml_kitchen/
# product_name: chef
# product_version: 17
verifier:
name: inspec
platforms:
- name: ubuntu-20.04
- name: centos-8
suites:
- name: default
verifier:
inspec_tests:
- test/integration/default

19
esh_borgmatic/metadata.rb Normal file
View file

@ -0,0 +1,19 @@
name 'esh_borgmatic'
maintainer 'https://easyself.host'
maintainer_email 'esh@benpro.fr'
license 'Apache-2.0'
description 'Installs/Configures esh_borgmatic'
version '0.1.0'
chef_version '>= 16.0'
# The `issues_url` points to the location where issues for this cookbook are
# tracked. A `View Issues` link will be displayed on this cookbook's page when
# uploaded to a Supermarket.
#
# issues_url 'https://github.com/<insert_org_here>/esh_borgmatic/issues'
# The `source_url` points to the development repository for this cookbook. A
# `View Source` link will be displayed on this cookbook's page when uploaded to
# a Supermarket.
#
# source_url 'https://github.com/<insert_org_here>/esh_borgmatic'

View file

@ -0,0 +1,17 @@
#
# Cookbook:: esh_borgmatic
# Recipe:: default
#
# Copyright:: 2023, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,122 @@
#
# Cookbook:: esh_borgmatic
# Recipe:: setup
#
# Copyright:: 2023, https://easyself.host
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
repo_passphrase = node['esh']['borgmatic']['config']['repo_passphrase']
repo = node['esh']['borgmatic']['config']['repo']
location_src = node['esh']['borgmatic']['config']['location_src']
before_backup = node['esh']['borgmatic']['config']['before_backup']
healthchecks = node['esh']['borgmatic']['config']['healthchecks']
file '/root/.ssh/borgmatic' do
content node['esh']['borgmatic']['ssh_priv']
owner 'root'
group 'root'
mode '0400'
action :create
end
file '/root/.ssh/borgmatic.pub' do
content node['esh']['borgmatic']['ssh_pub']
owner 'root'
group 'root'
mode '0400'
action :create
end
apt_package 'borgmatic'
execute 'trust the borg repo' do
command <<~EOT
ssh-keyscan #{repo.split('@')[1].split(':')[0]} >> /root/.ssh/known_hosts
EOT
not_if <<~EOT
grep -q #{repo.split('@')[1].split(':')[0]} /root/.ssh/known_hosts
EOT
action :run
end
directory '/etc/borgmatic' do
owner 'root'
group 'root'
mode '0700'
action :create
end
template '/etc/borgmatic/config.yaml' do
owner 'root'
group 'root'
mode '0400'
variables location_src: location_src,
repo: repo,
repo_passphrase: repo_passphrase,
before_backup: before_backup,
healthchecks: healthchecks
action :create
end
systemd_unit 'borgmatic.service' do
content <<~EOU
[Unit]
Description=borgmatic backup
Wants=network-online.target
After=network-online.target
# Prevent borgmatic from running unless the machine is plugged into power. Remove this line if you
# want to allow borgmatic to run anytime.
ConditionACPower=true
ConditionFileNotEmpty=/etc/borgmatic/config.yaml
Documentation=https://torsion.org/borgmatic/
[Service]
Type=oneshot
# Lower CPU and I/O priority.
Nice=19
CPUSchedulingPolicy=batch
IOSchedulingClass=best-effort
IOSchedulingPriority=7
IOWeight=100
Restart=no
# Prevent rate limiting of borgmatic log events. If you are using an older version of systemd that
# doesn't support this (pre-240 or so), you may have to remove this option.
LogRateLimitIntervalSec=0
# Delay start to prevent backups running during boot. Note that systemd-inhibit requires dbus and
# dbus-user-session to be installed.
ExecStartPre=sleep 1m
ExecStart=systemd-inhibit --who="borgmatic" --why="Prevent interrupting scheduled backup" /usr/bin/borgmatic --verbosity -1 --syslog-verbosity 1
EOU
action [:create, :enable]
end
systemd_unit 'borgmatic.timer' do
content <<~EOU
[Unit]
Description=Run borgmatic backup
[Timer]
OnCalendar=#{node['esh']['borgmatic']['timer']}
Persistent=true
[Install]
WantedBy=timers.target
EOU
verify false
action [:create, :enable]
end

View file

@ -0,0 +1,54 @@
location:
source_directories:
- /var/backups/<%= @location_src %>
repositories:
- <%= @repo %>
exclude_patterns:
- '.zfs'
one_file_system: false
storage:
compression: auto,zstd
encryption_passphrase: <%= @repo_passphrase %>
archive_name_format: "{hostname}-{now:%Y-%m-%d-%H%M%S}"
ssh_command: ssh -i /root/.ssh/borgmatic
retention:
keep_daily: 7
keep_weekly: 4
keep_monthly: 12
keep_yearly: 1
prefix: "{hostname}-"
consistency:
checks:
# Uncomment to always do integrity checks.
# (takes long time for larger repos)
#- repository
- disabled
check_last: 3
prefix: "{hostname}-"
hooks:
# Shell commands to execute before or after a backup
before_backup:
- echo "`date` - Starting custom actions"
<% @before_backup.each do |action| %>
- <%= action %>
<% end %>
- echo "`date` - Starting mysqldump"
- for i in $(lxc list --format csv -c n); do lxc exec $i -- sh -c "test -x /usr/bin/mysqldump && /usr/bin/mysqldump --all-databases > /var/lib/mysql/dump.sql || true"; done
- echo "`date` - Starting zfs-autobackup"
- zfs-autobackup -v local <%= @location_src.split('/')[0] %>
- echo "`date` - Starting borg"
- for i in $(zfs get -r -t filesystem,volume autobackup:local <%= @location_src %> | grep 'autobackup:local.*true' | tail -n +2 | awk '{print $1}'); do zfs set mountpoint=/var/backups/${i} $i ; zfs mount -o ro $i; done
after_backup:
- for i in $(zfs get -r -t filesystem,volume autobackup:local <%= @location_src %> | grep 'autobackup:local.*true' | tail -n +2 | awk '{print $1}'); do zfs umount $i; done
- echo "`date` - Finished backup"
healthchecks: <%= @healthchecks %>

View file

@ -0,0 +1,16 @@
# Chef InSpec test for recipe esh_borgmatic::default
# The Chef InSpec reference, with examples and extensive documentation, can be
# found at https://docs.chef.io/inspec/resources/
unless os.windows?
# This is an example test, replace with your own test.
describe user('root'), :skip do
it { should exist }
end
end
# This is an example test, replace it with your own test.
describe port(80), :skip do
it { should_not be_listening }
end
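
As above, the placeholder tests could be swapped for checks against what the setup recipe manages. A sketch, with paths and unit names taken from that recipe (not part of the original test file):
```ruby
# Hypothetical checks for esh_borgmatic, based on the setup recipe.
describe package('borgmatic') do
  it { should be_installed }
end

describe file('/etc/borgmatic/config.yaml') do
  it { should exist }
  its('mode') { should cmp '0400' }
end

describe systemd_service('borgmatic.timer') do
  it { should be_enabled }
end
```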

25
esh_cinc/.gitignore vendored Normal file
View file

@ -0,0 +1,25 @@
.vagrant
*~
*#
.#*
\#*#
.*.sw[a-z]
*.un~
# Bundler
Gemfile.lock
gems.locked
bin/*
.bundle/*
# test kitchen
.kitchen/
kitchen.local.yml
# Chef Infra
Berksfile.lock
.zero-knife.rb
Policyfile.lock.json
.idea/

Some files were not shown because too many files have changed in this diff.