package and document reverse proxy configuration

This commit is contained in:
jums
2025-02-23 08:42:09 +01:00
parent 75fa3ffa18
commit c14c486a35
4 changed files with 241 additions and 2 deletions

View File

@@ -9,7 +9,7 @@ This repository is packaging Inventaire for Docker production environment. To r
- [Requirements](#requirements)
- [Install](#install)
- [Webserver](#webserver)
- [Reverse proxy configuration](#reverse-proxy-configuration)
- [Usage](#usage)
- [Tips](#tips)
- [Fixtures](#fixtures)
@@ -74,7 +74,49 @@ echo "module.exports = {
" > ./inventaire/config/local-production.cjs
```
NB: Those username and password should match the `COUCHDB_USER` and `COUCHDB_PASSWORD` environment variables set in `docker-compose.yml`
## Reverse proxy configuration
Inventaire only provides configuration files for Nginx.
Run dependencies:
```sh
sudo mkdir -p /tmp/nginx/tmp /tmp/nginx/resize/img/users /tmp/nginx/resize/img/groups /tmp/nginx/resize/img/entities /tmp/nginx/resize/img/remote /tmp/nginx/resize/img/assets
```
Install nginx and certbot
Copy the nginx configuration template
```sh
PUBLIC_HOSTNAME=$(grep -oP 'PUBLIC_HOSTNAME=\K.*' .env) PROJECT_ROOT=$(grep -oP 'PROJECT_ROOT=\K.*' .env) envsubst < nginx/templates/default.conf.template > nginx/default
sudo mv nginx/default /etc/nginx/sites-available/default
```
Activate the configuration file
```sh
sudo ln -s /etc/nginx/sites-available/default /etc/nginx/sites-enabled/default
```
To generate the certificate for your domain as required to make https work, you can use Let's Encrypt:
```sh
sudo systemctl stop nginx
sudo certbot certonly --standalone --post-hook "systemctl restart nginx"
sudo systemctl restart nginx
```
When certbot is done, you may uncomment the lines starting with `# ssl_certificate` and `# ssl_certificate_key` in `/etc/nginx/sites-available/default` and restart nginx.
Certbot should have installed a cron to automatically renew your certificate.
Since the nginx template supports webroot renewal, we suggest you update the renewal config file to use the webroot authenticator:
```sh
# Replace authenticator = standalone by authenticator = webroot
# Add webroot_path = /var/www/certbot
sudo vim /etc/letsencrypt/renewal/your-domain.com.conf
```
## Usage

View File

@@ -0,0 +1,12 @@
# add_header from parent blocks are ignored when the current block also calls add_header
# Thus the need for this snippet, to redefine the same headers in many blocks
# See http://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header
# HSTS: force HTTPS for 2 years, subdomains included, preload-list eligible
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;
# opt out Google Floc see: https://plausible.io/blog/google-floc#how-to-opt-out-of-floc-as-a-web-developer-set-a-permissions-policy
# Quoted and flagged `always` for consistency with the other headers in this
# snippet, so it is also sent on error responses (4xx/5xx)
add_header Permissions-Policy "interest-cohort=()" always;
# source: https://gist.github.com/plentz/6737338
add_header X-Frame-Options "SAMEORIGIN" always;
# source: https://scotthelme.co.uk/hardening-your-http-response-headers/#x-content-type-options
add_header X-Content-Type-Options 'nosniff' always;
# source: https://scotthelme.co.uk/a-new-security-header-referrer-policy/
add_header Referrer-Policy 'strict-origin' always;

11
nginx/snippets/ssl.conf Normal file
View File

@@ -0,0 +1,11 @@
# Recommended by https://ssl-config.mozilla.org/#server=nginx&version=1.18.0&config=intermediate&openssl=1.1.1f&guideline=5.6
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
# The Mozilla intermediate configuration cited above specifies `off`:
# with only strong ciphers offered, letting clients pick their preferred
# cipher (e.g. for hardware acceleration) is the recommended setting
ssl_prefer_server_ciphers off;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
# NOTE(review): ssl_stapling_verify requires a `resolver` directive and
# `ssl_trusted_certificate` to actually verify responses — confirm these
# are set elsewhere (e.g. in nginx.conf), otherwise nginx logs a warning
ssl_dhparam /etc/nginx/dhparam.pem;

View File

@@ -0,0 +1,174 @@
# PROJECT_ROOT and PUBLIC_HOSTNAME are set with nginx image function, which will extract environment variables before nginx starts
# See https://hub.docker.com/_/nginx
# Upstream pool for the Inventaire node.js server, proxied to by the
# @invserver named location below (via proxy_pass http://inventaire)
upstream inventaire {
server 127.0.0.1:3006 fail_timeout=5s;
}
# Using error_page as a way to have a named location that can
# then be shared between several locations, see:
# https://serverfault.com/questions/908086/nginx-directly-send-from-location-to-another-named-location
# https://www.nginx.com/resources/wiki/start/topics/depth/ifisevil/#what-to-do-instead
# Contrary to what the documentation says, the HTTP verbs aren't all converted to GET
# http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page
# 543 is an arbitrary, otherwise-unused status code: locations that should be
# handled by the node.js server simply `return 543;` to jump to @invserver
error_page 543 = @invserver;
# Plain-HTTP server: answers ACME challenges and redirects everything else to HTTPS
server {
listen 80;
listen [::]:80;
# Required to be able to run `certbot -w /var/www/html/`
# NOTE(review): the HTTPS server's acme-challenge location uses /var/www/certbot
# as webroot — confirm which webroot certbot is actually configured with
location /.well-known/ {
root /var/www/html/;
}
# Permanent redirect of all other requests to the HTTPS equivalent URL
location / {
return 301 https://$host$request_uri;
}
}
# Main HTTPS server: serves resized images and static client assets directly,
# and proxies API/dynamic requests to the node.js server via @invserver
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name ${PUBLIC_HOSTNAME};

    # Uncomment once certbot has generated the certificates (see README)
    # ssl_certificate /etc/nginx/ssl/live/${PUBLIC_HOSTNAME}/fullchain.pem;
    # ssl_certificate_key /etc/nginx/ssl/live/${PUBLIC_HOSTNAME}/privkey.pem;
    include /etc/nginx/snippets/ssl.conf;

    client_max_body_size 25M;

    # As long as no secret/sensible data are passed in the body, the BREACH exploit on TLS+compression shouldn't be a concern. Right?
    # https://en.wikipedia.org/wiki/BREACH_(security_exploit)#Mitigation
    # http://security.stackexchange.com/questions/39925/breach-a-new-attack-against-http-what-can-be-done
    # It could be that it was solved by HTTP/2 \o/ https://blog.cloudflare.com/hpack-the-silent-killer-feature-of-http-2
    gzip on;
    gzip_types *;

    # On-The-Fly Image Resizer
    # URLs look like /img/users/300x1200/8185d4e039f52b4faa06a1c277133e9a8232551b
    # for locally hosted images
    # or /img/remote/300x1200/630022006?href=http%3A%2F%2Fescaped.url
    # for remote images, with 630022006 being the hash of the passed href
    # generated by [hashCode](https://github.com/inventaire/inventaire/blob/35b1e63/server/lib/utils/base.js#L69-L80)
    # The hack: I couldn't make the proxy_store work: it never hits the cache, but
    # it does put the resized images in /tmp/nginx/resize, so using a try_files
    # directive instead
    # Sometimes, for some unidentified reason, the cached files end up empty, so it can be useful to add a root cron to remove those files:
    # 0 4 * * * /usr/bin/find /tmp/nginx -type f -size 0 -delete
    # Do not remove the (.*) capture group as it seems to be required by the try_files
    location ~ ^/img/(groups|users|entities|assets)/(.*) {
        include /etc/nginx/snippets/security_headers.conf;
        root /tmp/nginx/resize;
        default_type "image/jpeg";
        # Locally hosted images are content-addressed, thus safe to cache as immutable
        add_header Cache-Control "public, max-age=31536000, immutable";
        add_header X-File-Cache "hit";
        add_header Content-Security-Policy "sandbox";
        # NOTE(review): @invimg is not defined in this view of the template —
        # confirm the named location exists elsewhere in the file
        try_files $uri @invimg;
        limit_except GET {
            deny all;
        }
    }
    # Same as above, but without the immutable
    location ~ ^/img/remote/(.*) {
        include /etc/nginx/snippets/security_headers.conf;
        root /tmp/nginx/resize;
        default_type "image/jpeg";
        add_header X-File-Cache "hit";
        add_header Content-Security-Policy "sandbox";
        try_files $uri @invimg;
        limit_except GET {
            deny all;
        }
    }

    # following aliases made in order to respect the url structure
    # the server alone would follow: especially, mounting /static on /public
    root ${PROJECT_ROOT}/inventaire/client;

    location /public/ {
        include /etc/nginx/snippets/security_headers.conf;
        limit_except GET {
            deny all;
        }
        # Serve pre-compressed .gz files when present
        gzip_static on;
        # Let resources that can't be cache busted
        # - such as opensearch.xml or robots.txt -
        # out of this caching policy
        if ($uri ~ "^/public/(dist|fonts)/" ) {
            include /etc/nginx/snippets/security_headers.conf;
            add_header Cache-Control "public, max-age=31536000, immutable";
            # All headers that aren't in the last block won't be taken in account
            # thus the need to have CORS headers here too
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Allow-Methods' 'GET' always;
        }
    }

    # Pass the request to the node.js server (via error_page 543 -> @invserver)
    location /api {
        return 543;
    }
    location /.well-known/webfinger {
        return 543;
    }
    # Let the API server handle all but /public JSON and RSS requests
    location ~ "^/[^p].*\.(json|rss)$" {
        limit_except GET {
            deny all;
        }
        return 543;
    }

    # Proxy to the node.js upstream, with headers for proxy-awareness
    location @invserver {
        include /etc/nginx/snippets/security_headers.conf;
        # Let the server decide when CORS headers should be added
        # Forward the client-sent Host header (including a non-default port, if any).
        # Was previously set twice ($http_host and $host), which makes nginx send
        # two Host headers upstream: keep a single directive.
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto https;
        # Let the upstream know the original client address
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # Set a large value to let the API determine the appropriate
        # timeout per endpoint
        # http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout
        proxy_read_timeout 3600;
        proxy_redirect off;
        proxy_http_version 1.1;
        proxy_pass http://inventaire;
    }

    location = /favicon.ico {
        include /etc/nginx/snippets/security_headers.conf;
        try_files /public/$uri /public/images/$uri;
        # NOTE(review): `expires` already emits a Cache-Control max-age header;
        # this add_header adds a second Cache-Control header — confirm intended
        expires 30d;
        add_header Cache-Control "public";
    }
    location = /robots.txt {
        include /etc/nginx/snippets/security_headers.conf;
        gzip_static on;
        try_files /public/$uri /$uri;
        expires 1d;
        add_header Cache-Control "public";
    }

    # Prevent exposing git folders such as /public/i18n/.git
    # For why this rule takes precedence over location /public/
    # see http://stackoverflow.com/a/34262192/3324977
    location ~ /\.git {
        deny all;
    }

    # Webroot used for certificate renewal (see README)
    location ^~ '/.well-known/acme-challenge' {
        include /etc/nginx/snippets/security_headers.conf;
        default_type "text/plain";
        root /var/www/certbot;
    }
}