🚀 big update:

added logging to all nginx routes
added loki, promtail to scrape nginx logs
turned i2pd back on,
updated my website's version
upgraded all hosts to 24.05
forgejo: raised the upload size limit to accommodate docker images
privacy frontends:
    added priviblur
    libreddit -> redlib
    added biblioreads

ddns-updater: changed credentials, but there is a bug with porkbun
added penpot
brought back anonymousoverflow
added a readme for the privacy-respecting frontends
This commit is contained in:
2005 2024-06-03 02:06:02 +02:00
parent b6107679d1
commit 9216cbbf62
44 changed files with 610 additions and 374 deletions

View file

@ -21,6 +21,22 @@
"type": "github"
}
},
"backend": {
"flake": false,
"locked": {
"lastModified": 1714162491,
"narHash": "sha256-ncbQIX1XB2XL8lRrIyVzvloev/yuMAeT7O2S1sky1No=",
"owner": "TeamPiped",
"repo": "Piped-Backend",
"rev": "d67e50b5b8aa1b44930574cf67a420f75a08d1e2",
"type": "github"
},
"original": {
"owner": "TeamPiped",
"repo": "Piped-Backend",
"type": "github"
}
},
"darwin": {
"inputs": {
"nixpkgs": [
@ -61,6 +77,93 @@
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"inputs": {
"systems": "systems_4"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_4": {
"locked": {
"lastModified": 1631561581,
"narHash": "sha256-3VQMV5zvxaVLvqqUrNz3iJelLw30mIVSfZmAaauM3dA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "7e5bf3925f6fbdfaf50a2a7ca0be2879c4261d19",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"frontend": {
"flake": false,
"locked": {
"lastModified": 1717236695,
"narHash": "sha256-F/tWsF+StCKi+HQg1CtxVZNCOXG2o7ufhC2IezbSF2s=",
"owner": "TeamPiped",
"repo": "Piped",
"rev": "f94d56c16540282266b94d33c5a865268e255a70",
"type": "github"
},
"original": {
"owner": "TeamPiped",
"repo": "Piped",
"type": "github"
}
},
"gradle2nix": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3"
},
"locked": {
"lastModified": 1717114617,
"narHash": "sha256-zYYnEIpJAST9tm7A58FG+XOuKEDnH8WUGKcXfsTCsoM=",
"owner": "tadfisher",
"repo": "gradle2nix",
"rev": "a9353317959b7627a5754c6863e58231988f548a",
"type": "github"
},
"original": {
"owner": "tadfisher",
"ref": "v2",
"repo": "gradle2nix",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
@ -163,27 +266,142 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1716633019,
"narHash": "sha256-xim1b5/HZYbWaZKyI7cn9TJCM6ewNVZnesRr00mXeS4=",
"lastModified": 1717144377,
"narHash": "sha256-F/TKWETwB5RaR8owkPPi+SPJh83AQsm6KrQAlJ8v/uA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9d29cd266cebf80234c98dd0b87256b6be0af44e",
"rev": "805a384895c696f802a9bf5bf4720f37385df547",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1716769173,
"narHash": "sha256-7EXDb5WBw+d004Agt+JHC/Oyh/KTUglOaQ4MNjBbo5w=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9ca3f649614213b2aaf5f1e16ec06952fe4c2632",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1702151865,
"narHash": "sha256-9VAt19t6yQa7pHZLDbil/QctAgVsA66DLnzdRGqDisg=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "666fc80e7b2afb570462423cb0e1cf1a3a34fedd",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"piped": {
"inputs": {
"backend": "backend",
"frontend": "frontend",
"gradle2nix": "gradle2nix",
"nixpkgs": [
"nixpkgs"
],
"pnpm2nix": "pnpm2nix",
"proxy": "proxy"
},
"locked": {
"lastModified": 1717288685,
"narHash": "sha256-yVKs0wx84eNn01TymHUxp+D2ayoHSNe2HfYwlqo2b2g=",
"owner": "Defelo",
"repo": "piped-nix",
"rev": "12259e1f04e8f981fc0953b1500879de86ca3099",
"type": "github"
},
"original": {
"owner": "Defelo",
"repo": "piped-nix",
"type": "github"
}
},
"pnpm2nix": {
"inputs": {
"flake-utils": "flake-utils_3",
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1706694632,
"narHash": "sha256-ytyTwNPiUR8aq74QlxFI+Wv3MyvXz5POO1xZxQIoi0c=",
"owner": "nzbr",
"repo": "pnpm2nix-nzbr",
"rev": "0366b7344171accc2522525710e52a8abbf03579",
"type": "github"
},
"original": {
"owner": "nzbr",
"repo": "pnpm2nix-nzbr",
"type": "github"
}
},
"proxy": {
"flake": false,
"locked": {
"lastModified": 1717106863,
"narHash": "sha256-N6F6PLg3e78NW/b/c+jI7skUyc9hyClc1MSDaNO8aCo=",
"owner": "TeamPiped",
"repo": "piped-proxy",
"rev": "aad4375921bca890b927f6d4c50dcebd0b65cc2d",
"type": "github"
},
"original": {
"owner": "TeamPiped",
"repo": "piped-proxy",
"type": "github"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"home-manager": "home-manager_2",
"i2pd-exporter": "i2pd-exporter",
"microvm": "microvm",
"nixpkgs": "nixpkgs_2"
"nixpkgs": "nixpkgs_2",
"piped": "piped",
"scribe": "scribe"
}
},
"scribe": {
"inputs": {
"flake-utils": "flake-utils_4",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1708785571,
"narHash": "sha256-s44+L6GSNHI4rBXiICyN64V1gT5WBIELNbR5R/2GH4A=",
"ref": "refs/heads/main",
"rev": "8dda4233ac3a817ccf29c3da3a8b25ddcdffa8ce",
"revCount": 137,
"type": "git",
"url": "https://git.sr.ht/~edwardloveall/scribe"
},
"original": {
"type": "git",
"url": "https://git.sr.ht/~edwardloveall/scribe"
}
},
"spectrum": {
@ -231,6 +449,36 @@
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_4": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View file

@ -2,7 +2,7 @@
description = "4o1x5 infrastructure/homelab";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05";
home-manager = {
url = "github:nix-community/home-manager/release-23.11";
inputs.nixpkgs.follows = "nixpkgs";
@ -12,6 +12,16 @@
url = "git+https://git.4o1x5.dev/4o1x5/i2pd-exporter";
inputs.nixpkgs.follows = "nixpkgs";
};
piped = {
url = "github:Defelo/piped-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
scribe = {
url = "git+https://git.sr.ht/~edwardloveall/scribe";
inputs.nixpkgs.follows = "nixpkgs";
};
microvm = {
url = "github:astro/microvm.nix";
@ -27,13 +37,14 @@
, i2pd-exporter
, microvm
, agenix
, scribe
, piped
}:
let
system = "x86_64-linux";
in
{
nixosConfigurations = {
pink = nixpkgs.lib.nixosSystem {
inherit system;
modules = [
@ -56,6 +67,8 @@
./root.nix
./secrets/carbon.nix
agenix.nixosModules.default
scribe.nixosModules.default
piped.nixosModules.default
home-manager.nixosModules.home-manager
{
home-manager.useGlobalPkgs = true;

View file

@ -1,5 +1,4 @@
{ pkgs, ... }: {
#
imports = [
./hardware-configuration.nix
@ -13,7 +12,10 @@
./services/routes/openproject.nix
./services/routes/hydra.nix
./services/routes/csengo.nix
./services/routes/penpot.nix
./services/routes/matrix.nix
./services/routes/learningpulsedev.nix
#./services/ai.nix
@ -25,7 +27,7 @@
# privacy services
./services/privacy/libreddit.nix
#./services/privacy/safetwitch.nix
./services/privacy/safetwitch.nix
#./services/privacy/piped.nix
./services/privacy/breezewiki.nix
./services/privacy/gothub.nix
@ -38,12 +40,18 @@
./services/privacy/libremdb.nix
./services/privacy/librey.nix
./services/privacy/dumb.nix
./services/privacy/priviblur.nix
#./services/privacy/biblioreads.nix
#./services/privacy/proxitok.nix
#./services/privacy/scribe.nix
#./services/privacy/searxng.nix
#./services/privacy/wikiless.nix
# monitoring
./services/monitoring/exporters/node.nix
./services/monitoring/exporters/smartctl.nix
./services/monitoring/promtail.nix
];
networking.hostName = "carbon";
networking.domain = "4o1x5.dev";

View file

@ -26,6 +26,10 @@
locations."/" = {
proxyPass = " http://127.0.0.1:3000";
};
extraConfig = ''
client_max_body_size 8192M;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};
@ -38,13 +42,12 @@
settings = {
container = {
# TODO fix: networking
# instead of using host, create a subnet that cannot contat other server on my network to avoid being haxxed
# instead of using host, create a subnet that cannot contact other server on my network to avoid being haxxed
network = "host";
};
};
labels = [
"ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
];
name = config.networking.domain;
};

View file

@ -1,33 +1,46 @@
{ pkgs, ... }: {
{ pkgs, config, lib, ... }: {
networking.firewall = {
allowedTCPPorts = [ config.services.promtail.configuration.server.http_listen_port ];
allowedUDPPorts = [ config.services.promtail.configuration.server.http_listen_port ];
};
#$ var/ sudo setfacl -R -m u:promtail:rX log
#$ sudo chown promtail:promtail /tmp/positions.yaml
#$ sudo usermod -a -G systemd-journal promtail
# makeshift permission since promtail by default has no permission to read /var
services.promtail = {
enable = true;
configuration =
{
server = {
http_listen_port = 0;
http_listen_port = 6177;
grpc_listen_port = 0;
log_level = "debug";
};
positions = {
filename = "/tmp/positions.yaml";
};
clients = [
{ url = "https//32.54.31.99:3100/api/prom/push"; }
{
url = "http://32.54.31.99:3100/loki/api/v1/push";
tenant_id = 1;
}
];
scrape_configs = [
{
job_name = "system";
pipeline_stages = [
{ replace = { expression = "(?:[0-9]{1,3}\\.){3}([0-9]{1,3})"; replace = "***"; }; }
];
job_name = "nginx";
static_configs = [
{
targets = [ "localhost" ];
labels = {
job = "nginx_access_log";
job = "nginx";
host = "carbon";
agent = "promtail";
__path__ = "/var/log/nginx/json_access.log";
__path__ = ''/var/log/nginx/*-access.log'';
};
}
];
@ -36,4 +49,3 @@
};
};
}

View file

@ -1,7 +1,79 @@
{ pkgs, inputs, config, ... }:
let
# define logging format in json
# https://grafana.com/grafana/dashboards/12559-loki-nginx-service-mesh-json-version/
log_format = ''
log_format json_analytics escape=json '{'
'"msec": "$msec", ' # request unixtime in seconds with a milliseconds resolution
'"connection": "$connection", ' # connection serial number
'"connection_requests": "$connection_requests", ' # number of requests made in connection
'"pid": "$pid", ' # process pid
'"request_id": "$request_id", ' # the unique request id
'"request_length": "$request_length", ' # request length (including headers and body)
'"remote_addr": "$remote_addr", ' # client IP
'"remote_user": "$remote_user", ' # client HTTP username
'"remote_port": "$remote_port", ' # client port
'"time_local": "$time_local", '
'"time_iso8601": "$time_iso8601", ' # local time in the ISO 8601 standard format
'"request": "$request", ' # full path no arguments if the request
'"request_uri": "$request_uri", ' # full path and arguments if the request
'"args": "$args", ' # args
'"status": "$status", ' # response status code
'"body_bytes_sent": "$body_bytes_sent", ' # the number of body bytes exclude headers sent to a client
'"bytes_sent": "$bytes_sent", ' # the number of bytes sent to a client
'"http_referer": "$http_referer", ' # HTTP referer
'"http_user_agent": "$http_user_agent", ' # user agent
'"http_x_forwarded_for": "$http_x_forwarded_for", ' # http_x_forwarded_for
'"http_host": "$http_host", ' # the request Host: header
'"server_name": "$server_name", ' # the name of the vhost serving the request
'"request_time": "$request_time", ' # request processing time in seconds with msec resolution
'"upstream": "$upstream_addr", ' # upstream backend server for proxied requests
'"upstream_connect_time": "$upstream_connect_time", ' # upstream handshake time incl. TLS
'"upstream_header_time": "$upstream_header_time", ' # time spent receiving upstream headers
'"upstream_response_time": "$upstream_response_time", ' # time spend receiving upstream body
'"upstream_response_length": "$upstream_response_length", ' # upstream response length
'"upstream_cache_status": "$upstream_cache_status", ' # cache HIT/MISS where applicable
'"ssl_protocol": "$ssl_protocol", ' # TLS protocol
'"ssl_cipher": "$ssl_cipher", ' # TLS cipher
'"scheme": "$scheme", ' # http or https
'"request_method": "$request_method", ' # request method
'"server_protocol": "$server_protocol", ' # request protocol, like HTTP/1.1 or HTTP/2.0
'"pipe": "$pipe", ' # "p" if request was pipelined, "." otherwise
'"gzip_ratio": "$gzip_ratio", '
'"http_cf_ray": "$http_cf_ray",'
'"geoip_country_code": "$geoip2_data_country_code"'
'}';
'';
# geoip2 module to get countries from addresses
geoip2 = ''
geoip2 /etc/GeoLite2-Country.mmdb {
auto_reload 5m;
$geoip2_metadata_country_build metadata build_epoch;
$geoip2_data_country_code default=US source=$remote_addr country iso_code;
$geoip2_data_country_name country names en;
}
geoip2 /etc/GeoLite2-City.mmdb {
$geoip2_data_city_name default=London city names en;
}
'';
in
{
services.nginx = {
enable = true;
additionalModules = [ pkgs.nginxModules.geoip2 ];
# add logging to config
commonHttpConfig = ''
${geoip2}
${log_format}
'';
#access_log /var/log/nginx/json_access.log json_analytics;
# ^^ adds logging to every host
virtualHosts = {
"www.${config.networking.domain}" = {
@ -11,6 +83,10 @@
locations."/" = {
root = pkgs.callPackage ../services/website/default.nix { };
};
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
"${config.networking.domain}" = {
@ -20,12 +96,14 @@
locations."/" = {
root = pkgs.callPackage ../services/website/default.nix { };
};
extraConfig = ''
error_page 404 /404.html;
deny 3.1.202.244;
deny 170.64.219.93;
deny 91.215.85.43;
client_max_body_size 900M;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};

View file

@ -8,8 +8,8 @@
"7344:8080"
];
environment = {
APP_URL = "https://.anonymousoverflow.4o1x5.dev";
# TODO add JTW_SIGNING_KEY to work
APP_URL = "https://anonymousoverflow.4o1x5.dev";
JWT_SIGNING_SECRET = "${config.age.secrets.anonymousoverflow.path}";
};
};
};
@ -21,6 +21,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:7344";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -10,9 +10,9 @@
];
};
};
services.nginx = {
virtualHosts = {
"biblioreads.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;

View file

@ -16,6 +16,13 @@
locations."/" = {
proxyPass = " http://127.0.0.1:7382";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
rewrite ^/www.pinterest.com$ http://binternet.${config.networking.domain}/ permanent;
rewrite ^/pinterest.com$ http://binternet.${config.networking.domain}/ permanent;
'';
# ^^^ libreddirect for some reason forgets to delete the pinterest domain
# fixing this bug
};
};
};

View file

@ -17,6 +17,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:1584";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -17,6 +17,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:8332";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -29,6 +29,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:4032";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -1,21 +1,24 @@
{ pkgs, config, ... }: {
# todo redlib instead of libreddit
services.libreddit = {
enable = true;
address = "127.0.0.1";
port = 3672;
package = pkgs.redlib;
};
services.nginx = {
virtualHosts = {
# Privacy services
"libreddit.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:3672";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -26,6 +26,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:7345";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -36,6 +36,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:3345";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -8,26 +8,11 @@
backend = {
port = 5632;
database = {
# TODO fix
#TODO SECRET
host = "127.0.0.1";
username = "piped-backend";
passwordFile = ./piped;
database = "piped-backend";
passwordFile = config.age.secrets.piped.path;
createLocally = false;
};
};
};
services.postgresql = {
enable = true;
enableTCPIP = true;
ensureDatabases = [ "piped-backend" ];
ensureUsers = [
{
name = "piped-backend";
ensureDBOwnership = true;
}
];
};
}

View file

@ -1,7 +1,7 @@
{ pkgs, config, ... }: {
virtualisation.oci-containers.containers = {
# todo fix, requires a config file....
priviblur = {
image = "quay.io/pussthecatorg/priviblur:latest";
ports = [
@ -11,7 +11,6 @@
};
services.nginx = {
virtualHosts = {
"priviblur.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;
@ -24,5 +23,4 @@
};
};
};
}

View file

@ -1,5 +1,5 @@
# Auto-generated using compose2nix v0.2.0-pre.
{ pkgs, lib, ... }:
{ pkgs, lib, config, ... }:
{
services.nginx = {
@ -61,7 +61,7 @@
};
systemd.services."podman-proxitok-signer" = {
serviceConfig = {
Restart = lib.mkOverride 500 "\"no\"";
Restart = lib.mkOverride 500 "no";
};
after = [
"podman-network-docker-compose_proxitok.service"
@ -86,6 +86,7 @@
LATTE_CACHE = "/cache";
REDIS_HOST = "proxitok-redis";
REDIS_PORT = "6379";
APP_URL = "https://proxitok.${config.networking.domain}";
};
volumes = [
"proxitok-cache:/cache:rw"
@ -108,9 +109,10 @@
"--security-opt=no-new-privileges:true"
];
};
systemd.services."podman-proxitok-web" = {
serviceConfig = {
Restart = lib.mkOverride 500 "\"no\"";
Restart = lib.mkOverride 500 "no";
};
after = [
"podman-network-docker-compose_proxitok.service"

View file

@ -34,6 +34,9 @@
locations."/" = {
proxyPass = " http://127.0.0.1:2355";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -29,7 +29,7 @@
# Request URL
PRIVACY_URL = "true";
# Device Type (User agent)
PRIVACY_DEVICE = "false";
PRIVACY_DEVICE = "true";
PRIVACY_DIAGNOSTICS = "false";
};
@ -44,6 +44,9 @@
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:4312";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};

View file

@ -1,8 +1,6 @@
{ pkgs, config, ... }:
{
virtualisation.oci-containers.containers = {
# TODO fix routing
safe-twitch-frontend = {
image = "codeberg.org/safetwitch/safetwitch:latest";
ports = [
@ -22,7 +20,7 @@
"7100:7100"
];
environment = {
URL = "sf.${config.networking.domain}";
URL = "https://sf.${config.networking.domain}";
PORT = "7100";
};
};
@ -30,26 +28,27 @@
services.nginx = {
virtualHosts = {
"safetwitch.${config.networking.domain}" =
{
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:8280";
};
};
"sf.${config.networking.domain}" = {
"safetwitch.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:7100";
proxyPass = " http://127.0.0.1:8280";
};
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
"sf.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://127.0.0.1:7100";
extraConfig = ''
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};
};
}

View file

@ -5,10 +5,12 @@
enable = true;
appDomain = "scribe.${config.networking.domain}";
port = 7283;
# TODO fix since it's readable by nix store...
# TODO , systemd doesnt like this, neither does it actually include the secrets
environmentFile = ''
GITHUB_PERSONAL_ACCESS_TOKEN= ${builtins.readFile config.age.secrets.github-token.path}
GITHUB_USERNAME= ${builtins.readFile config.age.secrets.github-username.path}
GITHUB_PERSONAL_ACCESS_TOKEN=${config.age.secrets.github-token.path}
GITHUB_USERNAME=${config.age.secrets.github-username.path}
SECRET_KEY_BASE=${config.age.secrets.scribe-secret.path}
'';
};

View file

@ -1,5 +1,5 @@
{ pkgs, config, ... }: {
# TODO rework the whole thing
virtualisation.oci-containers.containers = {
searxng = {
@ -7,11 +7,6 @@
ports = [
"3345:3000"
];
# TODO implement limiter
#volumes = [
# "/home/carbon/searxng.yml:/etc/searxng:rw"
#];
};
};

View file

@ -1,118 +0,0 @@
# Auto-generated using compose2nix v0.2.0-pre.
# Wikiless (privacy frontend for Wikipedia) run as two podman containers
# (app + redis) on a dedicated compose network, reverse-proxied by nginx.
# NOTE(review): the original module args were `{ pkgs, lib, ... }` even though
# `config.networking.domain` is referenced below; `config` is added here so
# the module evaluates.
{ pkgs, lib, config, ... }:
{
  services.nginx = {
    virtualHosts = {
      "wikiless.${config.networking.domain}" = {
        forceSSL = true;
        enableACME = true;
        locations."/" = {
          proxyPass = " http://127.0.0.1:8180";
          extraConfig = ''
            access_log /var/log/nginx/$server_name-access.log json_analytics;
          '';
        };
      };
    };
  };
  # Containers
  virtualisation.oci-containers.containers."wikiless" = {
    image = "ghcr.io/metastem/wikiless:latest";
    environment = {
      # redis runs at a fixed IP on the compose network (subnet created below)
      REDIS_HOST = "redis://172.4.0.5:6379";
    };
    ports = [
      "127.0.0.1:8180:8080/tcp"
    ];
    dependsOn = [
      "wikiless_redis"
    ];
    log-driver = "journald";
    extraOptions = [
      "--cap-drop=ALL"
      "--hostname=wikiless"
      "--ip=172.4.0.6"
      "--network-alias=wikiless"
      "--network=docker-compose_wikiless_net"
      "--security-opt=no-new-privileges:true"
    ];
  };
  systemd.services."podman-wikiless" = {
    serviceConfig = {
      Restart = lib.mkOverride 500 "always";
    };
    after = [
      "podman-network-docker-compose_wikiless_net.service"
    ];
    requires = [
      "podman-network-docker-compose_wikiless_net.service"
    ];
    partOf = [
      "podman-compose-docker-compose-root.target"
    ];
    wantedBy = [
      "podman-compose-docker-compose-root.target"
    ];
  };
  virtualisation.oci-containers.containers."wikiless_redis" = {
    image = "redis:latest";
    user = "nobody";
    log-driver = "journald";
    extraOptions = [
      "--cap-add=DAC_OVERRIDE"
      "--cap-add=SETGID"
      "--cap-add=SETUID"
      "--cap-drop=ALL"
      "--hostname=wikiless_redis"
      "--ip=172.4.0.5"
      "--network-alias=wikiless_redis"
      "--network=docker-compose_wikiless_net"
      "--security-opt=no-new-privileges:true"
    ];
  };
  systemd.services."podman-wikiless_redis" = {
    serviceConfig = {
      Restart = lib.mkOverride 500 "always";
    };
    after = [
      "podman-network-docker-compose_wikiless_net.service"
    ];
    requires = [
      "podman-network-docker-compose_wikiless_net.service"
    ];
    partOf = [
      "podman-compose-docker-compose-root.target"
    ];
    wantedBy = [
      "podman-compose-docker-compose-root.target"
    ];
  };
  # Networks
  systemd.services."podman-network-docker-compose_wikiless_net" = {
    path = [ pkgs.podman ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      ExecStop = "${pkgs.podman}/bin/podman network rm -f docker-compose_wikiless_net";
    };
    script = ''
      podman network inspect docker-compose_wikiless_net || podman network create docker-compose_wikiless_net --subnet=172.4.0.0/16
    '';
    partOf = [ "podman-compose-docker-compose-root.target" ];
    wantedBy = [ "podman-compose-docker-compose-root.target" ];
  };
  # Root service
  # When started, this will automatically create all resources and start
  # the containers. When stopped, this will teardown all resources.
  systemd.targets."podman-compose-docker-compose-root" = {
    unitConfig = {
      Description = "Root target generated by compose2nix.";
    };
    wantedBy = [ "multi-user.target" ];
  };
}

View file

@ -13,6 +13,7 @@
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};

View file

@ -14,6 +14,7 @@
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};

View file

@ -2,22 +2,36 @@
services.nginx = {
virtualHosts = {
"lpdev.${config.networking.domain}" =
{
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://32.54.31.99:8181";
};
extraConfig = ''
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
'';
"lpdev.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://32.54.31.99:8181";
};
extraConfig = ''
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
"lpdev-eureka.${config.networking.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = " http://32.54.31.99:8761";
};
extraConfig = ''
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};
};
};
}

View file

@ -24,6 +24,7 @@ in
};
extraConfig = ''
client_max_body_size 9000M;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
"${fqdn}" = {
@ -33,6 +34,7 @@ in
locations."/_matrix".proxyPass = "http://32.54.31.241:8008";
locations."/_synapse".proxyPass = "http://32.54.31.241:8008";
locations."= /.well-known/matrix/client" .extraConfig = mkWellKnown clientConfig;
};
};
};

View file

@ -16,6 +16,7 @@
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};

View file

@ -19,6 +19,7 @@
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};

View file

@ -13,6 +13,7 @@
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
access_log /var/log/nginx/$server_name-access.log json_analytics;
'';
};
};

View file

@ -2,7 +2,7 @@
pkgs.stdenv.mkDerivation rec {
name = "website";
version = "0.1.25";
version = "0.1.29";
src = /home/grape/code/4o1x5/website;
buildInputs = [ pkgs.hugo ];

View file

@ -5,7 +5,8 @@
{
imports =
[ (modulesPath + "/installer/scan/not-detected.nix")
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usb_storage" "sd_mod" "sr_mod" "sdhci_pci" ];
@ -14,13 +15,13 @@
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/16777af0-cb7b-470c-a172-d1761e6a8a12";
{
device = "/dev/disk/by-uuid/16777af0-cb7b-470c-a172-d1761e6a8a12";
fsType = "ext4";
};
swapDevices =
[ { device = "/dev/disk/by-uuid/57420b12-9218-4110-9fb2-22ca3171f6a0"; }
];
[{ device = "/dev/disk/by-uuid/57420b12-9218-4110-9fb2-22ca3171f6a0"; }];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's

View file

@ -5,15 +5,15 @@
./hardware-configuration.nix
./services/firewall.nix
#./services/loki.nix
./services/mumble.nix
#./services/ddns-updater.nix
#./services/ai.nix
./services/ddns-updater.nix
./services/postgresql.nix
./services/matrix.nix
./services/owncast.nix
./services/penpot/docker-compose.nix
# monitoring
./services/monitoring/exporters/node.nix
@ -27,5 +27,9 @@
networking.hostName = "lime";
networking.domain = "4o1x5.dev";
users.users.lime = {
isNormalUser = true;
description = "lime";
extraGroups = [ "networkmanager" "wheel" "docker" ];
};
}

View file

@ -1,12 +1,19 @@
{ pkgs, config, ... }: {
services.inadyn = {
enable = true;
# TODO fix
# TODO wait for fix
# https://github.com/troglobit/inadyn/issues/483
enable = false;
settings.provider.porkbun = {
username = config.age.secrets.porkbun-user.path;
password = config.age.secrets.porkbun.path;
checkip-server = "icanhazip.com";
username = config.age.secrets.porkbun-user.path; # public key
password = config.age.secrets.porkbun.path; # private key
#hostname = [
# "4o1x5.dev"
# "*.4o1x5.dev"
#];
ssl = true;
ttl = 600;
};
};
}

View file

@ -6,7 +6,6 @@
virtualisation.podman = {
enable = true;
autoPrune.enable = true;
dockerCompat = true;
defaultNetwork.settings = {
# Required for container networking to be able to use names.
dns_enabled = true;

View file

@ -1,114 +0,0 @@
# Penpot design-tool stack: frontend + backend + exporter, backed by
# postgres and redis, with mailcatcher capturing outgoing mail.
# (Review fix: restored the compose indentation lost in this capture and
# corrected the "envronment" comment typo.)
version: "3.8"
networks:
  penpot:
volumes:
  penpot_postgres_v15:
  penpot_assets:
services:
  penpot-frontend:
    image: "penpotapp/frontend:latest"
    restart: always
    ports:
      - 9032:80
    volumes:
      - penpot_assets:/opt/data/assets
    depends_on:
      - penpot-backend
      - penpot-exporter
    networks:
      - penpot
    labels:
      - "traefik.enable=true"
    environment:
      - PENPOT_FLAGS=enable-registration enable-login-with-password
  penpot-backend:
    image: "penpotapp/backend:latest"
    restart: always
    volumes:
      - penpot_assets:/opt/data/assets
    depends_on:
      - penpot-postgres
      - penpot-redis
    networks:
      - penpot
    ## Configuration environment variables for the backend container.
    environment:
      - PENPOT_FLAGS=enable-registration enable-login-with-password disable-email-verification enable-smtp enable-prepl-server
      - PENPOT_PUBLIC_URI=https://penpot.4o1x5.dev
      - PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot
      - PENPOT_DATABASE_USERNAME=penpot
      - PENPOT_DATABASE_PASSWORD=penpot
      - PENPOT_REDIS_URI=redis://penpot-redis/0
      - PENPOT_ASSETS_STORAGE_BACKEND=assets-fs
      - PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets
      - PENPOT_TELEMETRY_ENABLED=false
      - PENPOT_SMTP_DEFAULT_FROM=no-reply@example.com
      - PENPOT_SMTP_DEFAULT_REPLY_TO=no-reply@example.com
      - PENPOT_SMTP_HOST=penpot-mailcatch
      - PENPOT_SMTP_PORT=1025
      - PENPOT_SMTP_USERNAME=
      - PENPOT_SMTP_PASSWORD=
      - PENPOT_SMTP_TLS=false
      - PENPOT_SMTP_SSL=false
  penpot-exporter:
    image: "penpotapp/exporter:latest"
    restart: always
    networks:
      - penpot
    environment:
      - PENPOT_PUBLIC_URI=http://penpot-frontend
      - PENPOT_REDIS_URI=redis://penpot-redis/0
  penpot-postgres:
    image: "postgres:15"
    restart: always
    stop_signal: SIGINT
    volumes:
      - penpot_postgres_v15:/var/lib/postgresql/data
    networks:
      - penpot
    environment:
      - POSTGRES_INITDB_ARGS=--data-checksums
      - POSTGRES_DB=penpot
      - POSTGRES_USER=penpot
      - POSTGRES_PASSWORD=penpot
  penpot-redis:
    image: redis:7
    restart: always
    networks:
      - penpot
  penpot-mailcatch:
    image: sj26/mailcatcher:latest
    restart: always
    expose:
      - "1025"
    ports:
      - "1080:1080"
    networks:
      - penpot

View file

@ -14,6 +14,7 @@
# monitoring
./services/monitoring/prometheus.nix
./services/monitoring/loki.nix
./services/monitoring/grafana.nix
./services/monitoring/exporters/node.nix
./services/monitoring/exporters/smartctl.nix
@ -30,7 +31,12 @@
isNormalUser = true;
description = "pink";
extraGroups = [ "networkmanager" "wheel" "docker" ];
packages = with pkgs; [ ];
};
users.users."pink".openssh.authorizedKeys.keys = [
# for learning pulse deploy action
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDLEgQ0G40vIpnUmO0CUj6tyj2khAwcejs15h+BuMmhz/+v7ljRsu2G6cnex2lXpEmFQgIsmA0JAj8d8yFG8lp+pdIsdiqYmRhr3Tlc1FE5NinlYkSbeZi28bDf8AVL45Jbvu5wNYt/gJvj1G73oI0L1qz2y73q+g0FEjANtBXg9eN8/Scf1ohZco017lzcLNUCIxavwuzSRsWLHCfn7h1nrhdmFCUm7dipnycYX3osHrohT/7/rwikyjgEpioimVHvilLwMboQ22B4ztKqNgs+8Ob/UZTLZ0tzOGJ4hIa/PqZC0Az6Id44Mi8B6fY0qi9qO+fL39uim6sYxbden5E3QYxdufR28Zs5IqAMBRCmu3F9CzHWEggqbqS9KUxOdJwez7zk6nCjo5Jimzpuf8W2M8ty1F3jt98q009l0yQoTs04SE5DNWkmAFVLDZFgqBrcGp5e8UzuVIZoFOYpdMrre5Jd6Dur2WUqMlFKQLjBN36xYR0YKfNz347TucLr7aJwJ9ZkN1UthgaLna07tkry9f7YjsBOvhOSf+LBofpWgaQ8NRdgDjskFc4AjmqkB0i7giwkQFBzLGYtl3njPYW6sfEg89pQtxzxIi09iEuK2h/H1EjhJyQ3KmN/sRG6eutpexOCKFc+YqW/u303tTHGPGIGaCbjjIpR90vz3brRWQ== server@pink"
];
}

View file

@ -18,6 +18,8 @@
8422 # csengoclient
5333 # csengoserver
5432
# learningpulse
8181
];
allowedUDPPorts = [
@ -35,6 +37,9 @@
5333 # csengoserver
5432
# learningpulse
8181
config.services.i2pd.port
];
};

View file

@ -1,7 +1,7 @@
{ pkgs, lib, config, ... }: {
services.i2pd = {
enable = false;
enable = true;
port = 9732;
enableIPv6 = true;
floodfill = true;
@ -26,7 +26,7 @@
# need to create a nginx proxy that proxies the reseed file
services.prometheus.exporters.i2pd = {
enable = false;
enable = true;
port = 3321;
openFirewall = true;
routerAddress = "https://127.0.0.1:${toString config.services.i2pd.proto.i2pControl.port}";

View file

@ -19,6 +19,11 @@ in
url = "http://127.0.0.1:${toString config.services.prometheus.port}";
isDefault = true;
}
# {
# name = "loki";
# type = "loki";
# url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}";
# }
];
};
};

View file

@ -1,46 +1,42 @@
{ pkgs, ... }: {
#
{ pkgs, config, ... }: {
services.loki = {
enable = false;
enable = true;
configuration = {
auth_enabled = false;
server = {
http_listen_port = 3100;
grpc_listen_port = 9096;
};
ingester = {
lifecycler = {
address = "0.0.0.0";
ring = {
kvstore.store = "inmemory";
replication_factor = 1;
};
common = {
instance_addr = "127.0.0.1";
path_prefix = "/tmp/loki";
storage = { filesystem = { chunks_directory = "/tmp/loki/chunks"; rules_directory = "/tmp/loki/rules"; }; };
replication_factor = 1;
ring = { kvstore = { store = "inmemory"; }; };
};
query_range = {
results_cache.cache.embedded_cache = {
enabled = true;
max_size_mb = 100;
};
chunk_idle_period = "15m";
};
schema_config.configs = [
{
from = "2020-02-25";
store = "boltdb";
object_store = "filesystem";
schema = "v11";
index = {
prefix = "index_";
period = "24h";
};
}
];
storage_config = {
boltdb.directory = "/tmp/loki/index";
schema_config = {
configs = [
{
from = "2020-10-24";
store = "tsdb";
object_store = "filesystem";
schema = "v13";
index = { prefix = "index_"; period = "24h"; };
}
];
};
limits_config = {
enforce_metric_name = false;
reject_old_samples = true;
reject_old_samples_max_age = "500h";
};
chunk_store_config.max_look_back_period = "0s";
};
};
networking.firewall = {
allowedTCPPorts = [ config.services.loki.configuration.server.http_listen_port ];
allowedUDPPorts = [ config.services.loki.configuration.server.http_listen_port ];
};
}

View file

@ -1,3 +1,47 @@
# Infrastructure of 4o1x5.dev
Nix configs for all servers in my homelab. Including all services available for public and private use.
## Setting up projects that don't have options in nixpkgs
Using compose2nix projects can be converted into `oci-container` definitions which we can use to deploy.
## Privacy respecting services
I will most likely deploy most of the services available in Libredirect. Since no two projects are made equally, it's really hard to set them up one by one.
Most projects don't even have a docker container, let alone a guide to deploy them. Some have nix flakes, but are missing crucial features or they are configured all wrong...
Here is a list of services I plan on hosting and their statuses.
| name | deployed? | info | I2P | Tor | Lokinet | Announced to instance list |
| -------------------------------------------------------- | --------- | ------------------------------------------------------------------------ | --- | --- | ------- | ------------------------------------------------ |
| [anonymousoverflow](https://anonymousoverflow.4o1x5.dev) | ✅ | works, but secret needs work | ❌ | ❌ | ❌ | ❌ |
| [binternet](https://binternet.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ✅ |
| [breezewiki](https://breezewiki.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ |
| [dumb](https://dumb.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ |
| [gothub](https://gothub.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ (requires me to sign up for github (fuck no)) |
| [libreddit](https://libreddit.4o1x5.dev) | ✅ | needs to be migrated to redlib | ❌ | ❌ | ❌ | ❌ (owner didn't respond) |
| [libremdb](https://libremdb.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ |
| [librey](https://librey.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ |
| [piped](https://piped.4o1x5.dev) | ❌ | piped-nix deploys the database wrong | ❌ | ❌ | ❌ | ❌ |
| [priviblur](https://priviblur.4o1x5.dev) | ❌ | need config file defined (impure) | ❌ | ❌ | ❌ | ❌ |
| [quetre](https://quetre.4o1x5.dev)                       | ❓         | returns 503                                                              | ❌   | ❌   | ❌       | ❌                                                |
| [rimgo](https://rimgo.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ |
| [safetwitch](https://safetwitch.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ✅ |
| [searxng](https://searxng.4o1x5.dev) | ❌ | no config option in nixpkgs | ❌ | ❌ | ❌ | ❌ |
| [hyperpipe](https://hyperpipe.4o1x5.dev)                 | ❌         | no docker compose project & needs piped server                           | ❌   | ❌   | ❌       | ❌                                                |
| [proxitok](https://proxitok.4o1x5.dev) | ❌ | complicated to setup | ❌ | ❌ | ❌ | ❌ |
| [proxigram](https://proxigram.4o1x5.dev) | ❌ | deprecated | ❌ | ❌ | ❌ | ❌ |
| pixivfe | ❌ | [csam platform](https://www.bbc.com/news/uk-65932372), will never deploy | ❌ | ❌ | ❌ | ❌ |
| [scribe](https://scribe.4o1x5.dev) | ❌ | flake has no secret for variables | ❌ | ❌ | ❌ | ❌ |
| [laboratory](https://laboratory.4o1x5.dev) | ❌ | todo | ❌ | ❌ | ❌ | ❌ |
| [reuter](https://reuters.4o1x5.dev) | ❌ | todo | ❌ | ❌ | ❌ | ❌ |
| [snopes](https://snopes.4o1x5.dev) | ❌ | todo | ❌ | ❌ | ❌ | ❌ |
| [ifunny](https://ifunny.4o1x5.dev) | ❌ | no nixpkgs/docker | ❌ | ❌ | ❌ | ❌ |
| [tenor](https://tenor.4o1x5.dev) | ❌ | no nixpkgs/docker | ❌ | ❌ | ❌ | ❌ |
| [knowyourmeme](https://knowyourmeme.4o1x5.dev) | ❌ | no nixpkgs/docker | ❌ | ❌ | ❌ | ❌ |
| [urbandictionary](https://urbandictionary.4o1x5.dev) | ❌ | no nixpkgs/docker | ❌ | ❌ | ❌ | ❌ |
| [biblioreads](https://biblioreads.4o1x5.dev) | ✅ | works | ❌ | ❌ | ❌ | ❌ |
| [wolframalpha](https://wolframalpha.4o1x5.dev) | ❌ | no nixpkgs/docker | ❌ | ❌ | ❌ | ❌ |
| [wikiless](https://wikiless.4o1x5.dev) | ❌ | todo no nixpkgs/docker | ❌ | ❌ | ❌ | ❌ |
I want to share my instances for public use, but most of these services' code is hosted on GitHub, meaning I would have to sign up and make a pull request there, which I will never do.

View file

@ -39,7 +39,7 @@
};
services.xserver = {
layout = "us";
xkb.layout = "us";
};
users.users."root".openssh.authorizedKeys.keys = [
@ -60,9 +60,12 @@
nixpkgs.config.allowUnfree = true;
system.stateVersion = "23.11";
# DOCKER
environment.systemPackages = with pkgs; [
docker-compose
neovim
git
zip
w3m
];
virtualisation.docker = {