From 102d6125ebd166b6b94fdf3a1c02b66a0eb3b1a1 Mon Sep 17 00:00:00 2001 From: Ariel Richtman <10679234+arichtman@users.noreply.github.com> Date: Sun, 29 Dec 2024 15:28:56 +1000 Subject: [PATCH] chore: switch kubernetes over to public domain --- README.md | 4 +- certificates/control-node-certs.sh | 34 +++++----- certificates/keysync.sh | 11 --- certificates/worker-node-certs.sh | 18 ++--- cilium.yaml | 2 +- label.sh | 28 ++++---- modules/home/personal-machine/default.nix | 3 + modules/nixos/control-node/monitoring.nix | 17 ++--- modules/nixos/k8s/apiserver.nix | 4 +- modules/nixos/k8s/controller.nix | 14 ++-- modules/nixos/k8s/kubelet.nix | 83 ++++++++++++++++------- modules/nixos/k8s/scheduler.nix | 2 +- modules/nixos/lab-node/default.nix | 1 - 13 files changed, 125 insertions(+), 96 deletions(-) delete mode 100755 certificates/keysync.sh diff --git a/README.md b/README.md index 0216917..e4846a5 100644 --- a/README.md +++ b/README.md @@ -474,7 +474,7 @@ step certificate create cluster-admin cluster-admin.pem cluster-admin-key.pem \ --set organization=system:masters # Construct the kubeconfig file # Here we're embedding certificates to avoid breaking stuff if we move or remove cert files -kubectl config set-cluster home --server https://fat-controller.local:6443 --certificate-authority ca.pem --embed-certs=true +kubectl config set-cluster home --server https://fat-controller.systems.richtman.au:6443 --certificate-authority ca.pem --embed-certs=true kubectl config set-credentials home-admin --client-certificate cluster-admin.pem --client-key cluster-admin-key.pem --embed-certs=true kubectl config set-context --user home-admin --cluster home home-admin ``` @@ -641,7 +641,7 @@ some _very_ wip notes about the desktop. Some diagnostic tests for mDNS: ``` -export HOST_NAME=fat-controller.local. +export HOST_NAME=fat-controller.systems.richtman.au. # This is our bedrock of truth. 
It works consistently and can be easily viewed avahi-resolve-host-name $HOST_NAME tcpdump udp port 5353 # Optionally -Qin diff --git a/certificates/control-node-certs.sh b/certificates/control-node-certs.sh index 569c8e3..eba0692 100755 --- a/certificates/control-node-certs.sh +++ b/certificates/control-node-certs.sh @@ -7,7 +7,7 @@ export NODE_DNS_NAME="${1}" # etcd TLS step certificate create etcd etcd-tls.pem etcd-tls-key.pem --ca etcd.pem --ca-key etcd-key.pem \ --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json --not-after 8760h --bundle \ - --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.internal" --san "${NODE_DNS_NAME}.local" --san localhost --san 127.0.0.1 --san ::1 + --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.internal" --san "${NODE_DNS_NAME}.systems.richtman.au" --san localhost --san 127.0.0.1 --san ::1 # apiserver client to etcd step certificate create kube-apiserver-etcd-client kube-apiserver-etcd-client.pem kube-apiserver-etcd-client-key.pem \ @@ -18,9 +18,9 @@ step certificate create kube-apiserver-etcd-client kube-apiserver-etcd-client.pe # Note that your local domain and private IP for in-cluster may vary step certificate create kube-apiserver kube-apiserver-tls.pem kube-apiserver-tls-key.pem --ca ca.pem --ca-key ca-key.pem \ --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json --not-after 8760h --bundle \ - --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.local" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 --san 10.0.0.1 \ + --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.systems.richtman.au" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 --san 10.0.0.1 \ --san kubernetes --san kubernetes.default --san kubernetes.default.svc \ - --san kubernetes.default.svc.cluster --san kubernetes.default.svc.cluster.local + --san kubernetes.default.svc.cluster --san kubernetes.default.svc.cluster.systems.richtman.au # service account token signing openssl req -new -x509 -days 365 -newkey rsa:4096 -keyout service-account-key.pem -sha256 \ @@ -35,7 +35,7 @@ step certificate create system:kube-controller-manager controllermanager-apiserv # Controller manager TLS step certificate create kube-controllermanager controllermanager-tls-cert-file.pem controllermanager-tls-private-key-file.pem --ca ca.pem --ca-key ca-key.pem \ --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json --not-after 8760h --bundle \ - --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.local" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 + --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.systems.richtman.au" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 # Scheduler apiserver client step certificate create system:kube-scheduler scheduler-apiserver-client.pem scheduler-apiserver-client-key.pem \ @@ -45,7 +45,7 @@ step certificate create system:kube-scheduler scheduler-apiserver-client.pem sch # Scheduler TLS step certificate create scheduler scheduler-tls-cert-file.pem scheduler-tls-private-key-file.pem --ca ca.pem --ca-key ca-key.pem \ --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json --not-after 8760h --bundle \ - --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.local" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 + --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.systems.richtman.au" --san "${NODE_DNS_NAME}.internal" --san localhost --san 
127.0.0.1 --san ::1 # APIserver client to kubelet step certificate create "system:node:${NODE_DNS_NAME}" kubelet-apiserver-client.pem kubelet-apiserver-client-key.pem \ @@ -53,21 +53,21 @@ step certificate create "system:node:${NODE_DNS_NAME}" kubelet-apiserver-client. --not-after 8760h --set organization=system:nodes # Copy everything over, using ~ so we don't hit permissions issues -rsync service-account*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" -rsync scheduler*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" -rsync etcd*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" -rsync controller*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" -rsync kube*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" -rsync ca*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" +rsync service-account*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" +rsync scheduler*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" +rsync etcd*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" +rsync controller*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" +rsync kube*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" +rsync ca*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" # Remove any existing secrets so it's just this run -ssh "${NODE_DNS_NAME}.local" sudo rm -fr /var/lib/kubernetes/secrets +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo rm -fr /var/lib/kubernetes/secrets # Shift our stuff into the protected location -ssh "${NODE_DNS_NAME}.local" sudo mv --force "~/secrets" /var/lib/kubernetes/ +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo mv --force "~/secrets" /var/lib/kubernetes/ # Everything owned by the kube service user -ssh "${NODE_DNS_NAME}.local" sudo chown kubernetes: "/var/lib/kubernetes/secrets/*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chown kubernetes: "/var/lib/kubernetes/secrets/*.pem" # Lock permissions a bit -ssh "${NODE_DNS_NAME}.local" sudo chmod 444 "/var/lib/kubernetes/secrets/*.pem" -ssh "${NODE_DNS_NAME}.local" sudo chmod 400 "/var/lib/kubernetes/secrets/*key*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chmod 444 "/var/lib/kubernetes/secrets/*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chmod 400 "/var/lib/kubernetes/secrets/*key*.pem" # Set ownership of etcd stuff specifically -ssh "${NODE_DNS_NAME}.local" sudo chown etcd: "/var/lib/kubernetes/secrets/etcd*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chown etcd: "/var/lib/kubernetes/secrets/etcd*.pem" diff --git a/certificates/keysync.sh b/certificates/keysync.sh deleted file mode 100755 index 9a2da27..0000000 --- a/certificates/keysync.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -eu - -rsync etcd*.pem "${1}.local:/home/nixos/kubernetes" -rsync kube*.pem "${1}.local:/home/nixos/kubernetes" -rsync ca*.pem "${1}.local:/home/nixos/kubernetes" -rsync proxy-*.pem "${1}.local:/home/nixos/kubernetes" -ssh "${1}.internal" sudo cp "./kubernetes/*.pem" /var/lib/kubernetes/secrets -ssh "${1}.internal" sudo chown kubernetes: "/var/lib/kubernetes/secrets/*.pem" -ssh "${1}.internal" sudo chown etcd: "/var/lib/kubernetes/secrets/etcd*.pem" -ssh "${1}.internal" sudo chmod 444 "/var/lib/kubernetes/secrets/*.pem" -ssh "${1}.internal" sudo chmod 400 "/var/lib/kubernetes/secrets/*key*.pem" diff --git a/certificates/worker-node-certs.sh b/certificates/worker-node-certs.sh index 968dd10..a235834 100755 --- a/certificates/worker-node-certs.sh +++ b/certificates/worker-node-certs.sh @@ -12,7 +12,7 @@ step certificate create 
kubelet-kubeconfig-client-certificate kubelet-kubeconfig # kubelet TLS step certificate create kubelet kubelet-tls-cert-file.pem kubelet-tls-private-key-file.pem --ca ca.pem --ca-key ca-key.pem \ --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json --not-after 8760h --bundle \ - --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.local" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 + --san "${NODE_DNS_NAME}" --san "${NODE_DNS_NAME}.systems.richtman.au" --san "${NODE_DNS_NAME}.internal" --san localhost --san 127.0.0.1 --san ::1 # # For client authentication to the proxy services # step certificate create kube-apiserver-proxy-client kube-apiserver-proxy-client.pem kube-apiserver-proxy-client-key.pem \ # --ca ca.pem --ca-key ca-key.pem --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json \ @@ -22,14 +22,14 @@ step certificate create kubelet kubelet-tls-cert-file.pem kubelet-tls-private-ke # --ca ca.pem --ca-key ca-key.pem --insecure --no-password --template granular-dn-leaf.tpl --set-file dn-defaults.json \ # --not-after 8760h --set organization=system:node-proxier -# rsync proxy-*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" +# rsync proxy-*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" -rsync kubelet*.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" -rsync ca.pem "${NODE_DNS_NAME}.local:/home/nixos/secrets" +rsync kubelet*.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" +rsync ca.pem "${NODE_DNS_NAME}.systems.richtman.au:/home/nixos/secrets" # Kubelet needs to run as root so the specific files that it accesses should be owned by it. -ssh "${NODE_DNS_NAME}.local" sudo rm -fr /var/lib/kubelet/secrets/ -ssh "${NODE_DNS_NAME}.local" sudo mv --force "~/secrets" /var/lib/kubelet/ -ssh "${NODE_DNS_NAME}.local" sudo chown root: "/var/lib/kubelet/secrets/*.pem" -ssh "${NODE_DNS_NAME}.local" sudo chmod 444 "/var/lib/kubelet/secrets/*.pem" -ssh "${NODE_DNS_NAME}.local" sudo chmod 400 "/var/lib/kubelet/secrets/*key*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo rm -fr /var/lib/kubelet/secrets/ +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo mv --force "~/secrets" /var/lib/kubelet/ +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chown root: "/var/lib/kubelet/secrets/*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chmod 444 "/var/lib/kubelet/secrets/*.pem" +ssh "${NODE_DNS_NAME}.systems.richtman.au" sudo chmod 400 "/var/lib/kubelet/secrets/*key*.pem" diff --git a/cilium.yaml b/cilium.yaml index e8de604..4ad7453 100644 --- a/cilium.yaml +++ b/cilium.yaml @@ -3,7 +3,7 @@ kubeProxyReplacement: true # Enables healthz endpoint kubeProxyReplacementHealthzBindAddr: "[::]:10256" # Required to bypass the non-working default APIserver service without kube-proxy -k8sServiceHost: fat-controller.local +k8sServiceHost: fat-controller.systems.richtman.au k8sServicePort: 6443 # Set our networking stack ipv4: diff --git a/label.sh b/label.sh index 7d638c6..b816e49 100755 --- a/label.sh +++ b/label.sh @@ -1,18 +1,18 @@ #!/bin/env bash -kubectl label no/fat-controller.local node-role.kubernetes.io/master=master -kubectl label no/fat-controller.local kubernetes.richtman.au/ephemeral=false +kubectl label no/fat-controller.systems.richtman.au node-role.kubernetes.io/master=master +kubectl label no/fat-controller.systems.richtman.au kubernetes.richtman.au/ephemeral=false -kubectl label no/mum.local node-role.kubernetes.io/worker=worker -kubectl label no/mum.local 
kubernetes.richtman.au/ephemeral=false +kubectl label no/mum.systems.richtman.au node-role.kubernetes.io/worker=worker +kubectl label no/mum.systems.richtman.au kubernetes.richtman.au/ephemeral=false -kubectl label no/patient-zero.local node-role.kubernetes.io/worker=worker -kubectl label no/patient-zero.local kubernetes.richtman.au/ephemeral=true -kubectl label no/dr-singh.local node-role.kubernetes.io/worker=worker -kubectl label no/dr-singh.local kubernetes.richtman.au/ephemeral=true -kubectl label no/smol-bat.local node-role.kubernetes.io/worker=worker -kubectl label no/smol-bat.local kubernetes.richtman.au/ephemeral=true -kubectl label no/tweedledee.local node-role.kubernetes.io/worker=worker -kubectl label no/tweedledee.local kubernetes.richtman.au/ephemeral=true -kubectl label no/tweedledum.local node-role.kubernetes.io/worker=worker -kubectl label no/tweedledum.local kubernetes.richtman.au/ephemeral=true +kubectl label no/patient-zero.systems.richtman.au node-role.kubernetes.io/worker=worker +kubectl label no/patient-zero.systems.richtman.au kubernetes.richtman.au/ephemeral=true +kubectl label no/dr-singh.systems.richtman.au node-role.kubernetes.io/worker=worker +kubectl label no/dr-singh.systems.richtman.au kubernetes.richtman.au/ephemeral=true +kubectl label no/smol-bat.systems.richtman.au node-role.kubernetes.io/worker=worker +kubectl label no/smol-bat.systems.richtman.au kubernetes.richtman.au/ephemeral=true +kubectl label no/tweedledee.systems.richtman.au node-role.kubernetes.io/worker=worker +kubectl label no/tweedledee.systems.richtman.au kubernetes.richtman.au/ephemeral=true +kubectl label no/tweedledum.systems.richtman.au node-role.kubernetes.io/worker=worker +kubectl label no/tweedledum.systems.richtman.au kubernetes.richtman.au/ephemeral=true diff --git a/modules/home/personal-machine/default.nix b/modules/home/personal-machine/default.nix index afb355e..7870700 100644 --- a/modules/home/personal-machine/default.nix +++ b/modules/home/personal-machine/default.nix @@ -25,6 +25,9 @@ in "*.local" = { user = "nixos"; }; + "*.systems.richtman.au" = { + user = "nixos"; + }; github = { hostname = "github.com"; user = "git"; diff --git a/modules/nixos/control-node/monitoring.nix b/modules/nixos/control-node/monitoring.nix index e78e0ca..ce8c71d 100644 --- a/modules/nixos/control-node/monitoring.nix +++ b/modules/nixos/control-node/monitoring.nix @@ -14,7 +14,7 @@ "localhost:${builtins.toString port}" ]; labels = { - instance = "fat-controller.local"; + instance = "fat-controller.systems.richtman.au"; }; } ]; @@ -26,7 +26,7 @@ source_labels = ["__address__"]; regex = ".*localhost.*"; target_label = "instance"; - replacement = "fat-controller.local"; + replacement = "fat-controller.systems.richtman.au"; } # Remove port numbers { @@ -136,18 +136,19 @@ in { "localhost:9100" ]; labels = { - instance = "fat-controller.local"; + instance = "fat-controller.systems.richtman.au"; }; } { targets = [ "opnsense.internal:9100" "proxmox.internal:9100" - "patient-zero.local:9100" - "dr-singh.local:9100" - "smol-bat.local:9100" - "tweedledee.local:9100" - "tweedledum.local:9100" + "mum.systems.richtman.au:9100" + "patient-zero.systems.richtman.au:9100" + "dr-singh.systems.richtman.au:9100" + "smol-bat.systems.richtman.au:9100" + "tweedledee.systems.richtman.au:9100" + "tweedledum.systems.richtman.au:9100" ]; } ]; diff --git a/modules/nixos/k8s/apiserver.nix b/modules/nixos/k8s/apiserver.nix index d33c1fb..cab1bd4 100644 --- a/modules/nixos/k8s/apiserver.nix +++ b/modules/nixos/k8s/apiserver.nix 
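The rsync/ssh steps, the relabelled nodes, and the node-exporter scrape targets above all assume the new `*.systems.richtman.au` names resolve and are reachable from the admin machine. A minimal reachability sketch, with the hostnames taken from label.sh (adjust the list to your own nodes):

```
# Confirm each renamed host resolves and exposes the ports the scripts and
# Prometheus targets above rely on (22 for rsync/ssh, 9100 for node-exporter).
for h in fat-controller mum patient-zero dr-singh smol-bat tweedledee tweedledum; do
  fqdn="${h}.systems.richtman.au"
  getent hosts "${fqdn}" || echo "no DNS for ${fqdn}"
  nc -zw2 "${fqdn}" 22 && nc -zw2 "${fqdn}" 9100
done
```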
@@ -61,8 +61,8 @@ "${cfg.secretsPath}/service-account-key.pem" # TODO: Revisit "--service-cluster-ip-range" - "2001:db8:1234:5678:8:3::/112" - # "2403:580a:e4b1::/108" + # "2001:db8:1234:5678:8:3::/112" + "2403:580a:e4b1::/108" # Can't mix public and private # "10.100.100.0/24,2403:580a:e4b1:fffd::/64" "--tls-cert-file" diff --git a/modules/nixos/k8s/controller.nix b/modules/nixos/k8s/controller.nix index 86a8091..eea2c40 100644 --- a/modules/nixos/k8s/controller.nix +++ b/modules/nixos/k8s/controller.nix @@ -23,7 +23,7 @@ name = "default"; cluster = { certificate-authority = "${topConfig.secretsPath}/ca.pem"; - server = "https://fat-controller.local:6443"; + server = "https://fat-controller.systems.richtman.au:6443"; }; } ]; @@ -56,14 +56,14 @@ if (builtins.substring 0 2 x) == "--" then "${x}=" else "${x} ") [ - "--allocate-node-cidrs" - "true" - "--service-cluster-ip-range" - "2001:db8:1234:5678:8:3::/112" + # "--allocate-node-cidrs" + # "true" + # "--service-cluster-ip-range" + # "2001:db8:1234:5678:8:3::/112" "--node-cidr-mask-size" "120" - "--cluster-cidr" - "2001:db8:1234:5678:8:2::/104" + # "--cluster-cidr" + # "2001:db8:1234:5678:8:2::/104" "--authorization-kubeconfig" controllerKubeconfigFile # "--authentication-kubeconfig" diff --git a/modules/nixos/k8s/kubelet.nix b/modules/nixos/k8s/kubelet.nix index 7c6eda3..faef740 100644 --- a/modules/nixos/k8s/kubelet.nix +++ b/modules/nixos/k8s/kubelet.nix @@ -70,7 +70,7 @@ name = "default"; cluster = { certificate-authority = "${kubeletSecretsPath}/ca.pem"; - server = "https://fat-controller.local:6443"; + server = "https://fat-controller.systems.richtman.au:6443"; }; } ]; @@ -86,6 +86,7 @@ current-context = "default"; }; kubeletKubeconfigFile = pkgs.writeText "kubelet-kubeconfig" (builtins.toJSON kubeletKubeconfig); + # Ref: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ serviceArgs = lib.concatMapStrings (x: if (builtins.substring 0 2 x) == "--" then "${x}=" @@ -94,15 +95,17 @@ kubeletConfigFile "--node-ip" "::" - "--kubeconfig" - kubeletKubeconfigFile "--config-dir" kubeletConfigDropinPath - # Seems to be necessary to allow the kubelet to register it's HostName address type - # with the domain qualification. - # TODO: See about having the golang DNS resolution stack include mDNS :eyeroll: + # Seems to be necessary to allow the kubelet to register it's HostName address type with the domain qualification. + # I can't locate a cluster external domain setting or a dns search domain. + # GoLang running it's own DNS stack doesn't help here either. "--hostname-override" - "${config.networking.hostName}.local" + config.networking.fqdn + "--kubeconfig" + kubeletKubeconfigFile + "--node-labels" + "kubernetes.richtman.au/myval=true" "--v" # TODO: Remove after debugging "2" ]; @@ -180,31 +183,29 @@ in { }; }; }; - # This is just to bootstrap us into being able to run containers, - # since Cilium needs to run some to deploy itself. - # Linux itself requires the loopback device apparently, - # and for this reason I think containerd won't actually launch containers if /etc/cni/net.d has no configurations + # This is just to bootstrap us into being able to run containers, since Cilium needs to run some to deploy itself. 
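The kubeconfigs above now point at `https://fat-controller.systems.richtman.au:6443`, so the apiserver's serving certificate has to carry that name as a SAN (added in control-node-certs.sh). A rough check before restarting the controller manager and kubelet against the new `server` URL, assuming the ca.pem from the cert scripts is on hand:

```
# Dump the SANs the apiserver is actually presenting on the new name.
openssl s_client -connect fat-controller.systems.richtman.au:6443 </dev/null 2>/dev/null \
  | openssl x509 -noout -ext subjectAltName
# TLS validation is the point here; a 401/403 response body is fine.
curl --cacert ca.pem https://fat-controller.systems.richtman.au:6443/version
```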
+ # Linux itself requires the loopback device apparently, and for this reason I think containerd won't actually launch containers if /etc/cni/net.d has no configurations # This doesn't work tho - has issues finding the sandbox so can't actually run pods environment.etc = { # "cni/net.d/99-loopback.conf".text = '' # { - # "cniVersion": "0.3.1", + # "cniVersion": "1.0.0", # "name": "lo", # "type": "loopback" # } # ''; # Ref: https://github.com/containernetworking/plugins/tree/main/plugins/main/dummy - # "cni/net.d/98-dummy.conf".text = '' - # { - # "cniVersion": "0.3.1", - # "name": "mynet", - # "type": "dummy", - # "ipam": { - # "type": "host-local", - # "subnet": "10.1.2.0/24" - # } - # } - # ''; + "cni/net.d/98-dummy.conf".text = '' + { + "cniVersion": "1.0.0", + "name": "mynet", + "type": "dummy", + "ipam": { + "type": "host-local", + "subnet": "10.1.2.0/24" + } + } + ''; # "cni/net.d/97-mixed.conflist".text = '' # { # "cniVersion": "1.0.0", @@ -242,6 +243,42 @@ in { # ] # } # ''; + # Ref: https://kubernetes.io/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors/ + # "cni/net.d/96-containerd-net.conflist".text = '' + # { + # "cniVersion": "1.0.0", + # "name": "containerd-net", + # "plugins": [ + # { + # "type": "bridge", + # "bridge": "cni0", + # "isGateway": true, + # "ipMasq": true, + # "promiscMode": true, + # "ipam": { + # "type": "host-local", + # "ranges": [ + # [{ + # "subnet": "10.88.0.0/16" + # }], + # [{ + # "subnet": "2001:db8:4860::/64" + # }] + # ], + # "routes": [ + # { "dst": "0.0.0.0/0" }, + # { "dst": "::/0" } + # ] + # } + # }, + # { + # "type": "portmap", + # "capabilities": {"portMappings": true}, + # "externalSetMarkChain": "KUBE-MARK-MASQ" + # } + # ] + # } + # ''; }; }; } diff --git a/modules/nixos/k8s/scheduler.nix b/modules/nixos/k8s/scheduler.nix index 18059a3..7d2ceea 100644 --- a/modules/nixos/k8s/scheduler.nix +++ b/modules/nixos/k8s/scheduler.nix @@ -23,7 +23,7 @@ name = "default"; cluster = { certificate-authority = "${topConfig.secretsPath}/ca.pem"; - server = "https://fat-controller.local:6443"; + server = "https://fat-controller.systems.richtman.au:6443"; }; } ]; diff --git a/modules/nixos/lab-node/default.nix b/modules/nixos/lab-node/default.nix index 99cd2ec..0d08cc1 100644 --- a/modules/nixos/lab-node/default.nix +++ b/modules/nixos/lab-node/default.nix @@ -104,7 +104,6 @@ in # Ref: https://github.com/avahi/avahi/blob/master/avahi-daemon/avahi-daemon.conf avahi = { enable = true; - # domainName = "internal"; publish = { enable = true; domain = true;
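For what it's worth, a crude way to see how far the 98-dummy.conf bootstrap above gets containerd before Cilium takes over. The pod name and pause image are arbitrary, and the 10.1.2.0/24 address comes from the host-local IPAM block in that config:

```
# Roughly check which CNI config containerd's CRI plugin loaded, then try a
# throwaway pod; with the dummy/host-local config it would ideally come up with
# an address from 10.1.2.0/24 (Cilium replaces all of this once deployed).
crictl info | grep -A 10 '"cni"'
kubectl run cni-smoke --image=registry.k8s.io/pause:3.9 --restart=Never
kubectl get pod cni-smoke -o wide
kubectl delete pod cni-smoke
```

If the sandbox still fails to start, the kubelet and containerd journals are the next place to look, same as with the loopback-only attempt noted above.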