
Use 'node' pillar instead of 'nodes:' ~ grains.id

  With the latest version of NACL's ext_pillar module, the configuration of
  the local minion, which up to now has been present within the
  "nodes:<minion_id>" pillar key, is now also exposed within the "node"
  pillar key.

  The idea behind this change is to expose all information relevant for the
  configuration of any given minion within the "node" pillar key and to avoid
  calculating or deriving any configuration bits inside templates and,
  ideally, inside modules.  All computation should happen before Salt; Salt
  should just be the means to write configuration files, install packages,
  and maintain services.

  This commit changes all occurrences where the old-style "nodes:<minion_id>"
  pillar key is used to access the generated minion configuration to the new
  "node" pillar key, wherever this is possible.

Signed-off-by: Maximilian Wilhelm <max@sdn.clinic>
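
For illustration, a minimal sketch of the before/after pattern this commit applies, drawn from the diffs below (not a complete file): a Jinja lookup in a template changes from

    {%- set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}

to

    {%- set roles = salt['pillar.get']('node:roles', []) %}

and a pillar reference in an SLS state changes from

    - contents_pillar: nodes:{{ grains.id }}:ssh:{{ user }}:pubkey

to

    - contents_pillar: node:ssh:{{ user }}:pubkey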
Maximilian Wilhelm committed 1 month ago
Parent commit: 60a5299b0b
52 changed files with 82 additions and 80 deletions
  1. +1 -1  anycast-healthchecker/init.sls
  2. +1 -1  bash/init.sls
  3. +1 -1  batman/init.sls
  4. +2 -2  bird/IGP.conf
  5. +1 -1  bird/VRF_external.conf
  6. +1 -1  bird/bird.conf
  7. +2 -2  bird/ff-policy.conf
  8. +1 -1  bird/ffrl.conf
  9. +1 -1  bird/ibgp.conf
  10. +1 -1  bird/icinga2/ibgp_sessions_down_ok.txt.tmpl
  11. +1 -1  bird/icinga2/ospf_interfaces_down_ok.txt.tmpl
  12. +2 -2  bird/init.sls
  13. +2 -2  bird/l3-access.conf
  14. +1 -1  bird/mesh_routes.conf
  15. +1 -1  bird/radv.conf
  16. +2 -2  build/init.sls
  17. +1 -1  burp/client.sls
  18. +2 -2  certs/init.sls
  19. +1 -1  dhcp-server/dhcpd.conf
  20. +1 -1  dhcp-server/dhcpd.default
  21. +1 -1  dns-server/init.sls
  22. +1 -1  fastd/fastd.conf
  23. +1 -1  fastd/init.sls
  24. +4 -4  ffinfo/init.sls
  25. +4 -3  firmware/init.sls
  26. +1 -1  firmware/update-firmware
  27. +1 -1  gogs/init.sls
  28. +1 -1  grafana/init.sls
  29. +2 -2  icinga2/init.sls
  30. +1 -1  icingaweb2/init.sls
  31. +1 -1  network/bootstrap.sls
  32. +1 -1  network/ifupdown2/reload.sls
  33. +1 -1  network/init.sls
  34. +1 -1  network/interfaces/interfaces.tmpl
  35. +1 -1  network/link.sls
  36. +2 -2  nftables/init.sls
  37. +1 -1  nginx/firmware.srv.in.ffho.net
  38. +1 -1  nginx/init.sls
  39. +1 -1  nginx/nginx.conf
  40. +1 -1  openvpn/init.sls
  41. +2 -2  postfix/init.sls
  42. +2 -2  pppoe/pap-secrets
  43. +2 -2  pppoe/tkom_peer.tmpl
  44. +1 -1  prometheus-exporters/init.sls
  45. +8 -7  respondd/init.sls
  46. +1 -1  rsyslog/init.sls
  47. +2 -2  snmpd/snmpd.conf.tmpl
  48. +1 -1  ssh/authorized_keys.tmpl
  49. +5 -5  ssh/init.sls
  50. +1 -1  sysctl/init.sls
  51. +1 -1  wireguard/init.sls
  52. +2 -2  yanic/init.sls

+ 1 - 1
anycast-healthchecker/init.sls

@@ -2,7 +2,7 @@
 # Anycast Healthchecker
 #
 
-{% set node_roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{% set node_roles = salt['pillar.get']('node:roles', []) %}
 {% set config = salt['pillar.get']('anycast-healtchecker', {}) %}
 
 include:

+ 1 - 1
bash/init.sls

@@ -11,7 +11,7 @@
 
 #
 # Nifty aliases for gateway
-{% if 'batman_gw' in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{% if 'batman_gw' in salt['pillar.get']('node:roles', []) %}
 /root/.bash_aliases:
   file.managed:
     - source: salt://bash/bash_aliases.root

+ 1 - 1
batman/init.sls

@@ -6,7 +6,7 @@
 # Only set up batman and load batman_adv kernel module if the role »batman«
 # has been configured for this node.
 #
-{%- set roles = salt['pillar.get']('nodes:' ~ grains['id']  ~ ':roles', []) %}
+{%- set roles = salt['pillar.get']('node:roles', []) %}
 
 {%- if 'batman' in roles %}
 batctl:

+ 2 - 2
bird/IGP.conf

@@ -2,7 +2,7 @@
 # FFHO IGP / OSPF configuration (Salt managed)
 #
 
-{%- set node_config = salt['pillar.get']('nodes:' ~ grains['id'], {}) %}
+{%- set node_config = salt['pillar.get']('node', {}) %}
 {%- set roles = node_config.get ('roles', []) %}
 {%- set ospf_node_config = node_config.get('ospf', {}) %}
 {%- if 'stub_router' in ospf_node_config and ospf_node_config['stub_router'] in [ True, 'yes'] %}
@@ -64,7 +64,7 @@ protocol ospf IGP {
   {%- endif %}
 
   {#- Interface description? #}
-  {%- set desc = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces:' ~ iface ~ ':desc', "") %}
+  {%- set desc = salt['pillar.get']('node:ifaces:' ~ iface ~ ':desc', "") %}
 
 		# {{ desc }}
 		interface "{{ iface }}" {

+ 1 - 1
bird/VRF_external.conf

@@ -2,7 +2,7 @@
 #                              Internet table                                  #
 ################################################################################
 
-{%- set ifaces = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces', {}) %}
+{%- set ifaces = salt['pillar.get']('node:ifaces', {}) %}
 {%- set have_vrf_external = [] %}
 {%- for iface, iface_config in ifaces.items () %}
   {%- if iface_config.get ('vrf', '') == 'vrf_external' %}

+ 1 - 1
bird/bird.conf

@@ -1,7 +1,7 @@
 #
 # IP{{ proto }} Bird configuration (Salt managed)
 #
-{%- set node_config = salt['pillar.get']('nodes:' ~ grains['id'], {}) %}
+{%- set node_config = salt['pillar.get']('node', {}) %}
 
 define AS_OWN  = 65132;
 define LO_IP = {{ salt['ffho_net.get_primary_ip'](node_config, proto).ip }};

+ 2 - 2
bird/ff-policy.conf

@@ -1,8 +1,8 @@
 #
 # FFHO Routing Policy
 #
-{%- set node_roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
-{%- set node_sites = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':sites', []) %}
+{%- set node_roles = salt['pillar.get']('node:roles', []) %}
+{%- set node_sites = salt['pillar.get']('node:sites', []) %}
 {%- set sites = salt['pillar.get']('sites', {}) %}
 {%- set te = salt['pillar.get']('te', {}) %}
 

+ 1 - 1
bird/ffrl.conf

@@ -1,7 +1,7 @@
 #
 # FFRL upstream (Salt managed)
 #
-{%- set ifaces = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces', {}) %}
+{%- set ifaces = salt['pillar.get']('node:ifaces', {}) %}
 {%- set sessions = salt['ffho_net.get_ffrl_bgp_config'](ifaces, proto) %}
 {%- set te_community_map_ffrl = salt['pillar.get']('te:community_map:' ~ grains['id'] ~ ':ffrl', [])|sort %}
 

+ 1 - 1
bird/ibgp.conf

@@ -14,7 +14,7 @@ template bgp ibgp {
 	graceful restart yes;
 }
 
-{%- set peers = salt['pillar.get']("nodes:" ~ grains.id ~ ":routing:bgp:internal:peers:" ~ family, []) %}
+{%- set peers = salt['pillar.get']("node:routing:bgp:internal:peers:" ~ family, []) %}
 {% for peer_config in peers %}
 protocol bgp {{ peer_config.get ('node')|replace(".", "_")|replace("-", "_") }} from ibgp {
 	neighbor {{ peer_config.get ('ip') }} as AS_OWN;

+ 1 - 1
bird/icinga2/ibgp_sessions_down_ok.txt.tmpl

@@ -2,7 +2,7 @@
 # BGP sessions which are OK to be down (Salt managed)
 #
 {#- Gather information for iBGP sessions #}
-{%- set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{%- set roles = salt['pillar.get']('node:roles', []) %}
 {%- set inactive_peers = [] %}
 {%- for node in salt['pillar.get']('nodes', [])|sort if node != grains['id'] %}
   {%- set peer_node_config = salt['pillar.get']('nodes:' ~ node) %}

+ 1 - 1
bird/icinga2/ospf_interfaces_down_ok.txt.tmpl

@@ -1,7 +1,7 @@
 #
 # Interfaces which are OK to be down in OSPF (Salt managed)
 #
-{%- set interfaces = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':routing:ospf:ifaces_down_ok', []) %}
+{%- set interfaces = salt['pillar.get']('node:routing:ospf:ifaces_down_ok', []) %}
 {%- for iface in interfaces %}
 {{ iface }}
 {%- endfor %}

+ 2 - 2
bird/init.sls

@@ -2,8 +2,8 @@
 # Bird routing daemon
 #
 
-{%- set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
-{%- set status = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':status', 'active') %}
+{%- set roles = salt['pillar.get']('node:roles', []) %}
+{%- set status = salt['pillar.get']('node:status', 'active') %}
 
 include:
   - network.interfaces

+ 2 - 2
bird/l3-access.conf

@@ -3,8 +3,8 @@
 #
 
 protocol direct l3_access {
-{%- for iface in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces')|sort %}
-  {%- set config = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces:' ~ iface) %}
+{%- for iface in salt['pillar.get']('node:ifaces')|sort %}
+  {%- set config = salt['pillar.get']('node:ifaces:' ~ iface) %}
   {%- if salt['ffho.re_search']('^vlan(3\d\d|29\d\d)$', iface) or 'l3-access' in config.get ('tags', []) %}
 	interface "{{ iface }}";
   {%- endif %}

+ 1 - 1
bird/mesh_routes.conf

@@ -2,7 +2,7 @@
 # Learn mesh prefixes (Salt managed)
 #
 
-{% for site in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':sites', []) %}
+{% for site in salt['pillar.get']('node:sites', []) %}
 protocol direct mesh_{{ salt['ffho.re_replace']('-', '_', site) }} {
 	interface "br-{{ site }}";
 	check link yes;

+ 1 - 1
bird/radv.conf

@@ -1,4 +1,4 @@
-{%- set node_config = salt['pillar.get']('nodes:' ~ grains['id']) %}
+{%- set node_config = salt['pillar.get']('node') %}
 {%- set sites_config = salt['pillar.get']('sites') %}
 protocol radv {
         # ONLY advertise prefix, IF default route is available

+ 2 - 2
build/init.sls

@@ -30,7 +30,7 @@ build:
 
 /home/build/.ssh/id_rsa:
   file.managed:
-    - contents_pillar: nodes:{{grains.id}}:ssh:build:privkey
+    - contents_pillar: node:ssh:build:privkey
     - mode: 600
     - user: build
     - makedirs: True
@@ -39,7 +39,7 @@ build:
 
 /home/build/.ssh/id_rsa.pub:
   file.managed:
-    - contents_pillar: nodes:{{grains.id}}:ssh:build:privkey
+    - contents_pillar: node:ssh:build:privkey
     - makedirs: True
     - require:
       - user: build

+ 1 - 1
burp/client.sls

@@ -18,4 +18,4 @@ burp-client:
     - source: salt://burp/client/burp.conf.tmpl
     - template: jinja
       burp_server_name: {{ salt['pillar.get']('burp:server:fqdn') }}
-      burp_password: {{ salt['pillar.get']('nodes:' ~ grains.id ~ ':burp:password') }}
+      burp_password: {{ salt['pillar.get']('node:burp:password') }}

+ 2 - 2
certs/init.sls

@@ -41,13 +41,13 @@ generate-dhparam:
 {% set certs = {} %}
 
 # Are there any certificates defined or referenced in the node pillar?
-{% set node_config = salt['pillar.get']('nodes:' ~ grains['id']) %}
+{% set node_config = salt['pillar.get']('node') %}
 {% for cn, cert_config in node_config.get ('certs', {}).items () %}
   {% set pillar_name = None %}
 
   {# "cert" and "privkey" provided in node config? #}
   {% if 'cert' in cert_config and 'privkey' in cert_config %}
-    {% set pillar_name = 'nodes:' ~ grains['id'] ~ ':certs:' ~ cn %}
+    {% set pillar_name = 'node:certs:' ~ cn %}
 
   {# <cn> only referenced in node config and cert/privkey stored in "cert" pillar? #}
   {% elif cert_config.get ('install', False) == True %}

+ 1 - 1
dhcp-server/dhcpd.conf

@@ -1,4 +1,4 @@
-{%- set dhcp_prefixes = salt['pillar.get']("nodes:" ~ grains["id"] ~ ":dhcp:server:prefixes", []) -%}
+{%- set dhcp_prefixes = salt['pillar.get']("node:dhcp:server:prefixes", []) -%}
 {%- set dns_resolver_IP = salt["pillar.get"]("globals:dns:resolver_v4") -%}
 {%- set dns_search_domain = salt["pillar.get"]("globals:dns:search") -%}
 {%- set unifi_address = salt["pillar.get"]("globals:unifi:address") -%}

+ 1 - 1
dhcp-server/dhcpd.default

@@ -1,4 +1,4 @@
-{%- set dhcp_interfaces = salt['pillar.get']("nodes:" ~ grains["id"] ~ ":dhcp:server:ifaces", []) -%}
+{%- set dhcp_interfaces = salt['pillar.get']("node:dhcp:server:ifaces", []) -%}
 # Defaults for isc-dhcp-server (sourced by /etc/init.d/isc-dhcp-server) (Salt managed)
 
 # Path to dhcpd's config file (default: /etc/dhcp/dhcpd.conf).

+ 1 - 1
dns-server/init.sls

@@ -2,7 +2,7 @@
 # FFHO DNS Server configuration (authoritive / recursive)
 #
 
-{% set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{% set roles = salt['pillar.get']('node:roles', []) %}
 
 bind9:
   pkg.installed:

+ 1 - 1
fastd/fastd.conf

@@ -1,7 +1,7 @@
 #
 # {{ site }} / {{ network }} FASTd configuration (Salt managed)
 #
-{%- set node_config = salt['pillar.get']('nodes:' ~ grains['id']) %}
+{%- set node_config = salt['pillar.get']('node') %}
 
 log to syslog level info;
 

+ 1 - 1
fastd/init.sls

@@ -3,7 +3,7 @@
 #
 
 {% set sites_all = pillar.get ('sites') %}
-{% set node_config = salt['pillar.get']('nodes:' ~ grains.id, {}) %}
+{% set node_config = salt['pillar.get']('node', {}) %}
 {% set sites_node = node_config.get('sites', {}) %}
 {% set device_no = node_config.get('id', -1) %}
 

+ 4 - 4
ffinfo/init.sls

@@ -12,7 +12,7 @@
 # Generate /etc/freifunk/role file with main role the node has configured in NetBox
 /etc/freifunk/role:
   file.managed:
-    - contents: {{ salt['pillar.get']('nodes:' ~ grains['id'] ~ ':role', "") }}
+    - contents: {{ salt['pillar.get']('node:role', "") }}
 
 # Generate /etc/freifunk/roles file with all roles configured on the node,
 # one on each line.
@@ -20,7 +20,7 @@
   file.managed:
     - source: salt://ffinfo/list.tmpl
     - template: jinja
-      list: {{ salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) }}
+      list: {{ salt['pillar.get']('node:roles', []) }}
 
 # Generate /etc/freifunk/sites file with all sites configured on the node,
 # one on each line. Empty if no sites configured.
@@ -28,10 +28,10 @@
   file.managed:
     - source: salt://ffinfo/list.tmpl
     - template: jinja
-      list: {{ salt['pillar.get']('nodes:' ~ grains['id'] ~ ':sites', []) }}
+      list: {{ salt['pillar.get']('node:sites', []) }}
 
 # Generate /etc/freifunk/status file with the status of this node
-{% set status = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':status', 'active') %}
+{% set status = salt['pillar.get']('node:status', 'UNKNOWN') %}
 /etc/freifunk/status:
   file.managed:
     - contents: {{ status }}

+ 4 - 3
firmware/init.sls

@@ -1,6 +1,7 @@
 #
 # firmware
 #
+{% set firmware_path = salt['pillar.get']('node:path:firmware') %}
 
 firmware-pkgs:
   pkg.installed:
@@ -16,7 +17,7 @@ firmware-pkgs:
 
 firmware-git:
   file.directory:
-    - name: {{salt['pillar.get']('nodes:' ~ grains['id'] ~ ':path:firmware', [])}}
+    - name: {{ firmware_path }}
     - user: firmware
     - group: firmware
     - mode: 755
@@ -24,7 +25,7 @@ firmware-git:
       - user: firmware
   git.latest:
     - name: gogs@git.srv.in.ffho.net:FreifunkHochstift/ffho-firmware-website.git
-    - target: {{salt['pillar.get']('nodes:' ~ grains['id'] ~ ':path:firmware', [])}}
+    - target: {{ firmware_path }}
     - user: firmware
     - update_head: False
     - require:
@@ -35,7 +36,7 @@ firmware-git:
 firmware-changelog:
   cmd.run:
     - name: FORCE=1 /usr/local/sbin/update-firmware
-    - creates: {{salt['pillar.get']('nodes:' ~ grains['id'] ~ ':path:firmware', [])}}/stable/Changelog.html
+    - creates: {{ firmware_path }}/stable/Changelog.html
     - user: firmware
     - group: firmware
     - watch:

+ 1 - 1
firmware/update-firmware

@@ -2,7 +2,7 @@
 # (c) 2016 Freifunk Hochstift <kontakt@hochstift.freifunk.net>
 #
 
-DEFAULT_DIR="{{salt['pillar.get']('nodes:' ~ grains['id'] ~ ':path:firmware', [])}}"
+DEFAULT_DIR="{{ salt['pillar.get']('node:path:firmware') }}"
 DEFAULT_BRANCHES="stable testing experimental"
 
 getCurrentVersion() {

+ 1 - 1
gogs/init.sls

@@ -2,7 +2,7 @@
 # gogs
 #
 
-{% set config = salt['pillar.get']('nodes:' ~ grains.id ~ ':gogs', {}) %}
+{% set config = salt['pillar.get']('node:gogs', {}) %}
 
 gogs-repo:
   pkgrepo.managed:

+ 1 - 1
grafana/init.sls

@@ -4,7 +4,7 @@
 
 {% set grafana_cfg = salt['pillar.get']('grafana') %}
 
-{% set node_config = salt['pillar.get']('nodes:' ~ grains['id']) %}
+{% set node_config = salt['pillar.get']('node') %}
 {% if node_config.get('role') == "prometheus-server" %}
 include:
   - .prometheus

+ 2 - 2
icinga2/init.sls

@@ -1,7 +1,7 @@
 #
 # Icinga2
 #
-{% set roles = salt['pillar.get']('nodes:' ~ grains.id ~ ':roles', []) %}
+{% set roles = salt['pillar.get']('node:roles', []) %}
 
 include:
   - apt
@@ -125,7 +125,7 @@ ffho-plugins:
     - watch_in:
       - sevice: icinga2
 
-{% set pillar_name = 'nodes:' ~ grains['id'] ~ ':certs:' ~ grains['id'] %}
+{% set pillar_name = 'node:certs:' ~ grains['id'] %}
 /var/lib/icinga2/certs/{{ grains['id'] }}.crt:
   file.managed:
     - contents_pillar: {{ pillar_name }}:cert

+ 1 - 1
icingaweb2/init.sls

@@ -1,7 +1,7 @@
 #
 # Icingaweb2
 #
-{% set roles = salt['pillar.get']('nodes:' ~ grains.id ~ ':roles', []) %}
+{% set roles = salt['pillar.get']('node:roles', []) %}
 {% set icingaweb2_config = salt['pillar.get']('monitoring:icingaweb2') %}
 
 include:

+ 1 - 1
network/bootstrap.sls

@@ -7,7 +7,7 @@
 
 # Which networ suite to configure?
 {% set default_suite = salt['pillar.get']('network:suite', 'ifupdown2') %}
-{% set suite = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':network:suite', default_suite) %}
+{% set suite = salt['pillar.get']('node:network:suite', default_suite) %}
 
 include:
  - network.link

+ 1 - 1
network/ifupdown2/reload.sls

@@ -16,7 +16,7 @@ ifreload:
 # The fix script will be called every minute by cron and after ifreload
 # was called to try to minimize any downtime.
 {% set vrf = [False] %}
-{% for iface, iface_config in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces', {}).items() %}
+{% for iface, iface_config in salt['pillar.get']('node:ifaces', {}).items() %}
   {% if iface_config.get ('vrf', '') == 'vrf_external' %}
     {% do vrf.append (True) %}
     {% break %}

+ 1 - 1
network/init.sls

@@ -4,7 +4,7 @@
 
 # Which networ suite to configure?
 {% set default_suite = salt['pillar.get']('network:suite', 'ifupdown-ng') %}
-{% set suite = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':network:suite', default_suite) %}
+{% set suite = salt['pillar.get']('node:network:suite', default_suite) %}
 
 include:
   - network.link

+ 1 - 1
network/interfaces/interfaces.tmpl

@@ -2,7 +2,7 @@
 # /etc/network/interfaces (Salt managed)
 #
 
-{%- set node_config = salt['pillar.get']('nodes:' ~ grains['id'], {}) %}
+{%- set node_config = salt['pillar.get']('node', {}) %}
 {%- set node_id = grains['id'] %}
 {%- set sites_config = salt['pillar.get']('sites', {}) %}
 {%- set ifaces = salt['ffho_net.get_interface_config'](node_config, sites_config) %}

+ 1 - 1
network/link.sls

@@ -3,7 +3,7 @@
 #
 
 # Write an systemd link file for every interface with a MAC
-  {% for iface, iface_config in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':ifaces', {}).items ()|sort %}
+  {% for iface, iface_config in salt['pillar.get']('node:ifaces', {}).items ()|sort %}
     {% if 'mac' in iface_config %}
 /etc/systemd/network/42-{{ iface }}.link:
   file.managed:

+ 2 - 2
nftables/init.sls

@@ -2,7 +2,7 @@
 # nftables state
 #
 
-{% if not 'no-nftables' in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':tags', []) %}
+{% if not 'no-nftables' in salt['pillar.get']('node:tags', []) %}
 
 nftables:
   pkg.installed:
@@ -24,7 +24,7 @@ nftables:
 
 
 {% set no_purge_roles = ['docker', 'kvm'] %}
-{% set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', [])%}
+{% set roles = salt['pillar.get']('node:roles', [])%}
 {% set not_purge_iptables = salt['ffho.any_item_in_list'](no_purge_roles, roles) %}
 
 purge-iptables:

+ 1 - 1
nginx/firmware.srv.in.ffho.net

@@ -8,7 +8,7 @@ server {
 	listen 80;
 	listen [::]:80;
 
-	root {{salt['pillar.get']('nodes:' ~ grains['id'] ~ ':path:firmware', [])}};
+	root {{ salt['pillar.get']('node:path:firmware') }};
 
 	server_name ~^firmware\.((srv\.)?in|im)\.ffho\.net$;
 	fancyindex on;

+ 1 - 1
nginx/init.sls

@@ -5,7 +5,7 @@
 include:
  - systemd
 
-{% set node_config = salt['pillar.get']('nodes:' ~ grains.id) %}
+{% set node_config = salt['pillar.get']('node') %}
 {% set nginx_pkg = node_config.get('nginx:pkg', 'nginx') %}
 {% set acme_thumbprint = salt['pillar.get']('acme:thumbprint') %}
 

+ 1 - 1
nginx/nginx.conf

@@ -22,7 +22,7 @@ http {
 	keepalive_timeout 65;
 	types_hash_max_size 2048;
 	# server_tokens off;
-{% if 'frontend' in salt['pillar.get']('nodes:' ~ grains.id ~ ':roles', []) %}
+{% if 'frontend' in salt['pillar.get']('node:roles', []) %}
 	server_names_hash_bucket_size 64;
 {%- else %}
 	# server_names_hash_bucket_size 64;

+ 1 - 1
openvpn/init.sls

@@ -147,7 +147,7 @@ Cleanup /etc/openvpn/{{ netname }}:
 #
 # OPS VPN?
 #
-{% if 'ops-vpn' in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', [])  %}
+{% if 'ops-vpn' in salt['pillar.get']('node:roles', [])  %}
 libpam-ldap:
   pkg.installed
 

+ 2 - 2
postfix/init.sls

@@ -57,7 +57,7 @@ newaliases:
 
 
 # Set mailname to node_id if not specified otherwise in node pillar.
-{% set mailname = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':mailname', grains['id']) %}
+{% set mailname = salt['pillar.get']('node:mailname', grains['id']) %}
 /etc/mailname:
   file.managed:
     - contents: "{{ mailname }}"
@@ -66,7 +66,7 @@ newaliases:
 #
 # Manage virtual domains and aliases on MX nodes
 #
-{% if 'mx' in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{% if 'mx' in salt['pillar.get']('node:roles', []) %}
 /etc/postfix/virtual-domains:
   file.managed:
     - source: salt://postfix/virtual-domains

+ 2 - 2
pppoe/pap-secrets

@@ -39,6 +39,6 @@ stats	hostname	"*"	-
 # remove the following line.
 
 #	*	password
-{%- set user = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':pppoe:user', 'WRONG USER') %}
-{%- set pass = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':pppoe:pass', 'NO PASS') %}
+{%- set user = salt['pillar.get']('node:pppoe:user', 'WRONG USER') %}
+{%- set pass = salt['pillar.get']('node:pppoe:pass', 'NO PASS') %}
 "{{ user }}"	*	"{{ pass }}"

+ 2 - 2
pppoe/tkom_peer.tmpl

@@ -11,7 +11,7 @@
 # MUST CHANGE: Uncomment the following line, replacing the user@provider.net
 # by the DSL user name given to your by your DSL provider.
 # (There should be a matching entry in /etc/ppp/pap-secrets with the password.)
-{%- set user = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':pppoe:user', 'WRONG USER') %}
+{%- set user = salt['pillar.get']('node:pppoe:user', 'WRONG USER') %}
 user "{{ user }}"
 
 # Use the pppoe program to send the ppp packets over the Ethernet link
@@ -19,7 +19,7 @@ user "{{ user }}"
 # the Internet through this DSL connection. This is the right line to use
 # for most people.
 {%- set default_iface = 'eth2' %}
-{%- set iface = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':pppoe:iface', default_iface) %}
+{%- set iface = salt['pillar.get']('node:pppoe:iface', default_iface) %}
 pty "/usr/sbin/pppoe -I {{ iface }} -T 80 -m 1452"
 
 # An even more conservative version of the previous line, if things

+ 1 - 1
prometheus-exporters/init.sls

@@ -22,7 +22,7 @@ prometheus-node-exporter:
 #
 # Role specific exporters
 #
-{% set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{% set roles = salt['pillar.get']('node:roles', []) %}
 
 # DNS server
 {% if 'dns-recursor' in roles or 'dns-auth' in roles %}

+ 8 - 7
respondd/init.sls

@@ -3,7 +3,7 @@
 #
 
 {% set sites_all = pillar.get ('sites') %}
-{% set sites_node = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':sites', []) %}
+{% set sites_node = salt['pillar.get']('node:sites', []) %}
 
 /srv/ffho-respondd:
   file.directory
@@ -29,15 +29,16 @@ ffho-respondd:
     - require:
       - git: ffho-respondd
 
-{% set node_config = salt['pillar.get']('nodes:' ~ grains['id'], {}) %}
+{% set node_config = salt['pillar.get']('node', {}) %}
+{% set node_roles = node_config.get('roles', []) %}
 {% set sites_config = salt['pillar.get']('sites', {}) %}
 
 {% set ifaces = salt['ffho_net.get_interface_config'](node_config, sites_config) %}
-{% set device_no = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':id', -1) %}
+{% set device_no = salt['pillar.get']('node:id', -1) %}
 {% for site in sites_node %}
   {% set site_no = salt['pillar.get']('sites:' ~ site ~ ':site_no') %}
   {% set mac_address = salt['ffho_net.gen_batman_iface_mac'](site_no, device_no, 'bat') %}
-  {% set region_code = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':location:region:code', salt['pillar.get']('nodes:' ~ grains['id'] ~ ':site_code', '')) %}
+  {% set region_code = salt['pillar.get']('node:location:region:code', salt['pillar.get']('node:site_code', '')) %}
 
 /srv/ffho-respondd/{{site}}.conf:
   file.managed:
@@ -48,7 +49,7 @@ ffho-respondd:
       fastd_peers: "{% if 'fastd_peers' in node_config.get ('roles', []) %}true{% else %}false{% endif %}"
       hostname: "{{ grains['id'].split('.')[0] }}{% if node_config.get ('sites', [])|length > 1 or grains.id.startswith('gw') %}-{{site}}{% endif %}"
       mcast_iface: {% if 'br-' ~ site in ifaces %}"br-{{site}}"{% else %}"bat-{{site}}"{% endif %}
-    {% if 'fastd' in node_config.get ('roles', []) %}
+    {% if 'fastd' in node_roles %}
       mesh_vpn: [{{ site }}_intergw, {{ site }}_nodes4, {{ site }}_nodes6]
     {% else %}
       mesh_vpn: False
@@ -70,7 +71,7 @@ respondd@{{site}}:
       - file: /srv/ffho-respondd/{{site}}.conf
       - git: ffho-respondd
 
-{% if 'batman_ext' in node_config.get('roles', []) %}
+{% if 'batman_ext' in node_roles %}
 /srv/ffho-respondd/{{site}}-ext.conf:
   file.managed:
     - source: salt://respondd/respondd-config.tmpl
@@ -80,7 +81,7 @@ respondd@{{site}}:
       fastd_peers: "{% if 'fastd_peers' in node_config.get ('roles', []) %}true{% else %}false{% endif %}"
       hostname: "{{ grains['id'].split('.')[0] }}{% if node_config.get ('sites', [])|length > 1 or grains.id.startswith('gw') %}-{{site}}{% endif %}-ext"
       mcast_iface: "bat-{{ site }}-ext"
-    {% if 'fastd' in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+    {% if 'fastd' in node_roles %}
       mesh_vpn: [{{ site }}_intergw, {{ site }}_nodes4, {{ site }}_nodes6]
     {% else %}
       mesh_vpn: False

+ 1 - 1
rsyslog/init.sls

@@ -2,7 +2,7 @@
 # Rsyslog configuration
 #
 
-{% set roles = salt['pillar.get'] ('nodes:' ~ grains['id'] ~ ':roles') %}
+{% set roles = salt['pillar.get'] ('node:roles') %}
 {% set logserver = salt['pillar.get'] ('logging:syslog:logserver') %}
 {% set graylog_uri = salt['pillar.get'] ('logging:graylog:syslog_uri') %}
 

+ 2 - 2
snmpd/snmpd.conf.tmpl

@@ -1,7 +1,7 @@
-{%- set node_config = salt['pillar.get'] ('nodes:' ~ grains['id']) %}
+{%- set node_config = salt['pillar.get'] ('node') %}
 {%- set ro_community = salt['pillar.get'] ('globals:snmp:ro_community', "Configure globals:snmp:ro_community in pillar...") %}
 {%- set sys_contact = salt['pillar.get'] ('globals:ops_mail', "Configure globals:ops_mail in pillar...") %}
-{%- set sys_location = salt['pillar.get'] ('nodes:' ~ grains['id'] ~ 'location:site:code', node_config.get ('sysLocation', "Please fix sysLocation...")) %}
+{%- set sys_location = salt['pillar.get'] ('node:location:site:code', node_config.get ('sysLocation', "Please fix sysLocation...")) %}
 {%- set nms_list = salt['pillar.get'] ('globals:snmp:nms_list', []) %}
 {%- set sites_config = salt['pillar.get'] ('sites', {}) %}
 {%- set ifaces = salt['ffho_net.get_interface_config'] (node_config, sites_config) %}

+ 1 - 1
ssh/authorized_keys.tmpl

@@ -10,6 +10,6 @@
   {%- endif %}
   {%- do entry.update({ 'pubkeys': [salt['pillar.get']('nodes:' + host + ':ssh:' + user + ':pubkey')]}) %}
 {%- endfor %}
-{%- set node_config = salt['pillar.get']('nodes:' ~ grains['id']) -%}
+{%- set node_config = salt['pillar.get']('node') -%}
 {%- set auth_keys = salt['ffho_auth.get_ssh_authkeys'](ssh_config, node_config, grains['id'], username) -%}
 {{ "\n".join (auth_keys) }}

+ 5 - 5
ssh/init.sls

@@ -2,7 +2,7 @@
 # SSH configuration
 #
 
-{% set node_config = salt['pillar.get']('nodes:' ~ grains.id) %}
+{% set node_config = salt['pillar.get']('node') %}
 
 # Install ssh server
 ssh:
@@ -72,7 +72,7 @@ ssh-{{ user }}:
 {# Add SSH-Keys for user #}
 {{ path }}/.ssh/id_rsa:
   file.managed:
-    - contents_pillar: nodes:{{ grains.id }}:ssh:{{ user }}:privkey
+    - contents_pillar: node:ssh:{{ user }}:privkey
     - user: {{ user }}
     - group: {{ user }}
     - mode: 600
@@ -81,7 +81,7 @@ ssh-{{ user }}:
 
 {{ path }}/.ssh/id_rsa.pub:
   file.managed:
-    - contents_pillar: nodes:{{ grains.id }}:ssh:{{ user }}:pubkey
+    - contents_pillar: node:ssh:{{ user }}:pubkey
     - user: {{ user }}
     - group: {{ user }}
     - mode: 644
@@ -94,14 +94,14 @@ ssh-{{ user }}:
 {% for key in node_config.get('ssh', {}).get('host', {}) if key in ['dsa', 'ecdsa', 'ed25519', 'rsa'] %}
 /etc/ssh/ssh_host_{{ key }}_key:
   file.managed:
-    - contents_pillar: nodes:{{ grains.id }}:ssh:host:{{ key }}:privkey
+    - contents_pillar: node:ssh:host:{{ key }}:privkey
     - mode: 600
     - watch_in:
       - service: ssh
 
 /etc/ssh/ssh_host_{{ key }}_key.pub:
   file.managed:
-    - contents_pillar: nodes:{{ grains.id }}:ssh:host:{{ key }}:pubkey
+    - contents_pillar: node:ssh:host:{{ key }}:pubkey
     - mode: 644
     - watch_in:
       - service: ssh

+ 1 - 1
sysctl/init.sls

@@ -1,7 +1,7 @@
 #
 # sysctl
 #
-{%- set roles = salt['pillar.get']('nodes:' ~ grains['id'] ~ ':roles', []) %}
+{%- set roles = salt['pillar.get']('node:roles', []) %}
 
 # Define command to reload sysctl settings here without dependencies
 # and define inverse dependencies where useful (see sysctl.conf)

+ 1 - 1
wireguard/init.sls

@@ -1,7 +1,7 @@
 #
 # Wireguard VPNs
 #
-{% set wg_cfg = salt['pillar.get']('nodes:' ~ grains.id ~ ':wireguard', {}) %}
+{% set wg_cfg = salt['pillar.get']('node:wireguard', {}) %}
 
 
 include:

+ 2 - 2
yanic/init.sls

@@ -25,11 +25,11 @@ yanic:
       - file: yanic
 
 # get loopback IPv6 for binding the webserver to it
-{% set node_config = salt['pillar.get']('nodes:' ~ grains['id']) %}
+{% set node_config = salt['pillar.get']('node') %}
 {% set bind_ip = salt['ffho_net.get_primary_ip'](node_config, 'v6').ip %}
 
 # for each site
-{% for site in salt['pillar.get']('nodes:' ~ grains['id'] ~ ':sites', []) %}
+{% for site in node_config.get('sites', []) %}
 # add webserver directory
 /srv/yanic/data/{{site}}:
   file.directory: