Skip to content

Commit

Permalink
Change to FQCN with ansible-lint fixer
Browse files Browse the repository at this point in the history
Since ansible-base 2.10 (later renamed to ansible-core), FQCN is the new way to go.

Updated .ansible-lint with a production profile and removed fqcn in skip_list.
Updated .yamllint with rules needed.

Ran ansible-lint --fix=all, then manually applied some minor changes.
  • Loading branch information
rholmboe committed Aug 9, 2024
1 parent 635f0b2 commit 6ed5868
Show file tree
Hide file tree
Showing 48 changed files with 315 additions and 316 deletions.
18 changes: 9 additions & 9 deletions .ansible-lint
Original file line number Diff line number Diff line change
@@ -1,21 +1,21 @@
---
profile: production
exclude_paths:
# default paths
- '.cache/'
- '.github/'
- 'test/fixtures/formatting-before/'
- 'test/fixtures/formatting-prettier/'
- .cache/
- .github/
- test/fixtures/formatting-before/
- test/fixtures/formatting-prettier/

# The "converge" and "reset" playbooks use import_playbook in
# conjunction with the "env" lookup plugin, which lets the
# syntax check of ansible-lint fail.
- 'molecule/**/converge.yml'
- 'molecule/**/prepare.yml'
- 'molecule/**/reset.yml'
- molecule/**/converge.yml
- molecule/**/prepare.yml
- molecule/**/reset.yml

# The file was generated by galaxy ansible - don't mess with it.
- 'galaxy.yml'
- galaxy.yml

skip_list:
- 'fqcn-builtins'
- var-naming[no-role-prefix]
10 changes: 9 additions & 1 deletion .yamllint
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,18 @@
extends: default

rules:
comments:
min-spaces-from-content: 1
comments-indentation: false
braces:
max-spaces-inside: 1
octal-values:
forbid-implicit-octal: true
forbid-explicit-octal: true
line-length:
max: 120
level: warning
truthy:
allowed-values: ['true', 'false']
allowed-values: ["true", "false"]
ignore:
- galaxy.yml
36 changes: 18 additions & 18 deletions inventory/sample/group_vars/all.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,34 +5,34 @@ ansible_user: ansibleuser
systemd_dir: /etc/systemd/system

# Set your timezone
system_timezone: "Your/Timezone"
system_timezone: Your/Timezone

# interface which will be used for flannel
flannel_iface: "eth0"
flannel_iface: eth0

# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.28.0" # calico version tag
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: v3.28.0 # calico version tag

# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.16.0" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
cilium_mode: native # native when nodes on same subnet or using bgp, else set routed
cilium_tag: v1.16.0 # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui

# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"
cluster_cidr: 10.52.0.0/16

# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false

# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
cilium_bgp_peer_address: 192.168.30.1
cilium_bgp_lb_cidr: 192.168.31.0/24 # cidr for cilium loadbalancer ipam

# enable kube-vip ARP broadcasts
kube_vip_arp: true
Expand All @@ -47,11 +47,11 @@ kube_vip_bgp_peeraddress: "192.168.30.1" # Defines the address for the BGP peer
kube_vip_bgp_peeras: "64512" # Defines the AS for the BGP peer

# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
apiserver_endpoint: 192.168.30.222

# k3s_token is required so that masters can talk to each other securely
# this token should be alpha numeric only
k3s_token: "some-SUPER-DEDEUPER-secret-password"
k3s_token: some-SUPER-DEDEUPER-secret-password

# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
Expand Down Expand Up @@ -84,7 +84,7 @@ extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.8.2"
kube_vip_tag_version: v0.8.2

# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
Expand All @@ -94,22 +94,22 @@ kube_vip_tag_version: "v0.8.2"
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"

# metallb type frr or native
metal_lb_type: "native"
metal_lb_type: native

# metallb mode layer2 or bgp
metal_lb_mode: "layer2"
metal_lb_mode: layer2

# bgp options
# metal_lb_bgp_my_asn: "64513"
# metal_lb_bgp_peer_asn: "64512"
# metal_lb_bgp_peer_address: "192.168.30.1"

# image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.8"
metal_lb_controller_tag_version: "v0.14.8"
metal_lb_speaker_tag_version: v0.14.8
metal_lb_controller_tag_version: v0.14.8

# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
metal_lb_ip_range: 192.168.30.80-192.168.30.90

# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.
Expand Down
2 changes: 1 addition & 1 deletion inventory/sample/group_vars/proxmox.yml
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
ansible_user: '{{ proxmox_lxc_ssh_user }}'
ansible_user: "{{ proxmox_lxc_ssh_user }}"
4 changes: 2 additions & 2 deletions molecule/calico/molecule.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master
Expand Down
4 changes: 2 additions & 2 deletions molecule/calico/overrides.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,5 +12,5 @@
retry_count: 45

# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.224"
metal_lb_ip_range: "192.168.30.100-192.168.30.109"
apiserver_endpoint: 192.168.30.224
metal_lb_ip_range: 192.168.30.100-192.168.30.109
4 changes: 2 additions & 2 deletions molecule/cilium/molecule.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master
Expand Down
4 changes: 2 additions & 2 deletions molecule/cilium/overrides.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,5 +12,5 @@
retry_count: 45

# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
metal_lb_ip_range: "192.168.30.110-192.168.30.119"
apiserver_endpoint: 192.168.30.225
metal_lb_ip_range: 192.168.30.110-192.168.30.119
9 changes: 4 additions & 5 deletions molecule/default/molecule.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ dependency:
driver:
name: vagrant
platforms:

- name: control1
box: generic/ubuntu2204
memory: 1024
Expand All @@ -18,8 +17,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant

- name: control2
box: generic/debian12
Expand Down Expand Up @@ -56,8 +55,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant

- name: node2
box: generic/rocky9
Expand Down
12 changes: 6 additions & 6 deletions molecule/ipv6/molecule.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant

- name: control2
box: generic/ubuntu2204
Expand All @@ -33,8 +33,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant

- name: node1
box: generic/ubuntu2204
Expand All @@ -49,8 +49,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
provisioner:
name: ansible
env:
Expand Down
4 changes: 2 additions & 2 deletions molecule/kube-vip/molecule.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master
Expand Down
4 changes: 2 additions & 2 deletions molecule/kube-vip/overrides.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,6 @@
retry_count: 45

# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
apiserver_endpoint: 192.168.30.225
# Use kube-vip instead of MetalLB
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"
kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
name: nginx
namespace: "{{ testing_namespace }}"
kubeconfig: "{{ kubecfg_path }}"
vars: &load_balancer_metadata
vars:
metallb_ip: status.loadBalancer.ingress[0].ip
metallb_port: spec.ports[0].port
register: nginx_services
Expand All @@ -43,10 +43,10 @@
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port_: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]

always:
- name: "Remove namespace: {{ testing_namespace }}"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
ansible.builtin.assert:
that: found_nodes == expected_nodes
success_msg: "Found nodes as expected: {{ found_nodes }}"
fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}
vars:
found_nodes: >-
{{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}
Expand All @@ -22,7 +22,7 @@
| unique
| sort
}}
# Deactivated linter rules:
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
4 changes: 2 additions & 2 deletions molecule/single_node/molecule.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master
Expand Down
4 changes: 2 additions & 2 deletions molecule/single_node/overrides.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,5 +12,5 @@
retry_count: 45

# Make sure that our IP ranges do not collide with those of the default scenario
apiserver_endpoint: "192.168.30.223"
metal_lb_ip_range: "192.168.30.91-192.168.30.99"
apiserver_endpoint: 192.168.30.223
metal_lb_ip_range: 192.168.30.91-192.168.30.99
2 changes: 1 addition & 1 deletion reboot.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,6 @@
tasks:
- name: Reboot the nodes (and wait up to 5 mins max)
become: true
reboot:
ansible.builtin.reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 300
4 changes: 2 additions & 2 deletions reset.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,11 @@
become: true
- role: raspberrypi
become: true
vars: {state: absent}
vars: { state: absent }
post_tasks:
- name: Reboot and wait for node to come back up
become: true
reboot:
ansible.builtin.reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600

Expand Down
18 changes: 8 additions & 10 deletions roles/download/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -1,36 +1,34 @@
---

- name: Download k3s binary x64
get_url:
ansible.builtin.get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
mode: "0755"
when: ansible_facts.architecture == "x86_64"

- name: Download k3s binary arm64
get_url:
ansible.builtin.get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
mode: "0755"
when:
- ( ansible_facts.architecture is search("arm") and
ansible_facts.userspace_bits == "64" ) or
ansible_facts.architecture is search("aarch64")
- ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" )
or ansible_facts.architecture is search("aarch64")

- name: Download k3s binary armhf
get_url:
ansible.builtin.get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
mode: "0755"
when:
- ansible_facts.architecture is search("arm")
- ansible_facts.userspace_bits == "32"
Loading

0 comments on commit 6ed5868

Please sign in to comment.