git.lirion.de

Of git, get, and gud

author    H. P. <coding _æ_ lirion.de>  2024-05-05 14:46:51 +0200
committer H. P. <coding _æ_ lirion.de>  2024-05-05 14:46:51 +0200
commit    4f0b5cb177f98c7f1d80504f94eeba94f721d2de (patch)
tree      0cc13128f0ac6301c969a328908b74eb3f003344
download  os-patch-master.tar.bz2

Initial commit after port (HEAD, master)
This is a ported collection of my patch playbooks and roles. They previously lived inside an "all things ansible" repository. The history is not important: shortly before the port, the code had been revamped (before, it employed change states for host selection, which worked, but changes are not intended for that).
-rw-r--r--  patch.yaml                           55
-rw-r--r--  roles/patch_debian/tasks/main.yaml   99
-rw-r--r--  roles/patch_redhat/tasks/main.yaml   91
-rw-r--r--  roles/patch_suse/tasks/main.yaml     95

4 files changed, 340 insertions(+), 0 deletions(-)
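For reference, a typical invocation (the rthosts variable and the per-distribution tags are defined in patch.yaml below; the group name "webservers" is just a placeholder for your own inventory group):

  # patch a specific inventory group
  ansible-playbook patch.yaml -e rthosts=webservers
  # or run against everything, limited to one distribution family
  ansible-playbook patch.yaml -e rthosts=all --tags debian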
diff --git a/patch.yaml b/patch.yaml
new file mode 100644
index 0000000..e9159f7
--- /dev/null
+++ b/patch.yaml
@@ -0,0 +1,55 @@
+---
+# You may want to change the default to your favourite host (group), the one you run this on the most.
+- name: Arrange Inventory
+  hosts: "{{ rthosts | default('CHANGE_ME') }}"
+  order: inventory
+  gather_facts: false
+  # default: all in the first batch, but that shit requires an int
+  serial: 666
+  tasks:
+    - name: Gather necessary facts
+      ansible.builtin.setup:
+        filter: "ansible_distribution*"
+    - name: Group hosts by distribution file variety
+      ansible.builtin.group_by:
+        # We lowercase everything here as there should be no collisions, but
+        # SUSE could be "SuSE" or "SUSE" (assumed and unverified, but you never know...)
+        key: "adfv_{{ ansible_distribution_file_variety | default('none') | lower }}"
+      tags:
+        - always
+- name: Trigger Debian patching role on Debian hosts
+  hosts: adfv_debian
+  order: inventory
+  gather_facts: false
+  # default: all in the first batch, but that shit requires an int
+  serial: 666
+  tasks:
+    - name: Debian Patches
+      ansible.builtin.import_role:
+        name: "patch_debian"
+      tags:
+        - debian
+- name: Trigger Red Hat patching role on Red Hat hosts
+  hosts: adfv_redhat
+  order: inventory
+  gather_facts: false
+  # default: all in the first batch, but that shit requires an int
+  serial: 666
+  tasks:
+    - name: Red Hat Patches
+      ansible.builtin.import_role:
+        name: "patch_redhat"
+      tags:
+        - redhat
+- name: Trigger SUSE patching role on SUSE hosts
+  hosts: adfv_suse
+  order: inventory
+  gather_facts: false
+  # default: all in the first batch, but that shit requires an int
+  serial: 666
+  tasks:
+    - name: SUSE Patches
+      ansible.builtin.import_role:
+        name: "patch_suse"
+      tags:
+        - suse
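A quick way to eyeball the fact that the grouping key is built from, before trusting the adfv_* groups (a sketch; adjust the inventory path to your setup):

  ansible all -i inventory -m ansible.builtin.setup -a 'filter=ansible_distribution_file_variety'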
diff --git a/roles/patch_debian/tasks/main.yaml b/roles/patch_debian/tasks/main.yaml
new file mode 100644
index 0000000..9d96a4e
--- /dev/null
+++ b/roles/patch_debian/tasks/main.yaml
@@ -0,0 +1,99 @@
+---
+- name: "Check whether OS is a Debian derivative"
+  ansible.builtin.assert:
+    that:
+      - ansible_distribution_file_variety == 'Debian'
+  no_log: true
+- name: Update repository cache
+  ansible.builtin.apt:
+    update_cache: "yes"
+  become: true
+- name: Check for upgrades
+  ansible.builtin.shell:
+    cmd: apt list --upgradable 2>/dev/null | grep -v ^Listing | wc -l
+  # TWO CROSSED HAMMERS AND A BIG W
+  register: aue
+  # apt will throw an error because it doesn't like being piped yet;
+  # for our purposes, however, its output is more than sufficient.
+  failed_when: false
+  #changed_when: aue.stdout|int > 0
+  changed_when: false
+- name: Update and RKhunter checks
+  block:
+    - name: Check for existence of rkhunter
+      ansible.builtin.stat:
+        path: /usr/bin/rkhunter
+      register: rkhex
+      ignore_errors: true
+      no_log: true
+      changed_when: false
+    - name: RKhunter pre-check
+      ansible.builtin.command: rkhunter -c --sk --rwo --ns
+      become: true
+      no_log: true
+      changed_when: false
+      when:
+        - rkhex.stat is defined
+        - rkhex.stat.executable is defined
+        - rkhex.stat.executable | bool
+    - name: Clean packages cache
+      ansible.builtin.command: apt clean
+      changed_when: true
+      become: true
+    - name: Upgrade packages (Debian)
+      ansible.builtin.apt:
+        upgrade: dist
+      become: true
+    - name: Remove dependencies that are no longer required
+      ansible.builtin.apt:
+        autoremove: "yes"
+        purge: "yes"
+      become: true
+  when: aue.stdout|int > 0
+- name: Check for existence of needrestart
+  ansible.builtin.stat:
+    path: /usr/sbin/needrestart
+  register: nrex
+- name: Check reboot requirement
+  block:
+    - name: Check for outdated kernel
+      ansible.builtin.command: /usr/sbin/needrestart -pk
+      register: kernout
+      changed_when: false
+      # failed_when needed so that RC 1 is not treated as a true failure
+      failed_when: kernout.rc > 2
+    - name: Check for outdated services
+      ansible.builtin.command: /usr/sbin/needrestart -pl
+      register: svcout
+      changed_when: false
+      # failed_when needed so that RC 1 is not treated as a true failure
+      failed_when: svcout.rc > 2
+  become: true
+  when:
+    - nrex.stat is defined
+    - nrex.stat.exists
+    - nrex.stat.executable | bool
+- name: Clean apt cache
+  # ansible's apt module does not have a dedicated action for this yet, so we shell out:
+  ansible.builtin.command: apt clean
+  changed_when: false
+  become: true
+  # This runs unconditionally: the more generic cleanup above only ran when updates were installed.
+- name: RKhunter properties update
+  ansible.builtin.command: rkhunter --propupd --rwo --ns
+  become: true
+  changed_when: true
+  when:
+    - rkhex.stat is defined
+    - rkhex.stat.executable is defined
+    - rkhex.stat.executable | bool
+- name: Reboot if required
+  # ignore_errors: yes
+  ansible.builtin.reboot:
+    reboot_timeout: 300
+    pre_reboot_delay: 5
+    test_command: uptime
+    reboot_command: "/bin/systemctl reboot"
+  become: true
+  when: ( kernout.rc is defined and kernout.rc|int == 1 ) or ( svcout.rc is defined and svcout.rc|int == 1 ) or
+        ( kernout.rc is not defined and svcout.rc is not defined )
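The reboot decision above keys entirely on needrestart's exit codes: as wired in the role, RC 1 means "reboot/restart advisable" and anything above 2 is treated as a genuine failure. Verifiable by hand:

  /usr/sbin/needrestart -pk ; echo $?   # kernel check
  /usr/sbin/needrestart -pl ; echo $?   # service check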
diff --git a/roles/patch_redhat/tasks/main.yaml b/roles/patch_redhat/tasks/main.yaml
new file mode 100644
index 0000000..7f200e7
--- /dev/null
+++ b/roles/patch_redhat/tasks/main.yaml
@@ -0,0 +1,91 @@
+---
+- name: "Check whether OS is a Red Hat derivative"
+  ansible.builtin.assert:
+    that:
+      - ansible_distribution_file_variety == 'RedHat'
+  no_log: true
+- name: Update yum/dnf cache
+  # We want a dedicated failure right away if the repos cannot be fetched.
+  # Cheating here: yum wants a "state" statement before it takes action, and then - contrary to the docs -
+  # we can trigger an action containing update_cache without "name" being mandatory. So we get no package
+  # installed, but an updated cache :-)
+  ansible.builtin.yum:
+    state: present
+    update_cache: "yes"
+    validate_certs: "yes"
+  become: true
+- name: Check for upgrades (RHEL)
+  # "yum check-update" would normally exit with RC 100 if updates are available.
+  # But through ansible: RC 0! Weeeee
+  ansible.builtin.shell: /usr/bin/yum -q -C check-update 2>/dev/null | wc -l
+  # args:
+  #   warn: false
+  register: yue
+  changed_when: false
+  become: true
+- name: Updates and RKhunter checks
+  block:
+    - name: Check for existence of rkhunter
+      ansible.builtin.stat:
+        path: /usr/bin/rkhunter
+      register: rkhex
+    - name: RKhunter pre-check
+      ansible.builtin.command: rkhunter -c --sk --rwo --ns
+      become: true
+      no_log: true
+      changed_when: false
+      when:
+        - rkhex.stat is defined
+        - rkhex.stat.executable is defined
+        - rkhex.stat.executable
+    - name: Upgrade all installed packages (RHEL)
+      ansible.builtin.yum:
+        name: '*'
+        state: latest
+        validate_certs: "yes"
+        skip_broken: "yes"
+      become: true
+    # Auto-removal is broken and will nuke packages we previously selected through e.g. ansible.
+    # See ansible issue #60349. Leaving commented out. -- pff
+    # - name: Auto-removal of orphaned dependencies (RHEL)
+    #   ansible.builtin.yum:
+    #     autoremove: "yes"
+  # yum always tosses an arbitrary extra line at you, and a simple tr -s does not eradicate it - so, well,
+  # 0 and 1 are both fine. As explained above, the RC is worthless when run through ansible.
+  when: yue.stdout|int > 1
+- name: Check reboot requirement
+  block:
+    - name: Register requirement for reboot (RHEL)
+      # "yum needs-restarting" still works on RHEL 8; the standalone "needs-restarting" is obsolete.
+      # On major releases >= 9 you may want to create an alternatives entry symlinking yum to dnf.
+      ansible.builtin.command: yum needs-restarting -r
+      register: nr
+      changed_when: false
+      failed_when: false
+      become: true
+- name: Clean packages cache (RHEL)
+  # ansible's yum module does not have a dedicated action for this, so we shell out.
+  # CAUTION: This only works as long as modern RHEL derivatives (RHEL/CentOS >= 8, Fedora >= 30) keep yum as a pseudo-alias to dnf.
+  # Also, despite ansible's yum module not offering this feature, ansible will warn that there is a yum module we should consider using. Turning warnings off:
+  # args:
+  #   warn: false
+  ansible.builtin.command: yum clean packages
+  changed_when: true
+  become: true
+- name: RKhunter properties update
+  ansible.builtin.command: rkhunter --propupd --rwo --ns
+  become: true
+  changed_when: true
+  when:
+    - rkhex.stat is defined
+    - rkhex.stat.executable is defined
+    - rkhex.stat.executable
+- name: Reboot if required
+  # ignore_errors: yes
+  ansible.builtin.reboot:
+    reboot_timeout: 300
+    pre_reboot_delay: 5
+    test_command: uptime
+    reboot_command: "/bin/systemctl reboot"
+  become: true
+  when: ( nr.rc is defined and nr.rc|int > 0 ) or ( nr.rc is not defined )
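The reboot condition relies on the exit code of needs-restarting in reboot-hint mode, which can be checked by hand:

  yum needs-restarting -r ; echo $?   # 0 = no reboot needed, 1 = reboot required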
diff --git a/roles/patch_suse/tasks/main.yaml b/roles/patch_suse/tasks/main.yaml
new file mode 100644
index 0000000..db15d75
--- /dev/null
+++ b/roles/patch_suse/tasks/main.yaml
@@ -0,0 +1,95 @@
+---
+- name: "Check whether OS is a SUSE derivative"
+  ansible.builtin.assert:
+    that:
+      - ansible_distribution_file_variety == 'SUSE' or ansible_distribution_file_variety == 'SuSE'
+  no_log: true
+- name: Check for existence of rkhunter
+  ansible.builtin.stat:
+    path: /usr/bin/rkhunter
+  register: rkhex
+- name: Update zypper cache (SUSE)
+  # We cannot cheat like we did with yum: the zypper module would have to touch a package to refresh
+  # the cache. Hence falling back to shell.
+  ansible.builtin.shell:
+    cmd: 'zypper refs && zypper ref'
+  changed_when: false
+  register: zypperref
+  become: true
+- name: Verify Zypper repository availability
+  # Now, here's the thing with zypper. If you have a dead repository, you need to face the following facts:
+  # 1. All output goes to stdout. For zypper lu, at least on SLE12/openSUSE 42 and earlier, this is:
+  #    - the packages available for update
+  #    - debug output like "loading repository data..." and "reading installed packages..."
+  #      (could be silenced with -q, but without RC feedback we need the debug strings again, kek)
+  #    - WARNING(!!) messages
+  #    ... there is no STDERR.
+  # 2. There is no return code other than 0 for warnings.
+  # Great. Automation-friendliness as if that stuff came directly from Redmond.
+  # So we need to parse the fucking output string in ansible. Let's start with the "repository not
+  # available" warnings.
+  ansible.builtin.fail:
+    msg: "Dead repositories existing and no update present, we consider this a failure."
+  when:
+    - zypperref.stdout is search("Repository.*appears to be outdated")
+    - zypperref.stdout is search("No updates found")
+- name: Check for zypper updates
+  ansible.builtin.command: zypper lu
+  register: zypperlu
+  changed_when: false
+  become: true
+- name: Update and RKhunter checks
+  block:
+    - name: Update all packages (SUSE)
+      # we could narrow this down via type:patch, but that's about all. So fire away.
+      community.general.zypper:
+        name: '*'
+        state: latest
+        extra_args: '--no-refresh'
+        # this is only documented as "zypper rm -u", so apparently nothing exists like
+        # rpm's cleanup or apt's "autoremove" :(
+        # clean_deps: true
+      become: true
+  when:
+    - zypperlu.stdout is not search("No updates found.")
+- name: Check reboot requirement
+  block:
+    - name: Register requirement for reboot (SUSE)
+      # change in paradigm: we now use "zypper needs-rebooting" - SUSE implemented it somewhere
+      # between 12 and 15 - instead of "zypper ps -sss"
+      # todo: what to do if services require a refork?
+      # shell: zypper ps -sss
+      ansible.builtin.command: zypper needs-rebooting
+      register: nrout
+      changed_when: nrout.rc|int == 102
+      failed_when: nrout.rc|int != 102 and nrout.rc|int != 0
+      # the reboot itself is handled by the final "Reboot if required" task below
+- name: Cleanup
+  block:
+    - name: Clean packages cache
+      # ansible's zypper module does not have a dedicated action for this yet, so we shell out:
+      ansible.builtin.command: zypper clean
+      changed_when: false
+    - name: Purge old kernels
+      # ansible's zypper module does not have a dedicated action for this yet, so we shell out:
+      ansible.builtin.command: zypper purge-kernels
+      # TODO: check the output for actual kernel purging and turn this into a proper statement:
+      changed_when: false
+  become: true
+- name: RKhunter properties update
+  ansible.builtin.command: rkhunter --propupd --rwo --ns
+  become: true
+  changed_when: true
+  when:
+    - rkhex.stat is defined
+    - rkhex.stat.executable is defined
+    - rkhex.stat.executable|bool
+- name: Reboot if required
+  # ignore_errors: yes
+  ansible.builtin.reboot:
+    reboot_timeout: 300
+    pre_reboot_delay: 5
+    test_command: uptime
+    reboot_command: "/bin/systemctl reboot"
+  become: true
+  when: nrout is defined and nrout.rc is defined and nrout.rc|int == 102
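Here the reboot signal is zypper's dedicated exit code, which changed_when, failed_when, and the final when all test for:

  zypper needs-rebooting ; echo $?   # 0 = no reboot needed, 102 = reboot required (ZYPPER_EXIT_INF_REBOOT_NEEDED)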