ansible-playbook [core 2.17.7]
  config file = None
  configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/local/lib/python3.12/site-packages/ansible
  ansible collection location = /tmp/collections-9ny
  executable location = /usr/local/bin/ansible-playbook
  python version = 3.12.6 (main, Oct 29 2024, 00:00:00) [GCC 14.2.1 20240801 (Red Hat 14.2.1-2)] (/usr/bin/python3.12)
  jinja version = 3.1.4
  libyaml = True
No config file found; using defaults
running playbook inside collection fedora.linux_system_roles
redirecting (type: callback) ansible.builtin.debug to ansible.posix.debug
redirecting (type: callback) ansible.builtin.debug to ansible.posix.debug
redirecting (type: callback) ansible.builtin.profile_tasks to ansible.posix.profile_tasks
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.

PLAYBOOK: tests_quadlet_pod.yml ************************************************
2 plays in /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml

PLAY [all] *********************************************************************

TASK [Include vault variables] *************************************************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:5
Saturday 14 December 2024  11:33:02 -0500 (0:00:00.013)       0:00:00.013 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_test_password": {
            "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n35383939616163653333633431363463313831383037386236646138333162396161356130303461\n3932623930643263313563336163316337643562333936360a363538636631313039343233383732\n38666530383538656639363465313230343533386130303833336434303438333161656262346562\n3362626538613031640a663330613638366132356534363534353239616666653466353961323533\n6565\n"
        },
        "mysql_container_root_password": {
            "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n61333932373230333539663035366431326163363166363036323963623131363530326231303634\n6635326161643165363366323062333334363730376631660a393566366139353861656364656661\n38653463363837336639363032646433666361646535366137303464623261313663643336306465\n6264663730656337310a343962353137386238383064646533366433333437303566656433386233\n34343235326665646661623131643335313236313131353661386338343366316261643634653633\n3832313034366536616531323963333234326461353130303532\n"
        }
    },
    "ansible_included_var_files": [
        "/tmp/podman-olA/tests/vars/vault-variables.yml"
    ],
    "changed": false
}
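The two __ansible_vault blobs above are read from /tmp/podman-olA/tests/vars/vault-variables.yml. A minimal sketch of the loading task and of how such encrypted values are produced (the include_vars form and the example secret are assumptions; only the file path and variable names appear in this log):

    # Hypothetical form of the "Include vault variables" task above
    - name: Include vault variables
      ansible.builtin.include_vars:
        file: vars/vault-variables.yml

    # Each encrypted value in vault-variables.yml can be generated with:
    #   ansible-vault encrypt_string 'example-secret' --name '__podman_test_password'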
PLAY [Ensure that the role can manage quadlet pods] ****************************

TASK [Gathering Facts] *********************************************************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:9
Saturday 14 December 2024  11:33:02 -0500 (0:00:00.034)       0:00:00.048 *****
[WARNING]: Platform linux on host managed-node1 is using the discovered Python
interpreter at /usr/bin/python3.12, but future installation of another Python
interpreter could change the meaning of that path. See
https://docs.ansible.com/ansible-core/2.17/reference_appendices/interpreter_discovery.html
for more information.
ok: [managed-node1]

TASK [Run the role - root] *****************************************************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:34
Saturday 14 December 2024  11:33:04 -0500 (0:00:01.458)       0:00:01.507 *****
included: fedora.linux_system_roles.podman for managed-node1

TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3
Saturday 14 December 2024  11:33:04 -0500 (0:00:00.090)       0:00:01.597 *****
included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] ****
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3
Saturday 14 December 2024  11:33:04 -0500 (0:00:00.037)       0:00:01.635 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11
Saturday 14 December 2024  11:33:04 -0500 (0:00:00.055)       0:00:01.691 *****
ok: [managed-node1] => {
    "changed": false,
    "stat": {
        "exists": false
    }
}

TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16
Saturday 14 December 2024  11:33:04 -0500 (0:00:00.603)       0:00:02.294 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_is_ostree": false
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23
Saturday 14 December 2024  11:33:04 -0500 (0:00:00.051)       0:00:02.345 *****
ok: [managed-node1] => {
    "changed": false,
    "stat": {
        "exists": false
    }
}

TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28
Saturday 14 December 2024  11:33:05 -0500 (0:00:00.405)       0:00:02.751 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_is_transactional": false
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32
Saturday 14 December 2024  11:33:05 -0500 (0:00:00.045)       0:00:02.796 *****
ok: [managed-node1] => (item=RedHat.yml) => {
    "ansible_facts": {
        "__podman_packages": [
            "podman",
            "shadow-utils-subid"
        ]
    },
    "ansible_included_var_files": [
        "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml"
    ],
    "ansible_loop_var": "item",
    "changed": false,
    "item": "RedHat.yml"
}
skipping: [managed-node1] => (item=CentOS.yml) => {
    "ansible_loop_var": "item",
    "changed": false,
    "false_condition": "__vars_file is file",
    "item": "CentOS.yml",
    "skip_reason": "Conditional result was False"
}
ok: [managed-node1] => (item=CentOS_10.yml) => {
    "ansible_facts": {
        "__podman_packages": [
            "iptables-nft",
            "podman",
            "shadow-utils-subid"
        ]
    },
    "ansible_included_var_files": [
        "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml"
    ],
    "ansible_loop_var": "item",
    "changed": false,
    "item": "CentOS_10.yml"
}
ok: [managed-node1] => (item=CentOS_10.yml) => {
    "ansible_facts": {
        "__podman_packages": [
            "iptables-nft",
            "podman",
            "shadow-utils-subid"
        ]
    },
    "ansible_included_var_files": [
        "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml"
    ],
    "ansible_loop_var": "item",
    "changed": false,
    "item": "CentOS_10.yml"
}
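CentOS_10.yml is loaded twice above because the role tries one vars file per distribution pattern, and on CentOS 10 the distribution-major-version and distribution-version patterns resolve to the same file name; CentOS.yml is skipped because no such file ships with the role. A paraphrase of the lookup (the exact task wording is an assumption; the when test matches the logged false_condition "__vars_file is file"):

    # Sketch of the vars-file lookup behind the loop results above
    - name: Set platform/version specific variables
      ansible.builtin.include_vars: "{{ __vars_file }}"
      loop:
        - "{{ ansible_facts['os_family'] }}.yml"
        - "{{ ansible_facts['distribution'] }}.yml"
        - "{{ ansible_facts['distribution'] }}_{{ ansible_facts['distribution_major_version'] }}.yml"
        - "{{ ansible_facts['distribution'] }}_{{ ansible_facts['distribution_version'] }}.yml"
      vars:
        __vars_file: "{{ role_path }}/vars/{{ item }}"
      when: __vars_file is file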
"podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml" ], "ansible_loop_var": "item", "changed": false, "item": "CentOS_10.yml" } ok: [managed-node1] => (item=CentOS_10.yml) => { "ansible_facts": { "__podman_packages": [ "iptables-nft", "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml" ], "ansible_loop_var": "item", "changed": false, "item": "CentOS_10.yml" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Saturday 14 December 2024 11:33:05 -0500 (0:00:00.081) 0:00:02.878 ***** ok: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Saturday 14 December 2024 11:33:06 -0500 (0:00:01.246) 0:00:04.124 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Saturday 14 December 2024 11:33:06 -0500 (0:00:00.086) 0:00:04.211 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages))", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Saturday 14 December 2024 11:33:06 -0500 (0:00:00.138) 0:00:04.350 ***** skipping: [managed-node1] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Saturday 14 December 2024 11:33:07 -0500 (0:00:00.116) 0:00:04.466 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Saturday 14 December 2024 11:33:07 -0500 (0:00:00.124) 0:00:04.591 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Saturday 14 December 2024 11:33:07 -0500 (0:00:00.150) 0:00:04.743 ***** ok: [managed-node1] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.024195", "end": "2024-12-14 11:33:07.886559", "rc": 0, "start": "2024-12-14 11:33:07.862364" } STDOUT: podman version 5.3.1 
STDERR: time="2024-12-14T11:33:07-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" TASK [fedora.linux_system_roles.podman : Set podman version] ******************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52 Saturday 14 December 2024 11:33:07 -0500 (0:00:00.659) 0:00:05.402 ***** ok: [managed-node1] => { "ansible_facts": { "podman_version": "5.3.1" }, "changed": false } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56 Saturday 14 December 2024 11:33:08 -0500 (0:00:00.057) 0:00:05.459 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63 Saturday 14 December 2024 11:33:08 -0500 (0:00:00.056) 0:00:05.515 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_version is version(\"4.4\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73 Saturday 14 December 2024 11:33:08 -0500 (0:00:00.085) 0:00:05.601 ***** META: end_host conditional evaluated to False, continuing execution for managed-node1 skipping: [managed-node1] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1" } MSG: end_host conditional evaluated to false, continuing execution for managed-node1 TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80 Saturday 14 December 2024 11:33:08 -0500 (0:00:00.094) 0:00:05.695 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96 Saturday 14 December 2024 11:33:08 -0500 (0:00:00.074) 0:00:05.769 ***** META: end_host conditional evaluated to False, continuing execution for managed-node1 skipping: [managed-node1] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1" } MSG: end_host conditional evaluated to false, continuing execution for managed-node1 TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 
Saturday 14 December 2024 11:33:08 -0500 (0:00:00.077) 0:00:05.847 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Saturday 14 December 2024 11:33:08 -0500 (0:00:00.127) 0:00:05.974 ***** ok: [managed-node1] => { "ansible_facts": { "getent_passwd": { "root": [ "x", "0", "0", "Super User", "/root", "/bin/bash" ] } }, "changed": false } TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.599) 0:00:06.574 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set group for podman user] ************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.075) 0:00:06.649 ***** ok: [managed-node1] => { "ansible_facts": { "__podman_group": "0" }, "changed": false } TASK [fedora.linux_system_roles.podman : See if getsubids exists] ************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:28 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.106) 0:00:06.755 ***** ok: [managed-node1] => { "changed": false, "stat": { "atime": 1734193880.1628356, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 32, "charset": "binary", "checksum": "89ab10a2a8fa81bcc0c1df0058f200469ce46f97", "ctime": 1734193861.1678128, "dev": 51714, "device_type": 0, "executable": true, "exists": true, "gid": 0, "gr_name": "root", "inode": 8859182, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "application/x-pie-executable", "mode": "0755", "mtime": 1730678400.0, "nlink": 1, "path": "/usr/bin/getsubids", "pw_name": "root", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 15744, "uid": 0, "version": "2878164177", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": true, "xoth": true, "xusr": true } } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:39 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.444) 0:00:07.200 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:44 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.052) 0:00:07.253 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** 
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:49 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.051) 0:00:07.304 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_user not in [\"root\", \"0\"]", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subuid file] ********************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:62 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.037) 0:00:07.341 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get subgid file] ********************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:67 Saturday 14 December 2024 11:33:09 -0500 (0:00:00.047) 0:00:07.389 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ****** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:72 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.046) 0:00:07.436 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ****** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:82 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.050) 0:00:07.486 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ****** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:89 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.037) 0:00:07.524 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_stat_getsubids.stat.exists", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set config file paths] **************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:115 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.042) 0:00:07.567 ***** ok: [managed-node1] => { "ansible_facts": { "__podman_container_conf_file": "/etc/containers/containers.conf.d/50-systemroles.conf", "__podman_policy_json_file": "/etc/containers/policy.json", "__podman_registries_conf_file": "/etc/containers/registries.conf.d/50-systemroles.conf", "__podman_storage_conf_file": "/etc/containers/storage.conf" }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle container.conf.d] ************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:124 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.090) 0:00:07.657 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml for 
managed-node1 TASK [fedora.linux_system_roles.podman : Ensure containers.d exists] *********** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:5 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.090) 0:00:07.747 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update container config file] ********* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_container_conf_d.yml:13 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.064) 0:00:07.812 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_containers_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle registries.conf.d] ************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:127 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.071) 0:00:07.884 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Ensure registries.d exists] *********** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:5 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.157) 0:00:08.041 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update registries config file] ******** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_registries_conf_d.yml:13 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.112) 0:00:08.153 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_registries_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle storage.conf] ****************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:130 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.055) 0:00:08.209 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Ensure storage.conf parent dir exists] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:5 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.093) 0:00:08.303 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Update storage config file] *********** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_storage_conf.yml:13 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.050) 0:00:08.354 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_storage_conf | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Handle policy.json] ******************* task path: 
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:133 Saturday 14 December 2024 11:33:10 -0500 (0:00:00.048) 0:00:08.403 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Ensure policy.json parent dir exists] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:6 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.147) 0:00:08.550 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Stat the policy.json file] ************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:14 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.108) 0:00:08.658 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get the existing policy.json] ********* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:19 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.085) 0:00:08.744 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Write new policy.json file] *********** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_policy_json.yml:25 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.083) 0:00:08.828 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_policy_json | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage firewall for specified ports] ************************************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:139 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.097) 0:00:08.925 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_firewall | length > 0", "skip_reason": "Conditional result was False" } TASK [Manage selinux for specified ports] ************************************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:146 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.106) 0:00:09.032 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_selinux_ports | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Keep track of users that need to cancel linger] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:153 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.084) 0:00:09.117 ***** ok: [managed-node1] => { "ansible_facts": { "__podman_cancel_user_linger": [] }, "changed": false } TASK [fedora.linux_system_roles.podman : Handle certs.d files - present] ******* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:157 Saturday 14 December 2024 11:33:11 -0500 (0:00:00.058) 0:00:09.176 ***** skipping: [managed-node1] => { "censored": "the output has been 
TASK [fedora.linux_system_roles.podman : Handle credential files - present] ****
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:166
Saturday 14 December 2024  11:33:11 -0500 (0:00:00.046)       0:00:09.222 *****
skipping: [managed-node1] => {
    "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result",
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Handle secrets] ***********************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:175
Saturday 14 December 2024  11:33:11 -0500 (0:00:00.053)       0:00:09.276 *****
skipping: [managed-node1] => {
    "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result",
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Handle Kubernetes specifications] *****
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:182
Saturday 14 December 2024  11:33:11 -0500 (0:00:00.052)       0:00:09.328 *****
skipping: [managed-node1] => {
    "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result",
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Handle Quadlet specifications] ********
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:189
Saturday 14 December 2024  11:33:11 -0500 (0:00:00.053)       0:00:09.382 *****
included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node1 => (item=(censored due to no_log))
included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml for managed-node1 => (item=(censored due to no_log))

TASK [fedora.linux_system_roles.podman : Set per-container variables part 0] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:14
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.140)       0:00:09.523 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_quadlet_file_src": "",
        "__podman_quadlet_spec": {
            "Pod": {
                "PodName": "quadlet-pod"
            }
        },
        "__podman_quadlet_str": "",
        "__podman_quadlet_template_src": ""
    },
    "changed": false
}
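The __podman_quadlet_spec fact above is the parsed form of one entry in the role's podman_quadlet_specs input. A minimal sketch of an equivalent invocation (the play wording is hypothetical; the name, type, and Pod values are taken from the facts logged in this run):

    # Hypothetical equivalent of the "Run the role - root" play
    - name: Ensure that the role can manage quadlet pods
      hosts: all
      vars:
        podman_quadlet_specs:
          - name: quadlet-pod-pod   # becomes quadlet-pod-pod.pod
            type: pod
            Pod:
              PodName: quadlet-pod
      roles:
        - fedora.linux_system_roles.podman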
TASK [fedora.linux_system_roles.podman : Set per-container variables part 1] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:25
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.110)       0:00:09.633 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_continue_if_pull_fails": false,
        "__podman_pull_image": true,
        "__podman_state": "created",
        "__podman_systemd_unit_scope": "",
        "__podman_user": "root"
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Fail if no quadlet spec is given] *****
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:35
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.081)       0:00:09.715 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_quadlet_spec | length == 0",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set per-container variables part 2] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:48
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.049)       0:00:09.764 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_quadlet_name": "quadlet-pod-pod",
        "__podman_quadlet_type": "pod",
        "__podman_rootless": false
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Check user and group information] *****
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:57
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.072)       0:00:09.836 *****
included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Get user information] *****************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.154)       0:00:09.991 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "'getent_passwd' not in ansible_facts or __podman_user not in ansible_facts['getent_passwd']",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Fail if user does not exist] **********
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.067)       0:00:10.059 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not ansible_facts[\"getent_passwd\"][__podman_user]",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set group for podman user] ************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:16
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.063)       0:00:10.122 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_group": "0"
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : See if getsubids exists] **************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:28
Saturday 14 December 2024  11:33:12 -0500 (0:00:00.073)       0:00:10.196 *****
ok: [managed-node1] => {
    "changed": false,
    "stat": {
        "atime": 1734193880.1628356,
        "attr_flags": "",
        "attributes": [],
        "block_size": 4096,
        "blocks": 32,
        "charset": "binary",
        "checksum": "89ab10a2a8fa81bcc0c1df0058f200469ce46f97",
        "ctime": 1734193861.1678128,
        "dev": 51714,
        "device_type": 0,
        "executable": true,
        "exists": true,
        "gid": 0,
        "gr_name": "root",
        "inode": 8859182,
        "isblk": false,
        "ischr": false,
        "isdir": false,
        "isfifo": false,
        "isgid": false,
        "islnk": false,
        "isreg": true,
        "issock": false,
        "isuid": false,
        "mimetype": "application/x-pie-executable",
        "mode": "0755",
        "mtime": 1730678400.0,
        "nlink": 1,
        "path": "/usr/bin/getsubids",
        "pw_name": "root",
        "readable": true,
        "rgrp": true,
        "roth": true,
        "rusr": true,
        "size": 15744,
        "uid": 0,
        "version": "2878164177",
        "wgrp": false,
        "woth": false,
        "writeable": true,
        "wusr": true,
        "xgrp": true,
        "xoth": true,
        "xusr": true
    }
}

TASK [fedora.linux_system_roles.podman : Check with getsubids for user subuids] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:39
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.467)       0:00:10.664 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_user not in [\"root\", \"0\"]",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Check with getsubids for user subgids] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:44
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.050)       0:00:10.714 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_user not in [\"root\", \"0\"]",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ******
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:49
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.042)       0:00:10.757 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_user not in [\"root\", \"0\"]",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Get subuid file] **********************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:62
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.037)       0:00:10.794 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_stat_getsubids.stat.exists",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Get subgid file] **********************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:67
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.028)       0:00:10.823 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_stat_getsubids.stat.exists",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set user subuid and subgid info] ******
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:72
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.028)       0:00:10.851 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_stat_getsubids.stat.exists",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Fail if user not in subuid file] ******
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:82
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.027)       0:00:10.879 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_stat_getsubids.stat.exists",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Fail if user not in subgid file] ******
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:89
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.029)       0:00:10.909 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "not __podman_stat_getsubids.stat.exists",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set per-container variables part 3] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:62
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.027)       0:00:10.937 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_activate_systemd_unit": true,
        "__podman_images_found": [],
        "__podman_kube_yamls_raw": "",
        "__podman_service_name": "quadlet-pod-pod-pod.service",
        "__podman_systemd_scope": "system",
        "__podman_user_home_dir": "/root",
        "__podman_xdg_runtime_dir": "/run/user/0"
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Set per-container variables part 4] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:73
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.067)       0:00:11.004 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_quadlet_path": "/etc/containers/systemd"
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Get kube yaml contents] ***************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:77
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.083)       0:00:11.088 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_kube_yamls_raw | length > 0",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Set per-container variables part 5] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:87
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.074)       0:00:11.163 *****
ok: [managed-node1] => {
    "ansible_facts": {
        "__podman_images": [],
        "__podman_quadlet_file": "/etc/containers/systemd/quadlet-pod-pod.pod",
        "__podman_volumes": []
    },
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Set per-container variables part 6] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:105
Saturday 14 December 2024  11:33:13 -0500 (0:00:00.148)       0:00:11.311 *****
ok: [managed-node1] => {
    "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result",
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Cleanup quadlets] *********************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:112
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.127)       0:00:11.439 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_state == \"absent\"",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Create and update quadlets] ***********
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_quadlet_spec.yml:116
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.049)       0:00:11.488 *****
included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Manage linger] ************************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:2
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.114)       0:00:11.603 *****
included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml for managed-node1

TASK [fedora.linux_system_roles.podman : Enable linger if needed] **************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:12
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.130)       0:00:11.733 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_rootless | bool",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Mark user as not yet needing to cancel linger] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:18
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.089)       0:00:11.823 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_rootless | bool",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Mark user for possible linger cancel] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/manage_linger.yml:22
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.053)       0:00:11.876 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_rootless | bool",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Create host directories] **************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:7
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.053)       0:00:11.930 *****
skipping: [managed-node1] => {
    "changed": false,
    "skipped_reason": "No items in the list"
}

TASK [fedora.linux_system_roles.podman : Ensure container images are present] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:18
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.074)       0:00:12.005 *****
skipping: [managed-node1] => {
    "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result",
    "changed": false
}

TASK [fedora.linux_system_roles.podman : Ensure the quadlet directory is present] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39
Saturday 14 December 2024  11:33:14 -0500 (0:00:00.038)       0:00:12.044 *****
ok: [managed-node1] => {
    "changed": false,
    "gid": 0,
    "group": "root",
    "mode": "0755",
    "owner": "root",
    "path": "/etc/containers/systemd",
    "secontext": "system_u:object_r:etc_t:s0",
    "size": 6,
    "state": "directory",
    "uid": 0
}

TASK [fedora.linux_system_roles.podman : Ensure quadlet file is copied] ********
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:48
Saturday 14 December 2024  11:33:15 -0500 (0:00:00.518)       0:00:12.562 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_quadlet_file_src | length > 0",
    "skip_reason": "Conditional result was False"
}

TASK [fedora.linux_system_roles.podman : Ensure quadlet file content is present] ***
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:58
Saturday 14 December 2024  11:33:15 -0500 (0:00:00.074)       0:00:12.637 *****
skipping: [managed-node1] => {
    "changed": false,
    "false_condition": "__podman_quadlet_str | length > 0",
    "skip_reason": "Conditional result was False"
}
"/etc/containers/systemd/quadlet-pod-pod.pod", "gid": 0, "group": "root", "md5sum": "43c9e9c2ff3ad9cd27c1f2d12f03aee0", "mode": "0644", "owner": "root", "secontext": "system_u:object_r:etc_t:s0", "size": 70, "src": "/root/.ansible/tmp/ansible-tmp-1734193995.4057543-13488-229431513263554/.source.pod", "state": "file", "uid": 0 } TASK [fedora.linux_system_roles.podman : Reload systemctl] ********************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:82 Saturday 14 December 2024 11:33:16 -0500 (0:00:01.099) 0:00:13.849 ***** ok: [managed-node1] => { "changed": false, "name": null, "status": {} } TASK [fedora.linux_system_roles.podman : Start service] ************************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:110 Saturday 14 December 2024 11:33:17 -0500 (0:00:01.190) 0:00:15.039 ***** fatal: [managed-node1]: FAILED! => { "changed": false } MSG: Unable to start service quadlet-pod-pod-pod.service: Job for quadlet-pod-pod-pod.service failed because the control process exited with error code. See "systemctl status quadlet-pod-pod-pod.service" and "journalctl -xeu quadlet-pod-pod-pod.service" for details. TASK [Debug3] ****************************************************************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:127 Saturday 14 December 2024 11:33:18 -0500 (0:00:00.746) 0:00:15.785 ***** ok: [managed-node1] => { "changed": false, "cmd": "set -x\nset -o pipefail\nexec 1>&2\n#podman volume rm --all\n#podman network prune -f\npodman volume ls\npodman network ls\npodman secret ls\npodman container ls\npodman pod ls\npodman images\nsystemctl list-units | grep quadlet\nsystemctl list-unit-files | grep quadlet\nls -alrtF /etc/containers/systemd\n/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log\n", "delta": "0:00:00.640839", "end": "2024-12-14 11:33:19.352339", "rc": 0, "start": "2024-12-14 11:33:18.711500" } STDERR: + set -o pipefail + exec + podman volume ls time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" + podman network ls time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" NETWORK ID NAME DRIVER 2f259bab93aa podman bridge 1c694e182c0d podman-default-kube-network bridge + podman secret ls time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" 
\"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" ID NAME DRIVER CREATED UPDATED + podman container ls time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + podman pod ls time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" POD ID NAME STATUS CREATED INFRA ID # OF CONTAINERS + podman images time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" REPOSITORY TAG IMAGE ID CREATED SIZE quay.io/libpod/registry 2.8.2 0030ba3d620c 16 months ago 24.6 MB localhost:5000/libpod/testimage 20210610 9f9ec7f2fdef 3 years ago 7.99 MB quay.io/libpod/testimage 20210610 9f9ec7f2fdef 3 years ago 7.99 MB + systemctl list-units + grep quadlet ● quadlet-pod-pod-pod.service loaded activating start-pre start quadlet-pod-pod-pod.service + systemctl list-unit-files + grep quadlet quadlet-pod-pod-pod.service generated - + ls -alrtF /etc/containers/systemd total 4 drwxr-xr-x. 9 root root 178 Dec 14 11:32 ../ -rw-r--r--. 1 root root 70 Dec 14 11:33 quadlet-pod-pod.pod drwxr-xr-x. 
2 root root 33 Dec 14 11:33 ./ + /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log quadlet-generator[23314]: Loading source unit file /etc/containers/systemd/quadlet-pod-pod.pod ---quadlet-pod-pod-pod.service--- # # Ansible managed # # system_role:podman [X-Pod] PodName=quadlet-pod [Unit] Wants=network-online.target After=network-online.target SourcePath=/etc/containers/systemd/quadlet-pod-pod.pod RequiresMountsFor=%t/containers [Service] SyslogIdentifier=%N ExecStart=/usr/bin/podman pod start --pod-id-file=%t/%N.pod-id ExecStop=/usr/bin/podman pod stop --pod-id-file=%t/%N.pod-id --ignore --time=10 ExecStopPost=/usr/bin/podman pod rm --pod-id-file=%t/%N.pod-id --ignore --force ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile=%t/%N.pid --pod-id-file=%t/%N.pod-id --exit-policy=stop --replace --infra-name quadlet-pod-infra --name quadlet-pod Environment=PODMAN_SYSTEMD_UNIT=%n Type=forking Restart=on-failure PIDFile=%t/%N.pid TASK [Check AVCs] ************************************************************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:146 Saturday 14 December 2024 11:33:19 -0500 (0:00:01.111) 0:00:16.897 ***** ok: [managed-node1] => { "changed": false, "cmd": [ "grep", "type=AVC", "/var/log/audit/audit.log" ], "delta": "0:00:00.005016", "end": "2024-12-14 11:33:19.855419", "failed_when_result": false, "rc": 1, "start": "2024-12-14 11:33:19.850403" } MSG: non-zero return code TASK [Dump journal] ************************************************************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151 Saturday 14 December 2024 11:33:19 -0500 (0:00:00.509) 0:00:17.407 ***** fatal: [managed-node1]: FAILED! => { "changed": false, "cmd": [ "journalctl", "-ex" ], "delta": "0:00:00.027939", "end": "2024-12-14 11:33:20.311044", "failed_when_result": true, "rc": 0, "start": "2024-12-14 11:33:20.283105" } STDOUT: Dec 14 11:26:32 localhost systemd-journald[523]: Collecting audit messages is disabled. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-udev-trigger.service - Coldplug All udev Devices... Dec 14 11:26:32 localhost systemd[1]: Mounted dev-hugepages.mount - Huge Pages File System. Dec 14 11:26:32 localhost systemd[1]: Mounted dev-mqueue.mount - POSIX Message Queue File System. Dec 14 11:26:32 localhost systemd[1]: Mounted sys-kernel-debug.mount - Kernel Debug File System. Dec 14 11:26:32 localhost systemd[1]: Mounted sys-kernel-tracing.mount - Kernel Trace File System. Dec 14 11:26:32 localhost systemd[1]: Finished kmod-static-nodes.service - Create List of Static Device Nodes. Dec 14 11:26:32 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@configfs.service - Load Kernel Module configfs. Dec 14 11:26:32 localhost systemd[1]: modprobe@dm_mod.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@dm_mod.service - Load Kernel Module dm_mod. Dec 14 11:26:32 localhost systemd[1]: modprobe@drm.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@drm.service - Load Kernel Module drm. Dec 14 11:26:32 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@efi_pstore.service - Load Kernel Module efi_pstore. Dec 14 11:26:32 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully. 
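Quadlet keeps the source [Pod] group in the generated unit as [X-Pod], so the 70-byte file the role wrote can be read back from the translation above. A reconstruction (an inference from the dryrun output, not captured file content):

    #
    # Ansible managed
    #
    # system_role:podman

    [Pod]
    PodName=quadlet-pod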
TASK [Check AVCs] **************************************************************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:146
Saturday 14 December 2024  11:33:19 -0500 (0:00:01.111)       0:00:16.897 *****
ok: [managed-node1] => {
    "changed": false,
    "cmd": [
        "grep",
        "type=AVC",
        "/var/log/audit/audit.log"
    ],
    "delta": "0:00:00.005016",
    "end": "2024-12-14 11:33:19.855419",
    "failed_when_result": false,
    "rc": 1,
    "start": "2024-12-14 11:33:19.850403"
}

MSG:

non-zero return code

TASK [Dump journal] ************************************************************
task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151
Saturday 14 December 2024  11:33:19 -0500 (0:00:00.509)       0:00:17.407 *****
fatal: [managed-node1]: FAILED! => {
    "changed": false,
    "cmd": [
        "journalctl",
        "-ex"
    ],
    "delta": "0:00:00.027939",
    "end": "2024-12-14 11:33:20.311044",
    "failed_when_result": true,
    "rc": 0,
    "start": "2024-12-14 11:33:20.283105"
}

STDOUT:

Dec 14 11:26:32 localhost systemd-journald[523]: Collecting audit messages is disabled.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-udev-trigger.service - Coldplug All udev Devices...
Dec 14 11:26:32 localhost systemd[1]: Mounted dev-hugepages.mount - Huge Pages File System.
Dec 14 11:26:32 localhost systemd[1]: Mounted dev-mqueue.mount - POSIX Message Queue File System.
Dec 14 11:26:32 localhost systemd[1]: Mounted sys-kernel-debug.mount - Kernel Debug File System.
Dec 14 11:26:32 localhost systemd[1]: Mounted sys-kernel-tracing.mount - Kernel Trace File System.
Dec 14 11:26:32 localhost systemd[1]: Finished kmod-static-nodes.service - Create List of Static Device Nodes.
Dec 14 11:26:32 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@configfs.service - Load Kernel Module configfs.
Dec 14 11:26:32 localhost systemd[1]: modprobe@dm_mod.service: Deactivated successfully.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@dm_mod.service - Load Kernel Module dm_mod.
Dec 14 11:26:32 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@drm.service - Load Kernel Module drm.
Dec 14 11:26:32 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@efi_pstore.service - Load Kernel Module efi_pstore.
Dec 14 11:26:32 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@fuse.service - Load Kernel Module fuse.
Dec 14 11:26:32 localhost systemd[1]: modprobe@loop.service: Deactivated successfully.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@loop.service - Load Kernel Module loop.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-network-generator.service - Generate network units from Kernel command line.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-remount-fs.service - Remount Root and Kernel File Systems.
Dec 14 11:26:32 localhost systemd-journald[523]: Journal started
░░ Subject: The journal has been started
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The system journal process has started up, opened the journal
░░ files for writing and is now ready to process requests.
Dec 14 11:26:32 localhost systemd-journald[523]: Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is 8M, max 70.5M, 62.5M free.
░░ Subject: Disk space used by the journal
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is currently using 8M.
░░ Maximum allowed usage is set to 70.5M.
░░ Leaving at least 35.2M free (of currently available 689.3M of disk space).
░░ Enforced usage limit is thus 70.5M, of which 62.5M are still available.
░░
░░ The limits controlling how much disk space is used by the journal may
░░ be configured with SystemMaxUse=, SystemKeepFree=, SystemMaxFileSize=,
░░ RuntimeMaxUse=, RuntimeKeepFree=, RuntimeMaxFileSize= settings in
░░ /etc/systemd/journald.conf. See journald.conf(5) for details.
Dec 14 11:26:31 localhost systemd[1]: Queued start job for default target multi-user.target.
Dec 14 11:26:31 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit systemd-journald.service has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: Started systemd-journald.service - Journal Service.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-sysctl.service - Apply Kernel Variables.
░░ Subject: A start job for unit systemd-sysctl.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-sysctl.service has finished successfully.
░░
░░ The job identifier is 181.
Dec 14 11:26:32 localhost systemd[1]: systemd-hwdb-update.service - Rebuild Hardware Database was skipped because of an unmet condition check (ConditionNeedsUpdate=/etc).
░░ Subject: A start job for unit systemd-hwdb-update.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-hwdb-update.service has finished successfully.
░░
░░ The job identifier is 177.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-journal-flush.service - Flush Journal to Persistent Storage...
░░ Subject: A start job for unit systemd-journal-flush.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-journal-flush.service has begun execution.
░░
░░ The job identifier is 152.
Dec 14 11:26:32 localhost systemd[1]: systemd-pstore.service - Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).
░░ Subject: A start job for unit systemd-pstore.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-pstore.service has finished successfully.
░░
░░ The job identifier is 147.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-random-seed.service - Load/Save OS Random Seed...
░░ Subject: A start job for unit systemd-random-seed.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-random-seed.service has begun execution.
░░
░░ The job identifier is 137.
Dec 14 11:26:32 localhost systemd[1]: systemd-repart.service - Repartition Root Disk was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-repart.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-repart.service has finished successfully.
░░
░░ The job identifier is 160.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-tmpfiles-setup-dev-early.service - Create Static Device Nodes in /dev gracefully...
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev-early.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev-early.service has begun execution.
░░
░░ The job identifier is 198.
Dec 14 11:26:32 localhost systemd[1]: systemd-tpm2-setup.service - TPM SRK Setup was skipped because of an unmet condition check (ConditionSecurity=measured-uki).
░░ Subject: A start job for unit systemd-tpm2-setup.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tpm2-setup.service has finished successfully.
░░
░░ The job identifier is 151.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-udev-load-credentials.service - Load udev Rules from Credentials.
░░ Subject: A start job for unit systemd-udev-load-credentials.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-udev-load-credentials.service has finished successfully.
░░
░░ The job identifier is 173.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-random-seed.service - Load/Save OS Random Seed.
░░ Subject: A start job for unit systemd-random-seed.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-random-seed.service has finished successfully.
░░
░░ The job identifier is 137.
Dec 14 11:26:32 localhost systemd[1]: Finished lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.
░░ Subject: A start job for unit lvm2-monitor.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit lvm2-monitor.service has finished successfully.
░░
░░ The job identifier is 186.
Dec 14 11:26:32 localhost systemd-journald[523]: Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is 8M, max 70.5M, 62.5M free.
░░ Subject: Disk space used by the journal
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is currently using 8M.
░░ Maximum allowed usage is set to 70.5M.
░░ Leaving at least 35.2M free (of currently available 689.3M of disk space).
░░ Enforced usage limit is thus 70.5M, of which 62.5M are still available.
░░
░░ The limits controlling how much disk space is used by the journal may
░░ be configured with SystemMaxUse=, SystemKeepFree=, SystemMaxFileSize=,
░░ RuntimeMaxUse=, RuntimeKeepFree=, RuntimeMaxFileSize= settings in
░░ /etc/systemd/journald.conf. See journald.conf(5) for details.
Dec 14 11:26:32 localhost systemd-journald[523]: Received client request to flush runtime journal.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-journal-flush.service - Flush Journal to Persistent Storage.
░░ Subject: A start job for unit systemd-journal-flush.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-journal-flush.service has finished successfully.
░░
░░ The job identifier is 152.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-udev-trigger.service - Coldplug All udev Devices.
░░ Subject: A start job for unit systemd-udev-trigger.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-udev-trigger.service has finished successfully.
░░
░░ The job identifier is 185.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-tmpfiles-setup-dev-early.service - Create Static Device Nodes in /dev gracefully.
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev-early.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev-early.service has finished successfully.
░░
░░ The job identifier is 198.
Dec 14 11:26:32 localhost systemd[1]: systemd-sysusers.service - Create System Users was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-sysusers.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-sysusers.service has finished successfully.
░░
░░ The job identifier is 182.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev...
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev.service has begun execution.
░░
░░ The job identifier is 145.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev.
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev.service has finished successfully.
░░
░░ The job identifier is 145.
Dec 14 11:26:32 localhost systemd[1]: Reached target local-fs-pre.target - Preparation for Local File Systems.
░░ Subject: A start job for unit local-fs-pre.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit local-fs-pre.target has finished successfully.
░░
░░ The job identifier is 144.
Dec 14 11:26:32 localhost systemd[1]: Reached target local-fs.target - Local File Systems.
░░ Subject: A start job for unit local-fs.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit local-fs.target has finished successfully.
░░ ░░ The job identifier is 142. Dec 14 11:26:32 localhost systemd[1]: Listening on systemd-bootctl.socket - Boot Entries Service Socket. ░░ Subject: A start job for unit systemd-bootctl.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-bootctl.socket has finished successfully. ░░ ░░ The job identifier is 213. Dec 14 11:26:32 localhost systemd[1]: Listening on systemd-sysext.socket - System Extension Image Management. ░░ Subject: A start job for unit systemd-sysext.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-sysext.socket has finished successfully. ░░ ░░ The job identifier is 220. Dec 14 11:26:32 localhost systemd[1]: ldconfig.service - Rebuild Dynamic Linker Cache was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit ldconfig.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit ldconfig.service has finished successfully. ░░ ░░ The job identifier is 146. Dec 14 11:26:32 localhost systemd[1]: selinux-autorelabel-mark.service - Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux). ░░ Subject: A start job for unit selinux-autorelabel-mark.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit selinux-autorelabel-mark.service has finished successfully. ░░ ░░ The job identifier is 190. Dec 14 11:26:32 localhost systemd[1]: systemd-binfmt.service - Set Up Additional Binary Formats was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit systemd-binfmt.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-binfmt.service has finished successfully. ░░ ░░ The job identifier is 193. Dec 14 11:26:32 localhost systemd[1]: systemd-boot-random-seed.service - Update Boot Loader Random Seed was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit systemd-boot-random-seed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-boot-random-seed.service has finished successfully. ░░ ░░ The job identifier is 179. Dec 14 11:26:32 localhost systemd[1]: systemd-confext.service - Merge System Configuration Images into /etc/ was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit systemd-confext.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-confext.service has finished successfully. ░░ ░░ The job identifier is 157. Dec 14 11:26:32 localhost systemd[1]: systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/ was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit systemd-sysext.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-sysext.service has finished successfully. ░░ ░░ The job identifier is 189. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-tmpfiles-setup.service - Create System Files and Directories... 
░░ Subject: A start job for unit systemd-tmpfiles-setup.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-tmpfiles-setup.service has begun execution. ░░ ░░ The job identifier is 139. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-udevd.service - Rule-based Manager for Device Events and Files... ░░ Subject: A start job for unit systemd-udevd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-udevd.service has begun execution. ░░ ░░ The job identifier is 172. Dec 14 11:26:32 localhost systemd-udevd[562]: Using default interface naming scheme 'rhel-10.0-beta'. Dec 14 11:26:32 localhost systemd[1]: Started systemd-udevd.service - Rule-based Manager for Device Events and Files. ░░ Subject: A start job for unit systemd-udevd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-udevd.service has finished successfully. ░░ ░░ The job identifier is 172. Dec 14 11:26:32 localhost systemd[1]: Starting modprobe@fuse.service - Load Kernel Module fuse... ░░ Subject: A start job for unit modprobe@fuse.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit modprobe@fuse.service has begun execution. ░░ ░░ The job identifier is 294. Dec 14 11:26:32 localhost systemd[1]: Starting modprobe@configfs.service - Load Kernel Module configfs... ░░ Subject: A start job for unit modprobe@configfs.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit modprobe@configfs.service has begun execution. ░░ ░░ The job identifier is 302. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-tmpfiles-setup.service - Create System Files and Directories. ░░ Subject: A start job for unit systemd-tmpfiles-setup.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-tmpfiles-setup.service has finished successfully. ░░ ░░ The job identifier is 139. Dec 14 11:26:32 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit modprobe@fuse.service has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@fuse.service - Load Kernel Module fuse. ░░ Subject: A start job for unit modprobe@fuse.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit modprobe@fuse.service has finished successfully. ░░ ░░ The job identifier is 294. Dec 14 11:26:32 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit modprobe@configfs.service has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@configfs.service - Load Kernel Module configfs. ░░ Subject: A start job for unit modprobe@configfs.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit modprobe@configfs.service has finished successfully. ░░ ░░ The job identifier is 302. 
Dec 14 11:26:32 localhost systemd[1]: Starting audit-rules.service - Load Audit Rules... ░░ Subject: A start job for unit audit-rules.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit audit-rules.service has begun execution. ░░ ░░ The job identifier is 236. Dec 14 11:26:32 localhost systemd[1]: Starting rpcbind.service - RPC Bind... ░░ Subject: A start job for unit rpcbind.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpcbind.service has begun execution. ░░ ░░ The job identifier is 253. Dec 14 11:26:32 localhost systemd[1]: systemd-firstboot.service - First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes). ░░ Subject: A start job for unit systemd-firstboot.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-firstboot.service has finished successfully. ░░ ░░ The job identifier is 180. Dec 14 11:26:32 localhost systemd[1]: first-boot-complete.target - First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes). ░░ Subject: A start job for unit first-boot-complete.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit first-boot-complete.target has finished successfully. ░░ ░░ The job identifier is 138. Dec 14 11:26:32 localhost systemd[1]: systemd-journal-catalog-update.service - Rebuild Journal Catalog was skipped because of an unmet condition check (ConditionNeedsUpdate=/var). ░░ Subject: A start job for unit systemd-journal-catalog-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-journal-catalog-update.service has finished successfully. ░░ ░░ The job identifier is 131. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-machine-id-commit.service - Save Transient machine-id to Disk... ░░ Subject: A start job for unit systemd-machine-id-commit.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-machine-id-commit.service has begun execution. ░░ ░░ The job identifier is 194. Dec 14 11:26:32 localhost systemd[1]: systemd-update-done.service - Update is Completed was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit systemd-update-done.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-done.service has finished successfully. ░░ ░░ The job identifier is 168. Dec 14 11:26:32 localhost systemd[1]: Condition check resulted in dev-ttyS0.device - /dev/ttyS0 being skipped. ░░ Subject: A start job for unit dev-ttyS0.device has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dev-ttyS0.device has finished successfully. ░░ ░░ The job identifier is 232. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-machine-id-commit.service - Save Transient machine-id to Disk. ░░ Subject: A start job for unit systemd-machine-id-commit.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-machine-id-commit.service has finished successfully. ░░ ░░ The job identifier is 194. 
Dec 14 11:26:32 localhost systemd[1]: run-credentials-systemd\x2dnetwork\x2dgenerator.service.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-credentials-systemd\x2dnetwork\x2dgenerator.service.mount has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: run-credentials-systemd\x2dsysctl.service.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-credentials-systemd\x2dsysctl.service.mount has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: run-credentials-systemd\x2dudev\x2dload\x2dcredentials.service.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-credentials-systemd\x2dudev\x2dload\x2dcredentials.service.mount has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: etc-machine\x2did.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit etc-machine\x2did.mount has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: Mounting sys-fs-fuse-connections.mount - FUSE Control File System... ░░ Subject: A start job for unit sys-fs-fuse-connections.mount has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sys-fs-fuse-connections.mount has begun execution. ░░ ░░ The job identifier is 166. Dec 14 11:26:32 localhost systemd[1]: Mounting var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System... ░░ Subject: A start job for unit var-lib-nfs-rpc_pipefs.mount has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit var-lib-nfs-rpc_pipefs.mount has begun execution. ░░ ░░ The job identifier is 247. Dec 14 11:26:32 localhost systemd[1]: Mounted sys-fs-fuse-connections.mount - FUSE Control File System. ░░ Subject: A start job for unit sys-fs-fuse-connections.mount has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sys-fs-fuse-connections.mount has finished successfully. ░░ ░░ The job identifier is 166. Dec 14 11:26:32 localhost (udev-worker)[574]: Network interface NamePolicy= disabled on kernel command line. Dec 14 11:26:32 localhost kernel: RPC: Registered named UNIX socket transport module. Dec 14 11:26:32 localhost kernel: RPC: Registered udp transport module. Dec 14 11:26:32 localhost kernel: RPC: Registered tcp transport module. Dec 14 11:26:32 localhost kernel: RPC: Registered tcp-with-tls transport module. Dec 14 11:26:32 localhost kernel: RPC: Registered tcp NFSv4.1 backchannel transport module. Dec 14 11:26:32 localhost systemd[1]: Mounted var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System. ░░ Subject: A start job for unit var-lib-nfs-rpc_pipefs.mount has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit var-lib-nfs-rpc_pipefs.mount has finished successfully. ░░ ░░ The job identifier is 247. Dec 14 11:26:32 localhost systemd[1]: Reached target rpc_pipefs.target. 
░░ Subject: A start job for unit rpc_pipefs.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc_pipefs.target has finished successfully. ░░ ░░ The job identifier is 246. Dec 14 11:26:33 localhost augenrules[584]: /sbin/augenrules: No change Dec 14 11:26:33 localhost augenrules[615]: No rules Dec 14 11:26:33 localhost augenrules[615]: enabled 0 Dec 14 11:26:33 localhost augenrules[615]: failure 1 Dec 14 11:26:33 localhost augenrules[615]: pid 0 Dec 14 11:26:33 localhost augenrules[615]: rate_limit 0 Dec 14 11:26:33 localhost augenrules[615]: backlog_limit 8192 Dec 14 11:26:33 localhost augenrules[615]: lost 0 Dec 14 11:26:33 localhost augenrules[615]: backlog 0 Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time 60000 Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time_actual 0 Dec 14 11:26:33 localhost augenrules[615]: enabled 0 Dec 14 11:26:33 localhost augenrules[615]: failure 1 Dec 14 11:26:33 localhost augenrules[615]: pid 0 Dec 14 11:26:33 localhost augenrules[615]: rate_limit 0 Dec 14 11:26:33 localhost augenrules[615]: backlog_limit 8192 Dec 14 11:26:33 localhost augenrules[615]: lost 0 Dec 14 11:26:33 localhost augenrules[615]: backlog 0 Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time 60000 Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time_actual 0 Dec 14 11:26:33 localhost augenrules[615]: enabled 0 Dec 14 11:26:33 localhost augenrules[615]: failure 1 Dec 14 11:26:33 localhost augenrules[615]: pid 0 Dec 14 11:26:33 localhost augenrules[615]: rate_limit 0 Dec 14 11:26:33 localhost augenrules[615]: backlog_limit 8192 Dec 14 11:26:33 localhost augenrules[615]: lost 0 Dec 14 11:26:33 localhost augenrules[615]: backlog 0 Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time 60000 Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time_actual 0 Dec 14 11:26:33 localhost systemd[1]: audit-rules.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit audit-rules.service has successfully entered the 'dead' state. Dec 14 11:26:33 localhost systemd[1]: Finished audit-rules.service - Load Audit Rules. ░░ Subject: A start job for unit audit-rules.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit audit-rules.service has finished successfully. ░░ ░░ The job identifier is 236. Dec 14 11:26:33 localhost systemd[1]: Starting auditd.service - Security Audit Logging Service... ░░ Subject: A start job for unit auditd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit auditd.service has begun execution. ░░ ░░ The job identifier is 235. 
Dec 14 11:26:33 localhost kernel: input: PC Speaker as /devices/platform/pcspkr/input/input5 Dec 14 11:26:33 localhost kernel: cirrus 0000:00:02.0: vgaarb: deactivate vga console Dec 14 11:26:33 localhost kernel: Console: switching to colour dummy device 80x25 Dec 14 11:26:33 localhost kernel: [drm] Initialized cirrus 2.0.0 for 0000:00:02.0 on minor 0 Dec 14 11:26:33 localhost kernel: fbcon: cirrusdrmfb (fb0) is primary device Dec 14 11:26:33 localhost kernel: Console: switching to colour frame buffer device 128x48 Dec 14 11:26:33 localhost kernel: cirrus 0000:00:02.0: [drm] fb0: cirrusdrmfb frame buffer device Dec 14 11:26:33 localhost systemd[1]: Started auditd.service - Security Audit Logging Service. ░░ Subject: A start job for unit auditd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit auditd.service has finished successfully. ░░ ░░ The job identifier is 235. Dec 14 11:26:33 localhost auditd[625]: No plugins found, not dispatching events Dec 14 11:26:33 localhost systemd[1]: Starting systemd-update-utmp.service - Record System Boot/Shutdown in UTMP... ░░ Subject: A start job for unit systemd-update-utmp.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp.service has begun execution. ░░ ░░ The job identifier is 258. Dec 14 11:26:33 localhost auditd[625]: Init complete, auditd 4.0 listening for events (startup state enable) Dec 14 11:26:33 localhost systemd[1]: Started rpcbind.service - RPC Bind. ░░ Subject: A start job for unit rpcbind.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpcbind.service has finished successfully. ░░ ░░ The job identifier is 253. Dec 14 11:26:33 localhost systemd[1]: Finished systemd-update-utmp.service - Record System Boot/Shutdown in UTMP. ░░ Subject: A start job for unit systemd-update-utmp.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp.service has finished successfully. ░░ ░░ The job identifier is 258. Dec 14 11:26:33 localhost systemd[1]: Reached target sysinit.target - System Initialization. ░░ Subject: A start job for unit sysinit.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sysinit.target has finished successfully. ░░ ░░ The job identifier is 125. Dec 14 11:26:33 localhost systemd[1]: Started dnf-makecache.timer - dnf makecache --timer. ░░ Subject: A start job for unit dnf-makecache.timer has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dnf-makecache.timer has finished successfully. ░░ ░░ The job identifier is 202. Dec 14 11:26:33 localhost kernel: RAPL PMU: API unit is 2^-32 Joules, 0 fixed counters, 655360 ms ovfl timer Dec 14 11:26:33 localhost systemd[1]: Started fstrim.timer - Discard unused filesystem blocks once a week. ░░ Subject: A start job for unit fstrim.timer has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.timer has finished successfully. ░░ ░░ The job identifier is 201. 
Dec 14 11:26:33 localhost kernel: piix4_smbus 0000:00:01.3: SMBus base address uninitialized - upgrade BIOS or use force_addr=0xaddr Dec 14 11:26:33 localhost systemd[1]: Started logrotate.timer - Daily rotation of log files. ░░ Subject: A start job for unit logrotate.timer has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit logrotate.timer has finished successfully. ░░ ░░ The job identifier is 209. Dec 14 11:26:33 localhost systemd[1]: Started systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories. ░░ Subject: A start job for unit systemd-tmpfiles-clean.timer has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-tmpfiles-clean.timer has finished successfully. ░░ ░░ The job identifier is 210. Dec 14 11:26:33 localhost systemd[1]: Reached target timers.target - Timer Units. ░░ Subject: A start job for unit timers.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit timers.target has finished successfully. ░░ ░░ The job identifier is 200. Dec 14 11:26:33 localhost systemd[1]: Listening on dbus.socket - D-Bus System Message Bus Socket. ░░ Subject: A start job for unit dbus.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dbus.socket has finished successfully. ░░ ░░ The job identifier is 206. Dec 14 11:26:33 localhost systemd[1]: Listening on pcscd.socket - PC/SC Smart Card Daemon Activation Socket. ░░ Subject: A start job for unit pcscd.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit pcscd.socket has finished successfully. ░░ ░░ The job identifier is 222. Dec 14 11:26:33 localhost systemd[1]: Listening on sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket. ░░ Subject: A start job for unit sssd-kcm.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sssd-kcm.socket has finished successfully. ░░ ░░ The job identifier is 214. Dec 14 11:26:33 localhost systemd[1]: Listening on systemd-hostnamed.socket - Hostname Service Socket. ░░ Subject: A start job for unit systemd-hostnamed.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.socket has finished successfully. ░░ ░░ The job identifier is 223. Dec 14 11:26:33 localhost systemd[1]: Reached target sockets.target - Socket Units. ░░ Subject: A start job for unit sockets.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sockets.target has finished successfully. ░░ ░░ The job identifier is 211. Dec 14 11:26:33 localhost systemd[1]: Starting dbus-broker.service - D-Bus System Message Bus... ░░ Subject: A start job for unit dbus-broker.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dbus-broker.service has begun execution. ░░ ░░ The job identifier is 207. Dec 14 11:26:33 localhost systemd[1]: systemd-pcrphase-sysinit.service - TPM PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionSecurity=measured-uki). 
░░ Subject: A start job for unit systemd-pcrphase-sysinit.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-pcrphase-sysinit.service has finished successfully. ░░ ░░ The job identifier is 135. Dec 14 11:26:33 localhost systemd[1]: Starting systemd-vconsole-setup.service - Virtual Console Setup... ░░ Subject: A start job for unit systemd-vconsole-setup.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-vconsole-setup.service has begun execution. ░░ ░░ The job identifier is 318. Dec 14 11:26:33 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-vconsole-setup.service has successfully entered the 'dead' state. Dec 14 11:26:33 localhost systemd[1]: Stopped systemd-vconsole-setup.service - Virtual Console Setup. ░░ Subject: A stop job for unit systemd-vconsole-setup.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit systemd-vconsole-setup.service has finished. ░░ ░░ The job identifier is 318 and the job result is done. Dec 14 11:26:33 localhost systemd[1]: Starting systemd-vconsole-setup.service - Virtual Console Setup... ░░ Subject: A start job for unit systemd-vconsole-setup.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-vconsole-setup.service has begun execution. ░░ ░░ The job identifier is 318. Dec 14 11:26:33 localhost systemd[1]: Started dbus-broker.service - D-Bus System Message Bus. ░░ Subject: A start job for unit dbus-broker.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dbus-broker.service has finished successfully. ░░ ░░ The job identifier is 207. Dec 14 11:26:33 localhost systemd[1]: Reached target basic.target - Basic System. ░░ Subject: A start job for unit basic.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit basic.target has finished successfully. ░░ ░░ The job identifier is 122. Dec 14 11:26:33 localhost dbus-broker-launch[637]: Ready Dec 14 11:26:33 localhost systemd[1]: Starting chronyd.service - NTP client/server... ░░ Subject: A start job for unit chronyd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit chronyd.service has begun execution. ░░ ░░ The job identifier is 268. Dec 14 11:26:33 localhost systemd[1]: Starting cloud-init-local.service - Initial cloud-init job (pre-networking)... ░░ Subject: A start job for unit cloud-init-local.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init-local.service has begun execution. ░░ ░░ The job identifier is 275. Dec 14 11:26:33 localhost systemd[1]: Starting dracut-shutdown.service - Restore /run/initramfs on shutdown... ░░ Subject: A start job for unit dracut-shutdown.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dracut-shutdown.service has begun execution. ░░ ░░ The job identifier is 184. Dec 14 11:26:33 localhost systemd[1]: Started irqbalance.service - irqbalance daemon. 
░░ Subject: A start job for unit irqbalance.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit irqbalance.service has finished successfully. ░░ ░░ The job identifier is 234. Dec 14 11:26:33 localhost systemd[1]: Started rngd.service - Hardware RNG Entropy Gatherer Daemon. ░░ Subject: A start job for unit rngd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rngd.service has finished successfully. ░░ ░░ The job identifier is 259. Dec 14 11:26:33 localhost systemd[1]: Starting rsyslog.service - System Logging Service... ░░ Subject: A start job for unit rsyslog.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rsyslog.service has begun execution. ░░ ░░ The job identifier is 272. Dec 14 11:26:33 localhost systemd[1]: ssh-host-keys-migration.service - Update OpenSSH host key permissions was skipped because of an unmet condition check (ConditionPathExists=!/var/lib/.ssh-host-keys-migration). ░░ Subject: A start job for unit ssh-host-keys-migration.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit ssh-host-keys-migration.service has finished successfully. ░░ ░░ The job identifier is 267. Dec 14 11:26:33 localhost systemd[1]: sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ecdsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ecdsa.service has finished successfully. ░░ ░░ The job identifier is 263. Dec 14 11:26:33 localhost systemd[1]: sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ed25519.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ed25519.service has finished successfully. ░░ ░░ The job identifier is 265. Dec 14 11:26:33 localhost systemd[1]: sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@rsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@rsa.service has finished successfully. ░░ ░░ The job identifier is 266. Dec 14 11:26:33 localhost systemd[1]: Reached target sshd-keygen.target. ░░ Subject: A start job for unit sshd-keygen.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen.target has finished successfully. ░░ ░░ The job identifier is 262. Dec 14 11:26:33 localhost systemd[1]: sssd.service - System Security Services Daemon was skipped because no trigger condition checks were met. 
░░ Subject: A start job for unit sssd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sssd.service has finished successfully. ░░ ░░ The job identifier is 237. Dec 14 11:26:33 localhost systemd[1]: Reached target nss-user-lookup.target - User and Group Name Lookups. ░░ Subject: A start job for unit nss-user-lookup.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit nss-user-lookup.target has finished successfully. ░░ ░░ The job identifier is 238. Dec 14 11:26:33 localhost systemd[1]: Starting systemd-logind.service - User Login Management... ░░ Subject: A start job for unit systemd-logind.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-logind.service has begun execution. ░░ ░░ The job identifier is 250. Dec 14 11:26:33 localhost systemd[1]: Finished dracut-shutdown.service - Restore /run/initramfs on shutdown. ░░ Subject: A start job for unit dracut-shutdown.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit dracut-shutdown.service has finished successfully. ░░ ░░ The job identifier is 184. Dec 14 11:26:33 localhost (qbalance)[649]: irqbalance.service: Referenced but unset environment variable evaluates to an empty string: IRQBALANCE_ARGS Dec 14 11:26:33 localhost systemd[1]: Started rsyslog.service - System Logging Service. ░░ Subject: A start job for unit rsyslog.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rsyslog.service has finished successfully. ░░ ░░ The job identifier is 272. Dec 14 11:26:33 localhost rsyslogd[651]: imjournal: filecreatemode is not set, using default 0644 [v8.2408.0-2.el10 try https://www.rsyslog.com/e/2186 ] Dec 14 11:26:33 localhost rsyslogd[651]: [origin software="rsyslogd" swVersion="8.2408.0-2.el10" x-pid="651" x-info="https://www.rsyslog.com"] start Dec 14 11:26:33 localhost systemd-logind[653]: New seat seat0. ░░ Subject: A new seat seat0 is now available ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new seat seat0 has been configured and is now available. Dec 14 11:26:33 localhost systemd-logind[653]: Watching system buttons on /dev/input/event0 (Power Button) Dec 14 11:26:33 localhost systemd-logind[653]: Watching system buttons on /dev/input/event1 (Sleep Button) Dec 14 11:26:33 localhost systemd-logind[653]: Watching system buttons on /dev/input/event2 (AT Translated Set 2 keyboard) Dec 14 11:26:33 localhost systemd[1]: Started systemd-logind.service - User Login Management. ░░ Subject: A start job for unit systemd-logind.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-logind.service has finished successfully. ░░ ░░ The job identifier is 250. Dec 14 11:26:33 localhost systemd[1]: Finished systemd-vconsole-setup.service - Virtual Console Setup. ░░ Subject: A start job for unit systemd-vconsole-setup.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-vconsole-setup.service has finished successfully. ░░ ░░ The job identifier is 318. 
Dec 14 11:26:33 localhost systemd[1]: run-credentials-systemd\x2dvconsole\x2dsetup.service.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-credentials-systemd\x2dvconsole\x2dsetup.service.mount has successfully entered the 'dead' state. Dec 14 11:26:33 localhost rsyslogd[651]: imjournal: journal files changed, reloading... [v8.2408.0-2.el10 try https://www.rsyslog.com/e/0 ] Dec 14 11:26:33 localhost rngd[650]: Disabling 7: PKCS11 Entropy generator (pkcs11) Dec 14 11:26:33 localhost rngd[650]: Disabling 5: NIST Network Entropy Beacon (nist) Dec 14 11:26:33 localhost rngd[650]: Disabling 9: Qrypt quantum entropy beacon (qrypt) Dec 14 11:26:33 localhost rngd[650]: Disabling 10: Named pipe entropy input (namedpipe) Dec 14 11:26:33 localhost rngd[650]: Initializing available sources Dec 14 11:26:33 localhost rngd[650]: [hwrng ]: Initialization Failed Dec 14 11:26:33 localhost rngd[650]: [rdrand]: Enabling RDRAND rng support Dec 14 11:26:33 localhost rngd[650]: [rdrand]: Initialized Dec 14 11:26:33 localhost rngd[650]: [jitter]: JITTER timeout set to 5 sec Dec 14 11:26:33 localhost chronyd[664]: chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG) Dec 14 11:26:33 localhost rngd[650]: [jitter]: Initializing AES buffer Dec 14 11:26:33 localhost chronyd[664]: Frequency 0.000 +/- 1000000.000 ppm read from /var/lib/chrony/drift Dec 14 11:26:33 localhost systemd[1]: Started chronyd.service - NTP client/server. ░░ Subject: A start job for unit chronyd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit chronyd.service has finished successfully. ░░ ░░ The job identifier is 268. Dec 14 11:26:33 localhost chronyd[664]: Loaded seccomp filter (level 2) Dec 14 11:26:36 localhost cloud-init[671]: Cloud-init v. 24.1.4-21.el10 running 'init-local' at Sat, 14 Dec 2024 16:26:36 +0000. Up 12.60 seconds. Dec 14 11:26:36 localhost dhcpcd[673]: dhcpcd-10.0.6 starting Dec 14 11:26:36 localhost kernel: 8021q: 802.1Q VLAN Support v1.8 Dec 14 11:26:36 localhost systemd[1]: Listening on systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch. ░░ Subject: A start job for unit systemd-rfkill.socket has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-rfkill.socket has finished successfully. ░░ ░░ The job identifier is 328. 
Dec 14 11:26:37 localhost kernel: cfg80211: Loading compiled-in X.509 certificates for regulatory database Dec 14 11:26:37 localhost kernel: Loaded X.509 cert 'sforshee: 00b28ddf47aef9cea7' Dec 14 11:26:37 localhost kernel: Loaded X.509 cert 'wens: 61c038651aabdcf94bd0ac7ff06c7248db18c600' Dec 14 11:26:37 localhost dhcpcd[676]: DUID 00:01:00:01:2e:f0:6e:3d:0e:03:6a:4a:4d:55 Dec 14 11:26:37 localhost dhcpcd[676]: eth0: IAID 6a:4a:4d:55 Dec 14 11:26:37 localhost kernel: platform regulatory.0: Direct firmware load for regulatory.db failed with error -2 Dec 14 11:26:37 localhost kernel: cfg80211: failed to load regulatory.db Dec 14 11:26:38 localhost rngd[650]: [jitter]: Unable to obtain AES key, disabling JITTER source Dec 14 11:26:38 localhost rngd[650]: [jitter]: Initialization Failed Dec 14 11:26:38 localhost rngd[650]: Process privileges have been dropped to 2:2 Dec 14 11:26:38 localhost dhcpcd[676]: eth0: soliciting a DHCP lease Dec 14 11:26:38 localhost dhcpcd[676]: eth0: offered 10.31.43.117 from 10.31.40.1 Dec 14 11:26:38 localhost dhcpcd[676]: eth0: leased 10.31.43.117 for 3600 seconds Dec 14 11:26:38 localhost dhcpcd[676]: eth0: adding route to 10.31.40.0/22 Dec 14 11:26:38 localhost dhcpcd[676]: eth0: adding default route via 10.31.40.1 Dec 14 11:26:38 localhost dhcpcd[676]: control command: /usr/sbin/dhcpcd --dumplease --ipv4only eth0 Dec 14 11:26:38 localhost systemd[1]: Starting systemd-hostnamed.service - Hostname Service... ░░ Subject: A start job for unit systemd-hostnamed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has begun execution. ░░ ░░ The job identifier is 337. Dec 14 11:26:38 localhost systemd[1]: Started systemd-hostnamed.service - Hostname Service. ░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has finished successfully. ░░ ░░ The job identifier is 337. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-hostnamed[696]: Hostname set to <ip-10-31-43-117.us-east-1.aws.redhat.com> (static) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init-local.service - Initial cloud-init job (pre-networking). ░░ Subject: A start job for unit cloud-init-local.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init-local.service has finished successfully. ░░ ░░ The job identifier is 275. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target network-pre.target - Preparation for Network. ░░ Subject: A start job for unit network-pre.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network-pre.target has finished successfully. ░░ ░░ The job identifier is 156. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager.service - Network Manager... ░░ Subject: A start job for unit NetworkManager.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager.service has begun execution. ░░ ░░ The job identifier is 205. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7758] NetworkManager (version 1.51.4-1.el10) is starting...
(boot:38eff4b5-157f-400c-9c9a-01c5bd7302d2) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7760] Read config: /etc/NetworkManager/NetworkManager.conf, /etc/NetworkManager/conf.d/30-cloud-init-ip6-addr-gen-mode.conf Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7889] manager[0x557f01cc5a10]: monitoring kernel firmware directory '/lib/firmware'. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7922] hostname: hostname: using hostnamed Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7922] hostname: static hostname changed from (none) to "ip-10-31-43-117.us-east-1.aws.redhat.com" Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7926] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7930] manager[0x557f01cc5a10]: rfkill: Wi-Fi hardware radio set enabled Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7930] manager[0x557f01cc5a10]: rfkill: WWAN hardware radio set enabled Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7986] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7987] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7987] manager: Networking is enabled by state file Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8012] settings: Loaded settings plugin: keyfile (internal) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 415. 
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8087] dhcp: init: Using DHCP client 'internal' Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8090] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8101] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8125] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8131] device (lo): Activation: starting connection 'lo' (77f275e6-4c01-4392-ab9b-e140983cfde9) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8137] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8141] device (eth0): state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager.service - Network Manager. ░░ Subject: A start job for unit NetworkManager.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager.service has finished successfully. ░░ ░░ The job identifier is 205. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8174] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager" Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target network.target - Network. ░░ Subject: A start job for unit network.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network.target has finished successfully. ░░ ░░ The job identifier is 208. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8196] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8198] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8200] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8201] device (eth0): carrier: link connected Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8203] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8218] device (eth0): state change: unavailable -> disconnected (reason 'carrier-changed', managed-type: 'full') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-wait-online.service - Network Manager Wait Online... 
░░ Subject: A start job for unit NetworkManager-wait-online.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-wait-online.service has begun execution. ░░ ░░ The job identifier is 204. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8237] policy: auto-activating connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8242] device (eth0): Activation: starting connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8245] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'full') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8248] manager: NetworkManager state is now CONNECTING Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8252] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'full') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8261] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'full') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8272] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds) Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting gssproxy.service - GSSAPI Proxy Daemon... ░░ Subject: A start job for unit gssproxy.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit gssproxy.service has begun execution. ░░ ░░ The job identifier is 244. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8299] dhcp4 (eth0): state changed new lease, address=10.31.43.117, acd pending Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started gssproxy.service - GSSAPI Proxy Daemon. ░░ Subject: A start job for unit gssproxy.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit gssproxy.service has finished successfully. ░░ ░░ The job identifier is 244. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: rpc-gssd.service - RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab). ░░ Subject: A start job for unit rpc-gssd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-gssd.service has finished successfully. ░░ ░░ The job identifier is 245. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target nfs-client.target - NFS client services. ░░ Subject: A start job for unit nfs-client.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit nfs-client.target has finished successfully. ░░ ░░ The job identifier is 241. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs-pre.target - Preparation for Remote File Systems. 
░░ Subject: A start job for unit remote-fs-pre.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-fs-pre.target has finished successfully. ░░ ░░ The job identifier is 249. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target remote-cryptsetup.target - Remote Encrypted Volumes. ░░ Subject: A start job for unit remote-cryptsetup.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-cryptsetup.target has finished successfully. ░░ ░░ The job identifier is 260. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs.target - Remote File Systems. ░░ Subject: A start job for unit remote-fs.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit remote-fs.target has finished successfully. ░░ ░░ The job identifier is 271. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: systemd-pcrphase.service - TPM PCR Barrier (User) was skipped because of an unmet condition check (ConditionSecurity=measured-uki). ░░ Subject: A start job for unit systemd-pcrphase.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-pcrphase.service has finished successfully. ░░ ░░ The job identifier is 170. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 415. Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9503] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9512] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external') Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9539] device (lo): Activation: successful, device activated. 
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9964] dhcp4 (eth0): state changed new lease, address=10.31.43.117
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9975] policy: set 'cloud-init eth0' (eth0) as default for IPv4 routing and DNS
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0371] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0438] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0445] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'full')
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0450] manager: NetworkManager state is now CONNECTED_SITE
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0460] device (eth0): Activation: successful, device activated.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0466] manager: NetworkManager state is now CONNECTED_GLOBAL
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0468] manager: startup complete
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished NetworkManager-wait-online.service - Network Manager Wait Online.
░░ Subject: A start job for unit NetworkManager-wait-online.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager-wait-online.service has finished successfully.
░░
░░ The job identifier is 204.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting cloud-init.service - Initial cloud-init job (metadata service crawler)...
░░ Subject: A start job for unit cloud-init.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-init.service has begun execution.
░░
░░ The job identifier is 274.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.11.160.238
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.18.100.10
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.2.32.37
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.2.32.38
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Cloud-init v. 24.1.4-21.el10 running 'init' at Sat, 14 Dec 2024 16:26:39 +0000. Up 15.52 seconds.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: ++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++++
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | Device | Up | Address | Mask | Scope | Hw-Address |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | eth0 | True | 10.31.43.117 | 255.255.252.0 | global | 0e:03:6a:4a:4d:55 |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | eth0 | True | fe80::c03:6aff:fe4a:4d55/64 | . | link | 0e:03:6a:4a:4d:55 |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | lo | True | 127.0.0.1 | 255.0.0.0 | host | . |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | lo | True | ::1/128 | . | host | . |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: ++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+------------+---------------+-----------+-------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | Route | Destination | Gateway | Genmask | Interface | Flags |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+------------+---------------+-----------+-------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 0 | 0.0.0.0 | 10.31.40.1 | 0.0.0.0 | eth0 | UG |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 1 | 10.31.40.0 | 0.0.0.0 | 255.255.252.0 | eth0 | U |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+------------+---------------+-----------+-------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +++++++++++++++++++Route IPv6 info+++++++++++++++++++
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+---------+-----------+-------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | Route | Destination | Gateway | Interface | Flags |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+---------+-----------+-------+
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 0 | fe80::/64 | :: | eth0 | U |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 2 | multicast | :: | eth0 | U |
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+---------+-----------+-------+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Generating public/private rsa key pair.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key fingerprint is:
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: SHA256:4kPeOc6dyUInlbtyxVsYnSTAiNqxtK7A/xVLG9s/emE root@ip-10-31-43-117.us-east-1.aws.redhat.com
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key's randomart image is:
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +---[RSA 3072]----+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . o.. |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | + . . . . |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | + + . + . |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . + o . o |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . .o S. o o |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o +.+oOo E . |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o .+.Oo.+ + |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o =o+o++ |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | .. o+*o.. |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +----[SHA256]-----+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Generating public/private ecdsa key pair.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key fingerprint is:
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: SHA256:RdoWwMOSgw51mBD28FuLabo0FGe7XvtI9kDaq60uA5s root@ip-10-31-43-117.us-east-1.aws.redhat.com
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key's randomart image is:
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +---[ECDSA 256]---+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | =+.+.+..o |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | ..++.+ ++ . |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | .o= .o..+ |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | +.* . o |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . * o S |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |.. o = |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | ++ o * |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |E.o+ = * |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | .+=o=.o |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +----[SHA256]-----+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Generating public/private ed25519 key pair.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key fingerprint is:
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: SHA256:v1uLzZ9r22pXF2QsO+gHOGERom4ErsGOiGgKtl7LE5E root@ip-10-31-43-117.us-east-1.aws.redhat.com
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key's randomart image is:
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +--[ED25519 256]--+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . . oo . |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |. . . . .o . + |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o ..o . o . = |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |* oEo o o o . |
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |*= .o S o . . .|
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |= ... . . . o|
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |.. .. . o o|
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |. o.. * .oo.|
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . o. +.++*=.|
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +----[SHA256]-----+
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init.service - Initial cloud-init job (metadata service crawler).
░░ Subject: A start job for unit cloud-init.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-init.service has finished successfully.
░░
░░ The job identifier is 274.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-config.target - Cloud-config availability.
░░ Subject: A start job for unit cloud-config.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-config.target has finished successfully.
░░
░░ The job identifier is 277.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target network-online.target - Network is Online.
░░ Subject: A start job for unit network-online.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit network-online.target has finished successfully.
░░
░░ The job identifier is 203.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting cloud-config.service - Apply the settings specified in cloud-config...
░░ Subject: A start job for unit cloud-config.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-config.service has begun execution.
░░
░░ The job identifier is 276.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting kdump.service - Crash recovery kernel arming...
░░ Subject: A start job for unit kdump.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit kdump.service has begun execution.
░░
░░ The job identifier is 256.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting restraintd.service - The restraint harness....
░░ Subject: A start job for unit restraintd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit restraintd.service has begun execution.
░░
░░ The job identifier is 239.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting rpc-statd-notify.service - Notify NFS peers of a restart...
░░ Subject: A start job for unit rpc-statd-notify.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rpc-statd-notify.service has begun execution.
░░
░░ The job identifier is 242.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon...
░░ Subject: A start job for unit sshd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd.service has begun execution.
░░
░░ The job identifier is 261.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com sm-notify[872]: Version 2.7.1 starting
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started rpc-statd-notify.service - Notify NFS peers of a restart.
░░ Subject: A start job for unit rpc-statd-notify.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rpc-statd-notify.service has finished successfully.
░░
░░ The job identifier is 242.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com (sshd)[873]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started restraintd.service - The restraint harness..
░░ Subject: A start job for unit restraintd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit restraintd.service has finished successfully.
░░
░░ The job identifier is 239.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[873]: Server listening on 0.0.0.0 port 22.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[873]: Server listening on :: port 22.
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon.
░░ Subject: A start job for unit sshd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd.service has finished successfully.
░░
░░ The job identifier is 261.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[916]: Cloud-init v. 24.1.4-21.el10 running 'modules:config' at Sat, 14 Dec 2024 16:26:41 +0000. Up 17.21 seconds.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[873]: Received signal 15; terminating.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopping sshd.service - OpenSSH server daemon...
░░ Subject: A stop job for unit sshd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit sshd.service has begun execution.
░░
░░ The job identifier is 507.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit sshd.service has successfully entered the 'dead' state.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopped sshd.service - OpenSSH server daemon.
░░ Subject: A stop job for unit sshd.service has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit sshd.service has finished.
░░
░░ The job identifier is 507 and the job result is done.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopped target sshd-keygen.target.
░░ Subject: A stop job for unit sshd-keygen.target has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit sshd-keygen.target has finished.
░░
░░ The job identifier is 591 and the job result is done.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopping sshd-keygen.target...
░░ Subject: A stop job for unit sshd-keygen.target has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit sshd-keygen.target has begun execution.
░░
░░ The job identifier is 591.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: ssh-host-keys-migration.service - Update OpenSSH host key permissions was skipped because of an unmet condition check (ConditionPathExists=!/var/lib/.ssh-host-keys-migration).
░░ Subject: A start job for unit ssh-host-keys-migration.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit ssh-host-keys-migration.service has finished successfully.
░░
░░ The job identifier is 590.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
░░ Subject: A start job for unit sshd-keygen@ecdsa.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen@ecdsa.service has finished successfully.
░░
░░ The job identifier is 586.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
░░ Subject: A start job for unit sshd-keygen@ed25519.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen@ed25519.service has finished successfully.
░░
░░ The job identifier is 588.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
░░ Subject: A start job for unit sshd-keygen@rsa.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen@rsa.service has finished successfully.
░░
░░ The job identifier is 589.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target sshd-keygen.target.
░░ Subject: A start job for unit sshd-keygen.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen.target has finished successfully.
░░
░░ The job identifier is 591.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon...
░░ Subject: A start job for unit sshd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd.service has begun execution.
░░
░░ The job identifier is 507.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com (sshd)[920]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[920]: Server listening on 0.0.0.0 port 22.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[920]: Server listening on :: port 22.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon.
░░ Subject: A start job for unit sshd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd.service has finished successfully.
░░
░░ The job identifier is 507.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com restraintd[877]: Listening on http://localhost:8081
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-config.service - Apply the settings specified in cloud-config.
░░ Subject: A start job for unit cloud-config.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-config.service has finished successfully.
░░
░░ The job identifier is 276.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting cloud-final.service - Execute cloud user/final scripts...
░░ Subject: A start job for unit cloud-final.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-final.service has begun execution.
░░
░░ The job identifier is 278.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting systemd-user-sessions.service - Permit User Sessions...
░░ Subject: A start job for unit systemd-user-sessions.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-user-sessions.service has begun execution.
░░
░░ The job identifier is 240.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished systemd-user-sessions.service - Permit User Sessions.
░░ Subject: A start job for unit systemd-user-sessions.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-user-sessions.service has finished successfully.
░░
░░ The job identifier is 240.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started crond.service - Command Scheduler.
░░ Subject: A start job for unit crond.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit crond.service has finished successfully.
░░
░░ The job identifier is 255.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started getty@tty1.service - Getty on tty1.
░░ Subject: A start job for unit getty@tty1.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit getty@tty1.service has finished successfully.
░░
░░ The job identifier is 227.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started serial-getty@ttyS0.service - Serial Getty on ttyS0.
░░ Subject: A start job for unit serial-getty@ttyS0.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit serial-getty@ttyS0.service has finished successfully.
░░
░░ The job identifier is 231.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target getty.target - Login Prompts.
░░ Subject: A start job for unit getty.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit getty.target has finished successfully.
░░
░░ The job identifier is 226.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target multi-user.target - Multi-User System.
░░ Subject: A start job for unit multi-user.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit multi-user.target has finished successfully.
░░
░░ The job identifier is 121.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) STARTUP (1.7.0)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP...
░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-update-utmp-runlevel.service has begun execution.
░░
░░ The job identifier is 257.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) INFO (Syslog will be used instead of sendmail.)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 98% if used.)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) INFO (running with inotify support)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit systemd-update-utmp-runlevel.service has successfully entered the 'dead' state.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP.
░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-update-utmp-runlevel.service has finished successfully.
░░
░░ The job identifier is 257.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Detected change(s) in the following file(s): /etc/fstab
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1020]: Cloud-init v. 24.1.4-21.el10 running 'modules:final' at Sat, 14 Dec 2024 16:26:41 +0000. Up 17.71 seconds.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1076]: #############################################################
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1077]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1079]: 256 SHA256:RdoWwMOSgw51mBD28FuLabo0FGe7XvtI9kDaq60uA5s root@ip-10-31-43-117.us-east-1.aws.redhat.com (ECDSA)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1085]: 256 SHA256:v1uLzZ9r22pXF2QsO+gHOGERom4ErsGOiGgKtl7LE5E root@ip-10-31-43-117.us-east-1.aws.redhat.com (ED25519)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1090]: 3072 SHA256:4kPeOc6dyUInlbtyxVsYnSTAiNqxtK7A/xVLG9s/emE root@ip-10-31-43-117.us-east-1.aws.redhat.com (RSA)
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1092]: -----END SSH HOST KEY FINGERPRINTS-----
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1095]: #############################################################
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1020]: Cloud-init v. 24.1.4-21.el10 finished at Sat, 14 Dec 2024 16:26:41 +0000. Datasource DataSourceEc2Local. Up 17.88 seconds
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-final.service - Execute cloud user/final scripts.
░░ Subject: A start job for unit cloud-final.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-final.service has finished successfully.
░░
░░ The job identifier is 278.
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-init.target - Cloud-init target.
░░ Subject: A start job for unit cloud-init.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-init.target has finished successfully.
░░
░░ The job identifier is 273.
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 0 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 0 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 48 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 48 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 49 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 49 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 50 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 50 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 51 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 51 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 52 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 52 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 53 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 53 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 54 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 54 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 55 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 55 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 56 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 56 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 57 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 57 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 58 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 58 affinity is now unmanaged
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 59 affinity: Permission denied
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 59 affinity is now unmanaged
Dec 14 11:26:44 ip-10-31-43-117.us-east-1.aws.redhat.com kernel: block xvda: the capability attribute has been deprecated.
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Selected source 10.2.32.38
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Rebuilding /boot/initramfs-6.12.0-31.el10.x86_64kdump.img
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1384]: dracut-103-1.el10
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1387]: Executing: /usr/bin/dracut --list-modules
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1458]: dracut-103-1.el10
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Executing: /usr/bin/dracut --add kdumpbase --quiet --hostonly --hostonly-cmdline --hostonly-i18n --hostonly-mode strict --hostonly-nics --aggressive-strip --omit "rdma plymouth resume ifcfg earlykdump" --mount "/dev/disk/by-uuid/f3bb1e80-fac3-4b5e-93f6-d763469176c6 /sysroot xfs rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota" --add squash-squashfs --squash-compressor zstd --no-hostonly-default-device -f /boot/initramfs-6.12.0-31.el10.x86_64kdump.img 6.12.0-31.el10.x86_64
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd-wait-online' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'busybox' will not be installed, because command 'busybox' could not be found!
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmanctl' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'ifcfg' will not be installed, because it's in the list to be omitted!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'plymouth' will not be installed, because it's in the list to be omitted!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'multipath' will not be installed, because command 'multipath' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'nvmf' will not be installed, because command 'nvme' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'resume' will not be installed, because it's in the list to be omitted!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'biosdevname' will not be installed, because command 'biosdevname' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'earlykdump' will not be installed, because it's in the list to be omitted!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'busybox' will not be installed, because command 'busybox' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmanctl' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'multipath' will not be installed, because command 'multipath' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'nvmf' will not be installed, because command 'nvme' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found!
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fips ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fips-crypto-policies ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-ask-password ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-initrd ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-journald ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-modules-load ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-sysctl ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-sysusers ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-tmpfiles ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-udevd ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: rngd ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: i18n ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: drm ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: prefixdevname ***
Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: kernel-modules ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: kernel-modules-extra ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: configuration source "/run/depmod.d" does not exist
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: configuration source "/lib/depmod.d" does not exist
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: parsing configuration file "/etc/depmod.d/dist.conf"
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: /etc/depmod.d/dist.conf: added "updates extra built-in weak-updates" to the list of search directories
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: pcmcia ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Skipping udev rule: 60-pcmcia.rules
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fstab-sys ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: hwdb ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: rootfs-block ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: squash-squashfs ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: terminfo ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: udev-rules ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: dracut-systemd ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: usrmount ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: base ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fs-lib ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: kdumpbase ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: memstrack ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: microcode_ctl-fw_dir_override ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl module: mangling fw_dir
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: reset fw_dir to "/lib/firmware/updates /lib/firmware"
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel"...
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state.
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: intel: caveats check for kernel version "6.12.0-31.el10.x86_64" passed, adding "/usr/share/microcode_ctl/ucode_with_caveats/intel" to fw_dir variable
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel-06-4f-01"...
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: configuration "intel-06-4f-01" is ignored
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: final fw_dir: "/usr/share/microcode_ctl/ucode_with_caveats/intel /lib/firmware/updates /lib/firmware"
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: shutdown ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: squash-lib ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including modules done ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Installing kernel module dependencies ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Installing kernel module dependencies done ***
Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Resolving executable dependencies ***
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Resolving executable dependencies done ***
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Hardlinking files ***
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Mode: real
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Method: sha256
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Files: 537
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Linked: 25 files
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Compared: 0 xattrs
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Compared: 48 files
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Saved: 13.58 MiB
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Duration: 0.163206 seconds
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Hardlinking files done ***
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Generating early-microcode cpio image ***
Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Constructing GenuineIntel.bin ***
Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Constructing GenuineIntel.bin ***
Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Store current command line parameters ***
Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Stored kernel commandline:
Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: No dracut internal kernel commandline stored in the initramfs
Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Squashing the files inside the initramfs ***
Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Squashing the files inside the initramfs done ***
Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Creating image file '/boot/initramfs-6.12.0-31.el10.x86_64kdump.img' ***
Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Creating initramfs image file '/boot/initramfs-6.12.0-31.el10.x86_64kdump.img' done ***
Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com kernel: PKCS7: Message signed outside of X.509 validity window
Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: kexec: loaded kdump kernel
Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Starting kdump: [OK]
Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Notice: No vmcore creation test performed!
Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished kdump.service - Crash recovery kernel arming.
░░ Subject: A start job for unit kdump.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit kdump.service has finished successfully.
░░
░░ The job identifier is 256.
Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Startup finished in 1.005s (kernel) + 3.724s (initrd) + 31.760s (userspace) = 36.490s.
░░ Subject: System start-up is now complete
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ All system services necessary queued for starting at boot have been
░░ started. Note that this does not mean that the machine is now idle as services
░░ might still be busy with completing start-up.
░░
░░ Kernel start-up required 1005350 microseconds.
░░
░░ Initrd start-up required 3724049 microseconds.
░░
░░ Userspace start-up required 31760904 microseconds.
Dec 14 11:27:08 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: systemd-hostnamed.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state.
Dec 14 11:27:51 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Selected source 216.66.48.42 (2.centos.pool.ntp.org)
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: Accepted publickey for root from 10.30.34.106 port 52592 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4322) opened.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Created slice user-0.slice - User Slice of UID 0.
░░ Subject: A start job for unit user-0.slice has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit user-0.slice has finished successfully.
░░
░░ The job identifier is 602.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting user-runtime-dir@0.service - User Runtime Directory /run/user/0...
░░ Subject: A start job for unit user-runtime-dir@0.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit user-runtime-dir@0.service has begun execution.
░░
░░ The job identifier is 601.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 1 of user root.
░░ Subject: A new session 1 has been created for user root
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░ Documentation: sd-login(3)
░░
░░ A new session with the ID 1 has been created for the user root.
░░
░░ The leading process of the session is 4322.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished user-runtime-dir@0.service - User Runtime Directory /run/user/0.
░░ Subject: A start job for unit user-runtime-dir@0.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit user-runtime-dir@0.service has finished successfully.
░░
░░ The job identifier is 601.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting user@0.service - User Manager for UID 0...
░░ Subject: A start job for unit user@0.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit user@0.service has begun execution.
░░
░░ The job identifier is 681.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 2 of user root.
░░ Subject: A new session 2 has been created for user root
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░ Documentation: sd-login(3)
░░
░░ A new session with the ID 2 has been created for the user root.
░░
░░ The leading process of the session is 4327.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com (systemd)[4327]: pam_unix(systemd-user:session): session opened for user root(uid=0) by root(uid=0)
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Queued start job for default target default.target.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Created slice app.slice - User Application Slice.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 5.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: grub-boot-success.timer - Mark boot as successful after the user session has run 2 minutes was skipped because of an unmet condition check (ConditionUser=!@system).
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 10.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Started systemd-tmpfiles-clean.timer - Daily Cleanup of User's Temporary Directories.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 11.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target paths.target - Paths.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 12.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target timers.target - Timers.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 9.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Starting dbus.socket - D-Bus User Message Bus Socket...
░░ Subject: A start job for unit UNIT has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has begun execution.
░░
░░ The job identifier is 4.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Starting systemd-tmpfiles-setup.service - Create User Files and Directories...
░░ Subject: A start job for unit UNIT has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has begun execution.
░░
░░ The job identifier is 8.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Finished systemd-tmpfiles-setup.service - Create User Files and Directories.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 8.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Listening on dbus.socket - D-Bus User Message Bus Socket.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 4.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target sockets.target - Sockets.
░░ Subject: A start job for unit UNIT has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit UNIT has finished successfully.
░░
░░ The job identifier is 3.
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target basic.target - Basic System.
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 2. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target default.target - Main User Target. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 1. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Startup finished in 127ms. ░░ Subject: User manager start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The user manager instance for user 0 has been started. All services queued ░░ for starting have been started. Note that other services might still be starting ░░ up or be started at any later time. ░░ ░░ Startup of the manager took 127888 microseconds. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started user@0.service - User Manager for UID 0. ░░ Subject: A start job for unit user@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has finished successfully. ░░ ░░ The job identifier is 681. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started session-1.scope - Session 1 of User root. ░░ Subject: A start job for unit session-1.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-1.scope has finished successfully. ░░ ░░ The job identifier is 762. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4338]: Received disconnect from 10.30.34.106 port 52592:11: disconnected by user Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4338]: Disconnected from user root 10.30.34.106 port 52592 Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4322) opened. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_unix(sshd:session): session closed for user root Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: session-1.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-1.scope has successfully entered the 'dead' state. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Session 1 logged out. Waiting for processes to exit. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Removed session 1. ░░ Subject: Session 1 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 1 has been terminated. 
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4376]: Accepted publickey for root from 10.31.8.152 port 43942 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: Accepted publickey for root from 10.31.8.152 port 43954 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4376]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4376) opened.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4377) opened.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 3 of user root.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started session-3.scope - Session 3 of User root.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 4 of user root.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started session-4.scope - Session 4 of User root.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4376]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0)
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0)
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4383]: Received disconnect from 10.31.8.152 port 43954:11: disconnected by user
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4383]: Disconnected from user root 10.31.8.152 port 43954
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4377) opened.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_unix(sshd:session): session closed for user root
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: session-4.scope: Deactivated successfully.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Session 4 logged out. Waiting for processes to exit.
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Removed session 4.
Dec 14 11:29:28 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting systemd-hostnamed.service - Hostname Service...
Dec 14 11:29:28 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started systemd-hostnamed.service - Hostname Service.
Dec 14 11:29:28 managed-node1 systemd-hostnamed[5857]: Hostname set to <managed-node1> (static)
Dec 14 11:29:28 managed-node1 NetworkManager[703]: [1734193768.6492] hostname: static hostname changed from "ip-10-31-43-117.us-east-1.aws.redhat.com" to "managed-node1"
Dec 14 11:29:28 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service...
Dec 14 11:29:28 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service.
Dec 14 11:29:38 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
Dec 14 11:29:58 managed-node1 systemd[1]: systemd-hostnamed.service: Deactivated successfully.
Dec 14 11:30:05 managed-node1 sshd-session[6523]: Accepted publickey for root from 10.31.13.174 port 42640 ssh2: RSA SHA256:9j1blwt3wcrRiGYZQ7ZGu9axm3cDklH6/z4c+Ee8CzE
Dec 14 11:30:05 managed-node1 sshd-session[6523]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-6523) opened.
Dec 14 11:30:05 managed-node1 systemd-logind[653]: New session 5 of user root.
Dec 14 11:30:05 managed-node1 systemd[1]: Started session-5.scope - Session 5 of User root.
Dec 14 11:30:05 managed-node1 sshd-session[6523]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0)
Dec 14 11:30:06 managed-node1 python3.12[6679]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d
Dec 14 11:30:08 managed-node1 python3.12[6839]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:30:08 managed-node1 python3.12[6970]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:30:10 managed-node1 sudo[7232]: root : TTY=pts/0 ; PWD=/root ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qkiobsjfqecahznjrwohrybwqhyobeje ; /usr/bin/python3.12 /root/.ansible/tmp/ansible-tmp-1734193809.9793763-6984-204031990791383/AnsiballZ_dnf.py'
Dec 14 11:30:10 managed-node1 sudo[7232]: pam_systemd(sudo:session): New sd-bus connection (system-bus-pam-systemd-7232) opened.
Dec 14 11:30:10 managed-node1 sudo[7232]: pam_unix(sudo:session): session opened for user root(uid=0) by root(uid=0)
Dec 14 11:30:10 managed-node1 python3.12[7235]: ansible-ansible.legacy.dnf Invoked with name=['iptables-nft', 'podman', 'shadow-utils-subid'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Dec 14 11:30:27 managed-node1 kernel: SELinux: Converting 384 SID table entries...
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability network_peer_controls=1
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability open_perms=1
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability extended_socket_class=1
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability always_check_network=0
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0
Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0
Dec 14 11:30:34 managed-node1 kernel: SELinux: Converting 385 SID table entries...
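Note: the dnf task above is the role's package-install step. As a manual sketch it corresponds roughly to the following; the -y flag and the verification commands are assumptions for illustration, not taken from this run:
    # Install the packages the podman role requested (names from the log)
    dnf install -y iptables-nft podman shadow-utils-subid
    # Confirm what landed (assumed check, not part of the test)
    podman --version
    rpm -q shadow-utils-subid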
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability network_peer_controls=1
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability open_perms=1
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability extended_socket_class=1
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability always_check_network=0
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0
Dec 14 11:30:42 managed-node1 kernel: SELinux: Converting 385 SID table entries...
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability network_peer_controls=1
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability open_perms=1
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability extended_socket_class=1
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability always_check_network=0
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0
Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0
Dec 14 11:30:43 managed-node1 setsebool[7309]: The virt_use_nfs policy boolean was changed to 1 by root
Dec 14 11:30:43 managed-node1 setsebool[7309]: The virt_sandbox_use_all_caps policy boolean was changed to 1 by root
Dec 14 11:30:52 managed-node1 kernel: SELinux: Converting 388 SID table entries...
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability network_peer_controls=1
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability open_perms=1
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability extended_socket_class=1
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability always_check_network=0
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0
Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0
Dec 14 11:31:00 managed-node1 kernel: SELinux: Converting 388 SID table entries...
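Note: the two setsebool entries above record the SELinux booleans the role toggles for container use. Reproduced by hand it would look like the following; whether the role sets them persistently (-P) is an assumption here:
    # SELinux booleans toggled for the podman role
    setsebool -P virt_use_nfs 1
    setsebool -P virt_sandbox_use_all_caps 1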
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability network_peer_controls=1
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability open_perms=1
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability extended_socket_class=1
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability always_check_network=0
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0
Dec 14 11:31:17 managed-node1 systemd[1]: Started run-rdcb31fbbad404dfd86db5482f938d0b1.service - /usr/bin/systemctl start man-db-cache-update.
Dec 14 11:31:17 managed-node1 systemd[1]: Reload requested from client PID 8034 ('systemctl') (unit session-5.scope)...
Dec 14 11:31:17 managed-node1 systemd[1]: Reloading...
Dec 14 11:31:17 managed-node1 systemd[1]: Reloading finished in 190 ms.
Dec 14 11:31:17 managed-node1 systemd[1]: Starting man-db-cache-update.service...
Dec 14 11:31:17 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units…
Dec 14 11:31:18 managed-node1 sudo[7232]: pam_unix(sudo:session): session closed for user root
Dec 14 11:31:18 managed-node1 python3.12[8229]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:19 managed-node1 python3.12[8367]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None
Dec 14 11:31:19 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Dec 14 11:31:19 managed-node1 systemd[1]: Finished man-db-cache-update.service.
Dec 14 11:31:19 managed-node1 systemd[1]: run-rdcb31fbbad404dfd86db5482f938d0b1.service: Deactivated successfully.
Dec 14 11:31:20 managed-node1 python3.12[8503]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:31:22 managed-node1 python3.12[8636]: ansible-tempfile Invoked with prefix=lsr_ suffix=_podman state=directory path=None
Dec 14 11:31:22 managed-node1 python3.12[8767]: ansible-file Invoked with path=/tmp/lsr_6ehua9m0_podman/auth state=directory mode=0700 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:23 managed-node1 python3.12[8898]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:31:24 managed-node1 python3.12[9029]: ansible-ansible.legacy.dnf Invoked with name=['python3-pyasn1', 'python3-cryptography', 'python3-dbus'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Dec 14 11:31:26 managed-node1 python3.12[9165]: ansible-ansible.legacy.dnf Invoked with name=['certmonger', 'python3-packaging'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Dec 14 11:31:27 managed-node1 dbus-broker-launch[637]: Noticed file-system modification, trigger reload.
Dec 14 11:31:27 managed-node1 dbus-broker-launch[637]: Noticed file-system modification, trigger reload.
Dec 14 11:31:27 managed-node1 dbus-broker-launch[637]: Noticed file-system modification, trigger reload.
Dec 14 11:31:27 managed-node1 systemd[1]: Reload requested from client PID 9173 ('systemctl') (unit session-5.scope)...
Dec 14 11:31:27 managed-node1 systemd[1]: Reloading...
Dec 14 11:31:28 managed-node1 systemd[1]: Reloading finished in 183 ms.
Dec 14 11:31:28 managed-node1 systemd[1]: Started run-r3d4cf19d1fc24d23b770a4063f70f37f.service - /usr/bin/systemctl start man-db-cache-update.
Dec 14 11:31:28 managed-node1 systemd[1]: Starting man-db-cache-update.service...
Dec 14 11:31:28 managed-node1 systemd[1]: Reload requested from client PID 9234 ('systemctl') (unit session-5.scope)...
Dec 14 11:31:28 managed-node1 systemd[1]: Reloading...
Dec 14 11:31:28 managed-node1 systemd[1]: Reloading finished in 291 ms.
Dec 14 11:31:28 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units…
Dec 14 11:31:29 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Dec 14 11:31:29 managed-node1 systemd[1]: Finished man-db-cache-update.service.
Dec 14 11:31:29 managed-node1 systemd[1]: run-r3d4cf19d1fc24d23b770a4063f70f37f.service: Deactivated successfully.
Dec 14 11:31:29 managed-node1 python3.12[9425]: ansible-file Invoked with name=/etc/certmonger//pre-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//pre-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:29 managed-node1 python3.12[9556]: ansible-file Invoked with name=/etc/certmonger//post-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//post-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:30 managed-node1 python3.12[9687]: ansible-ansible.legacy.systemd Invoked with name=certmonger state=started enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None
Dec 14 11:31:30 managed-node1 systemd[1]: Reload requested from client PID 9690 ('systemctl') (unit session-5.scope)...
Dec 14 11:31:30 managed-node1 systemd[1]: Reloading...
Dec 14 11:31:30 managed-node1 systemd[1]: Reloading finished in 186 ms.
Dec 14 11:31:30 managed-node1 systemd[1]: Starting fstrim.service - Discard unused blocks on filesystems from /etc/fstab...
Dec 14 11:31:31 managed-node1 systemd[1]: Starting certmonger.service - Certificate monitoring and PKI enrollment...
Dec 14 11:31:31 managed-node1 (rtmonger)[9745]: certmonger.service: Referenced but unset environment variable evaluates to an empty string: OPTS
Dec 14 11:31:31 managed-node1 systemd[1]: fstrim.service: Deactivated successfully.
Dec 14 11:31:31 managed-node1 systemd[1]: Finished fstrim.service - Discard unused blocks on filesystems from /etc/fstab.
Dec 14 11:31:31 managed-node1 systemd[1]: Started certmonger.service - Certificate monitoring and PKI enrollment.
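Note: with certmonger running, its tracking state can be inspected from the CLI. This check is illustrative and not part of the recorded run:
    # List the certificates certmonger is currently tracking
    getcert list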
Dec 14 11:31:31 managed-node1 python3.12[9904]: ansible-fedora.linux_system_roles.certificate_request Invoked with name=podman_registry dns=['localhost', '127.0.0.1'] directory=/etc/pki/tls wait=True ca=self-sign __header=# # Ansible managed # # system_role:certificate provider_config_directory=/etc/certmonger provider=certmonger key_usage=['digitalSignature', 'keyEncipherment'] extended_key_usage=['id-kp-serverAuth', 'id-kp-clientAuth'] auto_renew=True ip=None email=None common_name=None country=None state=None locality=None organization=None organizational_unit=None contact_email=None key_size=None owner=None group=None mode=None principal=None run_before=None run_after=None
Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131
Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131
Dec 14 11:31:32 managed-node1 certmonger[9919]: Certificate in file "/etc/pki/tls/certs/podman_registry.crt" issued by CA and saved.
Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131
Dec 14 11:31:32 managed-node1 python3.12[10050]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt
Dec 14 11:31:33 managed-node1 python3.12[10181]: ansible-slurp Invoked with path=/etc/pki/tls/private/podman_registry.key src=/etc/pki/tls/private/podman_registry.key
Dec 14 11:31:33 managed-node1 python3.12[10312]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt
Dec 14 11:31:34 managed-node1 python3.12[10443]: ansible-ansible.legacy.command Invoked with _raw_params=getcert stop-tracking -f /etc/pki/tls/certs/podman_registry.crt _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:34 managed-node1 certmonger[9745]: 2024-12-14 11:31:34 [9745] Wrote to /var/lib/certmonger/requests/20241214163131
Dec 14 11:31:34 managed-node1 python3.12[10575]: ansible-file Invoked with path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:34 managed-node1 python3.12[10706]: ansible-file Invoked with path=/etc/pki/tls/private/podman_registry.key state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:35 managed-node1 python3.12[10837]: ansible-file Invoked with path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:35 managed-node1 python3.12[10968]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_6ehua9m0_podman/auth/registry_cert.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
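Note: the sequence above issues a self-signed certificate, reads the resulting files, then stops renewal tracking before the originals are deleted. A manual sketch with certmonger's CLI; mapping ca=self-sign to certmonger's built-in "local" CA is an assumption about how the certificate role drives it:
    # Request a self-signed server cert for the test registry (assumed CA nickname "local")
    getcert request -c local -f /etc/pki/tls/certs/podman_registry.crt \
        -k /etc/pki/tls/private/podman_registry.key -D localhost -D 127.0.0.1 -w
    # Stop renewal tracking once the files have been copied elsewhere (command as in the log)
    getcert stop-tracking -f /etc/pki/tls/certs/podman_registry.crt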
Dec 14 11:31:36 managed-node1 python3.12[11073]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_6ehua9m0_podman/auth/registry_cert.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1734193895.4098308-8404-275762393983029/.source.crt _original_basename=.fdte2xv6 follow=False checksum=a56753cb72985d5015d277aab9534d583f3099c2 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:36 managed-node1 python3.12[11204]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_6ehua9m0_podman/auth/registry_key.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Dec 14 11:31:36 managed-node1 python3.12[11309]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_6ehua9m0_podman/auth/registry_key.pem mode=0600 src=/root/.ansible/tmp/ansible-tmp-1734193896.2446203-8452-271283031401177/.source.pem _original_basename=.b248p56a follow=False checksum=3c4bd2383044d864f778448dd3788c2bdf7f63a0 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:37 managed-node1 python3.12[11440]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_6ehua9m0_podman/auth/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Dec 14 11:31:37 managed-node1 python3.12[11545]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_6ehua9m0_podman/auth/ca.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1734193896.9596615-8488-86842030413265/.source.crt _original_basename=.5xr0fb34 follow=False checksum=a56753cb72985d5015d277aab9534d583f3099c2 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:31:38 managed-node1 python3.12[11676]: ansible-ansible.legacy.dnf Invoked with name=['httpd-tools', 'skopeo'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None
Dec 14 11:31:42 managed-node1 systemd[1]: Started run-ra2bab39c1da445c09f883f3d116af994.service - /usr/bin/systemctl start man-db-cache-update.
Dec 14 11:31:42 managed-node1 systemd[1]: Starting man-db-cache-update.service...
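Note: httpd-tools is installed here for the htpasswd utility; the registry container started below expects a bcrypt htpasswd file under the auth directory (REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd). A minimal sketch with placeholder credentials, since the real ones live in the vaulted variables:
    # Create the htpasswd file the registry will authenticate against
    # (-B forces bcrypt, the only scheme registry 2.x accepts; user/pass are placeholders)
    htpasswd -Bbc /tmp/lsr_6ehua9m0_podman/auth/htpasswd testuser testpassword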
Dec 14 11:31:42 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully.
Dec 14 11:31:42 managed-node1 systemd[1]: Finished man-db-cache-update.service.
Dec 14 11:31:42 managed-node1 systemd[1]: run-ra2bab39c1da445c09f883f3d116af994.service: Deactivated successfully.
Dec 14 11:31:43 managed-node1 python3.12[12190]: ansible-ansible.legacy.command Invoked with _raw_params=podman run -d -p 127.0.0.1:5000:5000 --name podman_registry -v /tmp/lsr_6ehua9m0_podman/auth:/auth:Z -e REGISTRY_AUTH=htpasswd -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/registry_cert.crt -e REGISTRY_HTTP_TLS_KEY=/auth/registry_key.pem quay.io/libpod/registry:2.8.2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:44 managed-node1 systemd[1]: var-lib-containers-storage-overlay-compat1989105179-merged.mount: Deactivated successfully.
Dec 14 11:31:44 managed-node1 kernel: evm: overlay not supported
Dec 14 11:31:44 managed-node1 systemd[1]: var-lib-containers-storage-overlay-metacopy\x2dcheck2016384165-merged.mount: Deactivated successfully.
Dec 14 11:31:44 managed-node1 podman[12191]: 2024-12-14 11:31:44.032711562 -0500 EST m=+0.082834242 system refresh
Dec 14 11:31:45 managed-node1 podman[12191]: 2024-12-14 11:31:45.721997956 -0500 EST m=+1.772120507 volume create 174bec1e6ec18bfefedd3e5fc8a18b56102b9fb6012f8e02b781fd81b243eef3
Dec 14 11:31:45 managed-node1 podman[12191]: 2024-12-14 11:31:45.701531903 -0500 EST m=+1.751654658 image pull 0030ba3d620c647159c935ee778991c68ef3e51a274703753b0bc530104ef5e5 quay.io/libpod/registry:2.8.2
Dec 14 11:31:45 managed-node1 podman[12191]: 2024-12-14 11:31:45.732182371 -0500 EST m=+1.782304936 container create 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry)
Dec 14 11:31:45 managed-node1 kernel: bridge: filtering via arp/ip/ip6tables is no longer available by default. Update your scripts to load br_netfilter if you need this.
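Note: once the registry container is up, its TLS endpoint can be probed directly. The test itself uses wait_for on port 5000 (visible below); this curl check is an illustrative alternative, and an unauthenticated request to /v2/ should return 401:
    # Verify the TLS endpoint using the CA written earlier
    curl --cacert /tmp/lsr_6ehua9m0_podman/auth/ca.crt https://127.0.0.1:5000/v2/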
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7779] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/3)
Dec 14 11:31:45 managed-node1 (udev-worker)[12280]: Network interface NamePolicy= disabled on kernel command line.
Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered disabled state
Dec 14 11:31:45 managed-node1 kernel: veth0: entered allmulticast mode
Dec 14 11:31:45 managed-node1 kernel: veth0: entered promiscuous mode
Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered blocking state
Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state
Dec 14 11:31:45 managed-node1 (udev-worker)[12198]: Network interface NamePolicy= disabled on kernel command line.
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7970] device (veth0): carrier: link connected
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7973] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4)
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7998] device (podman0): carrier: link connected
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8066] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8071] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8078] device (podman0): Activation: starting connection 'podman0' (08e2f206-5ac2-4e2f-8306-ac90b232dcf4)
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8080] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8083] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8086] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8089] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Dec 14 11:31:45 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service...
Dec 14 11:31:45 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service.
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8659] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8662] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8667] device (podman0): Activation: successful, device activated.
Dec 14 11:31:46 managed-node1 systemd[1]: Created slice machine.slice - Slice /machine.
Dec 14 11:31:46 managed-node1 systemd[1]: Started libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope.
Dec 14 11:31:46 managed-node1 systemd[1]: Started libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope - libcrun container.
Dec 14 11:31:46 managed-node1 podman[12191]: 2024-12-14 11:31:46.056393753 -0500 EST m=+2.106516450 container init 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry)
Dec 14 11:31:46 managed-node1 podman[12191]: 2024-12-14 11:31:46.060003186 -0500 EST m=+2.110125831 container start 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry)
Dec 14 11:31:46 managed-node1 python3.12[12486]: ansible-wait_for Invoked with port=5000 host=127.0.0.1 timeout=300 connect_timeout=5 delay=0 active_connection_states=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT'] state=started sleep=1 path=None search_regex=None exclude_hosts=None msg=None
Dec 14 11:31:47 managed-node1 python3.12[12617]: ansible-ansible.legacy.command Invoked with _raw_params=podman logs podman_registry _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:47 managed-node1 python3.12[12755]: ansible-ansible.legacy.command Invoked with _raw_params=podman pull quay.io/libpod/testimage:20210610; podman push --authfile="/tmp/lsr_6ehua9m0_podman/auth/auth.json" --cert-dir="/tmp/lsr_6ehua9m0_podman/auth" quay.io/libpod/testimage:20210610 docker://localhost:5000/libpod/testimage:20210610 _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:49 managed-node1 podman[12757]: 2024-12-14 11:31:49.445577203 -0500 EST m=+1.809759385 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610
Dec 14 11:31:50 managed-node1 podman[12756]: 2024-12-14 11:31:49.47108663 -0500 EST m=+0.016538804 image push 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f docker://localhost:5000/libpod/testimage:20210610
Dec 14 11:31:50 managed-node1 python3.12[12918]: ansible-ansible.legacy.command Invoked with _raw_params=skopeo inspect --authfile="/tmp/lsr_6ehua9m0_podman/auth/auth.json" --cert-dir="/tmp/lsr_6ehua9m0_podman/auth" docker://localhost:5000/libpod/testimage:20210610 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:53 managed-node1 python3.12[13187]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:31:54 managed-node1 python3.12[13324]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:31:55 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully.
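Note: both the push and the skopeo inspect above rely on credentials in /tmp/lsr_6ehua9m0_podman/auth/auth.json. The test populates that file from vaulted variables; the equivalent interactive step would be something like this, with a placeholder username:
    # Log in once so auth.json carries credentials for the test registry
    podman login --authfile /tmp/lsr_6ehua9m0_podman/auth/auth.json \
        --cert-dir /tmp/lsr_6ehua9m0_podman/auth --username testuser localhost:5000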
Dec 14 11:31:57 managed-node1 python3.12[13458]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:31:58 managed-node1 python3.12[13591]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:00 managed-node1 podman[13732]: 2024-12-14 11:32:00.132917471 -0500 EST m=+0.121086648 image pull-error localhost:5000/libpod/testimage:20210610 initializing source docker://localhost:5000/libpod/testimage:20210610: reading manifest 20210610 in localhost:5000/libpod/testimage: authentication required
Dec 14 11:32:02 managed-node1 python3.12[14001]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:04 managed-node1 python3.12[14138]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:32:07 managed-node1 python3.12[14271]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:32:08 managed-node1 python3.12[14404]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:09 managed-node1 podman[14543]: 2024-12-14 11:32:09.204081254 -0500 EST m=+0.145458147 image pull-error localhost:5000/libpod/testimage:20210610 initializing source docker://localhost:5000/libpod/testimage:20210610: pinging container registry localhost:5000: Get "https://localhost:5000/v2/": tls: failed to verify certificate: x509: certificate signed by unknown authority
Dec 14 11:32:12 managed-node1 python3.12[14812]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:14 managed-node1 python3.12[14949]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:32:16 managed-node1 python3.12[15082]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:32:17 managed-node1 python3.12[15215]: ansible-file Invoked with path=/etc/containers/certs.d/localhost:5000 state=directory owner=root group=0 mode=0700 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:32:20 managed-node1 python3.12[15582]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
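Note: the two pull-errors above are the failure modes this test deliberately provokes, first missing credentials, then an untrusted CA. The directory created at 11:32:17 is where podman and skopeo look for per-registry trust material (containers-certs.d(5)); a sketch of the layout, where the ca.crt copy is inferred rather than shown in this excerpt:
    # Per-registry CA trust for podman/skopeo
    install -d -m 0700 /etc/containers/certs.d/localhost:5000
    cp /tmp/lsr_6ehua9m0_podman/auth/ca.crt /etc/containers/certs.d/localhost:5000/ca.crt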
Dec 14 11:32:21 managed-node1 python3.12[15715]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:22 managed-node1 podman[15856]: 2024-12-14 11:32:22.830406841 -0500 EST m=+0.198941135 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f localhost:5000/libpod/testimage:20210610
Dec 14 11:32:23 managed-node1 python3.12[16001]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:32:23 managed-node1 python3.12[16132]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:32:24 managed-node1 python3.12[16263]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True
Dec 14 11:32:24 managed-node1 python3.12[16368]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1734193943.8234994-10632-256821299067559/.source.yml _original_basename=.h1hpyflq follow=False checksum=fb0097683a2e5c8909a8037d64ddc1b350aed0be backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 14 11:32:25 managed-node1 python3.12[16499]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None
Dec 14 11:32:25 managed-node1 python3.12[16643]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:28 managed-node1 python3.12[16906]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 14 11:32:29 managed-node1 python3.12[17043]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
Dec 14 11:32:32 managed-node1 python3.12[17176]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1
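Note: podman_play with state=started hands the copied YAML to podman's kube support. Conceptually that is the following one-liner, though the module may drive it through the podman-kube@ systemd template instead; this is a sketch, not the module's exact code path:
    # Deploy the kube YAML the way the module does conceptually
    podman kube play /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml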
path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:34 managed-node1 python3.12[17309]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:36 managed-node1 python3.12[17442]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:37 managed-node1 python3.12[17575]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:38 managed-node1 python3.12[17707]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-auth_test_1_kube.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Dec 14 11:32:38 managed-node1 python3.12[17840]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:39 managed-node1 python3.12[17973]: ansible-containers.podman.podman_play Invoked with state=absent kube_file=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Dec 14 11:32:39 managed-node1 python3.12[17973]: ansible-containers.podman.podman_play version: 5.3.1, kube file /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml Dec 14 11:32:40 managed-node1 python3.12[18117]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:41 managed-node1 python3.12[18248]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:43 managed-node1 python3.12[18381]: ansible-systemd Invoked with name=auth_test_1_quadlet.service scope=system state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None Dec 14 11:32:43 managed-node1 python3.12[18513]: ansible-stat Invoked with path=/etc/containers/systemd/auth_test_1_quadlet.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:44 managed-node1 python3.12[18644]: ansible-file Invoked with path=/etc/containers/systemd/auth_test_1_quadlet.container state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None 
src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:46 managed-node1 python3.12[18775]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:49 managed-node1 python3.12[19170]: ansible-file Invoked with path=/root/.config/containers state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:50 managed-node1 python3.12[19301]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:55 managed-node1 python3.12[20220]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:56 managed-node1 python3.12[20357]: ansible-getent Invoked with database=passwd key=auth_test_user1 fail_key=False service=None split=None Dec 14 11:32:57 managed-node1 python3.12[20489]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:58 managed-node1 python3.12[20621]: ansible-user Invoked with name=auth_test_user1 state=absent non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on managed-node1 update_password=always uid=None group=None groups=None comment=None home=None shell=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None Dec 14 11:32:58 managed-node1 python3.12[20753]: ansible-file Invoked with path=/home/auth_test_user1 state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:58 managed-node1 rsyslogd[651]: imjournal: journal files changed, reloading... 
[v8.2408.0-2.el10 try https://www.rsyslog.com/e/0 ] Dec 14 11:32:59 managed-node1 python3.12[20885]: ansible-ansible.legacy.command Invoked with _raw_params=podman inspect podman_registry --format '{{range .}}{{range .Mounts}}{{if eq .Type "volume"}}{{.Name}}{{end}}{{end}}{{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:59 managed-node1 python3.12[21023]: ansible-ansible.legacy.command Invoked with _raw_params=podman rm -f podman_registry _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:59 managed-node1 systemd[1]: libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 podman[21024]: 2024-12-14 11:32:59.789115628 -0500 EST m=+0.041007423 container died 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:32:59 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Dec 14 11:32:59 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Dec 14 11:32:59 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Dec 14 11:32:59 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Dec 14 11:32:59 managed-node1 NetworkManager[703]: [1734193979.8303] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Dec 14 11:32:59 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1969. Dec 14 11:32:59 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1969. Dec 14 11:32:59 managed-node1 systemd[1]: run-netns-netns\x2d85bf4814\x2dacfa\x2db691\x2da100\x2d70dfbf3554ab.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d85bf4814\x2dacfa\x2db691\x2da100\x2d70dfbf3554ab.mount has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a-userdata-shm.mount has successfully entered the 'dead' state. 
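Note on the registry exchange traced above: the first pull attempt failed with "authentication required" and the second with an x509 "certificate signed by unknown authority" error, and the log then shows /etc/containers/certs.d/localhost:5000 being created before the pull finally succeeds. A minimal sketch of that fix and of the teardown the test performs here (the volume removal follows just below), assuming the registry's self-signed certificate is available at the hypothetical path /tmp/registry.crt and credentials are known:

# Trust the self-signed registry certificate (containers-certs.d(5) layout),
# then authenticate; the two pull errors above stem from these two gaps.
mkdir -p /etc/containers/certs.d/localhost:5000
cp /tmp/registry.crt /etc/containers/certs.d/localhost:5000/ca.crt   # hypothetical source path
podman login localhost:5000
podman pull localhost:5000/libpod/testimage:20210610

# Teardown as logged: read the registry container's named volume via a Go
# template, force-remove the container, then remove the volume.
vol=$(podman inspect podman_registry --format '{{range .}}{{range .Mounts}}{{if eq .Type "volume"}}{{.Name}}{{end}}{{end}}{{end}}')
podman rm -f podman_registry
podman volume rm "$vol"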
Dec 14 11:32:59 managed-node1 systemd[1]: var-lib-containers-storage-overlay-caa7ce3c9d2ffccfe015c47869a4faf042de89e661c34084c6c65ad743feba41-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-caa7ce3c9d2ffccfe015c47869a4faf042de89e661c34084c6c65ad743feba41-merged.mount has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 podman[21024]: 2024-12-14 11:32:59.914889749 -0500 EST m=+0.166781463 container remove 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:32:59 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 systemd[1]: libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has successfully entered the 'dead' state. Dec 14 11:33:00 managed-node1 python3.12[21191]: ansible-ansible.legacy.command Invoked with _raw_params=podman volume rm 174bec1e6ec18bfefedd3e5fc8a18b56102b9fb6012f8e02b781fd81b243eef3 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:00 managed-node1 podman[21192]: 2024-12-14 11:33:00.361517439 -0500 EST m=+0.026007234 volume remove 174bec1e6ec18bfefedd3e5fc8a18b56102b9fb6012f8e02b781fd81b243eef3 Dec 14 11:33:00 managed-node1 python3.12[21330]: ansible-file Invoked with path=/tmp/lsr_6ehua9m0_podman state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:33:03 managed-node1 python3.12[21504]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Dec 14 11:33:04 managed-node1 python3.12[21664]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:05 managed-node1 python3.12[21795]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:07 managed-node1 python3.12[22057]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:09 managed-node1 python3.12[22195]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Dec 14 11:33:09 managed-node1 python3.12[22327]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:09 managed-node1 
systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Dec 14 11:33:13 managed-node1 python3.12[22461]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:15 managed-node1 python3.12[22594]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:33:15 managed-node1 python3.12[22725]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Dec 14 11:33:16 managed-node1 python3.12[22830]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1734193995.4057543-13488-229431513263554/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:33:17 managed-node1 python3.12[22961]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Dec 14 11:33:17 managed-node1 systemd[1]: Reload requested from client PID 22962 ('systemctl') (unit session-5.scope)... Dec 14 11:33:17 managed-node1 systemd[1]: Reloading... Dec 14 11:33:17 managed-node1 systemd[1]: Reloading finished in 198 ms. Dec 14 11:33:18 managed-node1 python3.12[23145]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2048. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23149]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23149]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:18 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23158]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23158]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:18 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:18 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2048 and the job result is failed. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 1. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2132. 
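quadlet-pod-pod-pod.service is not a hand-written unit: quadlet generates it from the quadlet-pod-pod.pod file installed above (a file NAME.pod yields NAME-pod.service after a daemon-reload). The test's actual file content is masked in the copy task (content=NOT_LOGGING_PARAMETER), so the following is only a plausible minimal sketch of such a unit, not the file used by the test:

# Hypothetical reconstruction; PodName= is illustrative, not taken from the log.
cat > /etc/containers/systemd/quadlet-pod-pod.pod <<'EOF'
[Pod]
PodName=quadlet-pod

[Install]
WantedBy=default.target
EOF
systemctl daemon-reload                        # lets quadlet (re)generate units
systemctl start quadlet-pod-pod-pod.service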
Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23187]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23187]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23215]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23215]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:18 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2132 and the job result is failed. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 2. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. 
░░ ░░ The job identifier is 2216. Dec 14 11:33:18 managed-node1 python3.12[23312]: ansible-ansible.legacy.command Invoked with _raw_params=set -x set -o pipefail exec 1>&2 #podman volume rm --all #podman network prune -f podman volume ls podman network ls podman secret ls podman container ls podman pod ls podman images systemctl list-units | grep quadlet systemctl list-unit-files | grep quadlet ls -alrtF /etc/containers/systemd /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23313]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23313]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23329]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23329]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:18 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2216 and the job result is failed. 
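The ansible-ansible.legacy.command entry above carries a multi-line debug script that the journal rendering has flattened onto a single line. Reflowed for readability, it runs:

set -x
set -o pipefail
exec 1>&2
#podman volume rm --all
#podman network prune -f
podman volume ls
podman network ls
podman secret ls
podman container ls
podman pod ls
podman images
systemctl list-units | grep quadlet
systemctl list-unit-files | grep quadlet
ls -alrtF /etc/containers/systemd
/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log

Everything is redirected to stderr (exec 1>&2) so it lands in the task's error stream, and the -dryrun call prints the units quadlet would generate without writing anything to the system.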
Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 3. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2300. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23377]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23377]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:19 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23386]: time="2024-12-14T11:33:19-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23386]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:19 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. 
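The repeating failure has a single root cause, visible in the first error line of each cycle: podman cannot find the catatonit binary it uses as the pod's pause process, so the ExecStartPre= "podman pod create" step exits 125 every time. A plausible remedy on this CentOS 10 host (assumption: the catatonit package is available in the configured repositories):

# catatonit provides the pause binary podman needs to build the local pause
# image for pods; installing it should let the ExecStartPre= step succeed.
dnf install -y catatonit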
Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:19 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2300 and the job result is failed. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 4. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:19 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2384. Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23488]: time="2024-12-14T11:33:19-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23488]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:19 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. 
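The second 125 in each cycle is a knock-on effect rather than a separate bug: because "podman pod create" never ran to completion, the pod ID file /run/quadlet-pod-pod-pod.pod-id was never written, so the ExecStopPost= cleanup that reads it fails as well. To confirm how the generated unit wires these steps together, inspect it directly:

# Show the unit quadlet generated from quadlet-pod-pod.pod ...
systemctl cat quadlet-pod-pod-pod.service
# ... or ask quadlet what it would generate, as the debug script above does:
/usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log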
Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23519]: time="2024-12-14T11:33:19-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23519]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:19 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2384 and the job result is failed. Dec 14 11:33:19 managed-node1 python3.12[23544]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 5. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Start request repeated too quickly. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:19 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2468 and the job result is failed. 
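After the fifth failed start inside systemd's rate-limit window, the unit hits "Start request repeated too quickly" and systemd stops honoring start requests altogether (the defaults, StartLimitBurst=5 within StartLimitIntervalSec=10s, match the restart counter seen here). Once the underlying catatonit problem is fixed, the failure state has to be cleared before the service will start again:

systemctl reset-failed quadlet-pod-pod-pod.service
systemctl start quadlet-pod-pod-pod.service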
Dec 14 11:33:20 managed-node1 python3.12[23678]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None TASK [Cleanup user] ************************************************************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:159 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.466) 0:00:17.873 ***** included: fedora.linux_system_roles.podman for managed-node1 TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:3 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.190) 0:00:18.063 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Ensure ansible_facts used by role] **** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:3 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.088) 0:00:18.152 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_required_facts | difference(ansible_facts.keys() | list) | length > 0", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if system is ostree] ************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.060) 0:00:18.212 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag to indicate system is ostree] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:16 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.050) 0:00:18.263 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_is_ostree is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.049) 0:00:18.313 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set flag if transactional-update exists] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:28 Saturday 14 December 2024 11:33:20 -0500 (0:00:00.074) 0:00:18.387 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "not __podman_is_transactional is defined", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Set platform/version specific variables] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:32 Saturday 14 December 2024 11:33:21 -0500 (0:00:00.056) 0:00:18.444 ***** ok: [managed-node1] => (item=RedHat.yml) => { "ansible_facts": { "__podman_packages": [ 
"podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/RedHat.yml" ], "ansible_loop_var": "item", "changed": false, "item": "RedHat.yml" } skipping: [managed-node1] => (item=CentOS.yml) => { "ansible_loop_var": "item", "changed": false, "false_condition": "__vars_file is file", "item": "CentOS.yml", "skip_reason": "Conditional result was False" } ok: [managed-node1] => (item=CentOS_10.yml) => { "ansible_facts": { "__podman_packages": [ "iptables-nft", "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml" ], "ansible_loop_var": "item", "changed": false, "item": "CentOS_10.yml" } ok: [managed-node1] => (item=CentOS_10.yml) => { "ansible_facts": { "__podman_packages": [ "iptables-nft", "podman", "shadow-utils-subid" ] }, "ansible_included_var_files": [ "/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/vars/CentOS_10.yml" ], "ansible_loop_var": "item", "changed": false, "item": "CentOS_10.yml" } TASK [fedora.linux_system_roles.podman : Gather the package facts] ************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6 Saturday 14 December 2024 11:33:21 -0500 (0:00:00.128) 0:00:18.573 ***** ok: [managed-node1] => { "censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result", "changed": false } TASK [fedora.linux_system_roles.podman : Enable copr if requested] ************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:10 Saturday 14 December 2024 11:33:21 -0500 (0:00:00.841) 0:00:19.414 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_use_copr | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Ensure required packages are installed] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:14 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.050) 0:00:19.464 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "(__podman_packages | difference(ansible_facts.packages))", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Notify user that reboot is needed to apply changes] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:28 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.039) 0:00:19.504 ***** skipping: [managed-node1] => { "false_condition": "__podman_is_transactional | d(false)" } TASK [fedora.linux_system_roles.podman : Reboot transactional update systems] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:33 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.036) 0:00:19.540 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Fail if reboot is needed and not set] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:38 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.043) 0:00:19.584 ***** skipping: [managed-node1] => { "changed": false, "false_condition": 
"__podman_is_transactional | d(false)", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Get podman version] ******************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.050) 0:00:19.634 ***** ok: [managed-node1] => { "changed": false, "cmd": [ "podman", "--version" ], "delta": "0:00:00.024972", "end": "2024-12-14 11:33:22.558413", "rc": 0, "start": "2024-12-14 11:33:22.533441" } STDOUT: podman version 5.3.1 STDERR: time="2024-12-14T11:33:22-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" TASK [fedora.linux_system_roles.podman : Set podman version] ******************* task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:52 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.491) 0:00:20.126 ***** ok: [managed-node1] => { "ansible_facts": { "podman_version": "5.3.1" }, "changed": false } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.2 or later] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56 Saturday 14 December 2024 11:33:22 -0500 (0:00:00.107) 0:00:20.233 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_version is version(\"4.2\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:63 Saturday 14 December 2024 11:33:23 -0500 (0:00:00.214) 0:00:20.448 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_version is version(\"4.4\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 4.4 or later for quadlet, secrets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:73 Saturday 14 December 2024 11:33:23 -0500 (0:00:00.107) 0:00:20.556 ***** META: end_host conditional evaluated to False, continuing execution for managed-node1 skipping: [managed-node1] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1" } MSG: end_host conditional evaluated to false, continuing execution for managed-node1 TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:80 Saturday 14 December 2024 11:33:23 -0500 (0:00:00.094) 0:00:20.650 ***** skipping: [managed-node1] => { "changed": false, "false_condition": "podman_version is version(\"5.0\", \"<\")", "skip_reason": "Conditional result was False" } TASK [fedora.linux_system_roles.podman : Podman package version must be 5.0 or later for Pod quadlets] *** task path: 
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:96 Saturday 14 December 2024 11:33:23 -0500 (0:00:00.102) 0:00:20.753 ***** META: end_host conditional evaluated to False, continuing execution for managed-node1 skipping: [managed-node1] => { "skip_reason": "end_host conditional evaluated to False, continuing execution for managed-node1" } MSG: end_host conditional evaluated to false, continuing execution for managed-node1 TASK [fedora.linux_system_roles.podman : Check user and group information] ***** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:109 Saturday 14 December 2024 11:33:23 -0500 (0:00:00.127) 0:00:20.880 ***** included: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml for managed-node1 TASK [fedora.linux_system_roles.podman : Get user information] ***************** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2 Saturday 14 December 2024 11:33:23 -0500 (0:00:00.114) 0:00:20.994 ***** ok: [managed-node1] => { "ansible_facts": { "getent_passwd": { "user_quadlet_pod": null } }, "changed": false } MSG: One or more supplied key could not be found in the database. TASK [fedora.linux_system_roles.podman : Fail if user does not exist] ********** task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:9 Saturday 14 December 2024 11:33:24 -0500 (0:00:00.460) 0:00:21.455 ***** fatal: [managed-node1]: FAILED! => { "changed": false } MSG: The given podman user [user_quadlet_pod] does not exist - cannot continue TASK [Dump journal] ************************************************************ task path: /tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:194 Saturday 14 December 2024 11:33:24 -0500 (0:00:00.056) 0:00:21.512 ***** fatal: [managed-node1]: FAILED! => { "changed": false, "cmd": [ "journalctl", "-ex" ], "delta": "0:00:00.028554", "end": "2024-12-14 11:33:24.411784", "failed_when_result": true, "rc": 0, "start": "2024-12-14 11:33:24.383230" } STDOUT: Dec 14 11:26:32 localhost systemd[1]: Mounted dev-mqueue.mount - POSIX Message Queue File System. Dec 14 11:26:32 localhost systemd[1]: Mounted sys-kernel-debug.mount - Kernel Debug File System. Dec 14 11:26:32 localhost systemd[1]: Mounted sys-kernel-tracing.mount - Kernel Trace File System. Dec 14 11:26:32 localhost systemd[1]: Finished kmod-static-nodes.service - Create List of Static Device Nodes. Dec 14 11:26:32 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@configfs.service - Load Kernel Module configfs. Dec 14 11:26:32 localhost systemd[1]: modprobe@dm_mod.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@dm_mod.service - Load Kernel Module dm_mod. Dec 14 11:26:32 localhost systemd[1]: modprobe@drm.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@drm.service - Load Kernel Module drm. Dec 14 11:26:32 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@efi_pstore.service - Load Kernel Module efi_pstore. Dec 14 11:26:32 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully. 
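The fatal task above is the actual test outcome: during the cleanup pass the role looks up user_quadlet_pod with getent, finds nothing (the account either was never created, since the root run already failed, or was removed earlier), and aborts with "cannot continue". A minimal guard for reproducing the cleanup by hand, assuming the account may be absent:

# Recreate the rootless user before invoking the role against it, or skip
# the per-user cleanup entirely when the account no longer exists.
getent passwd user_quadlet_pod >/dev/null || useradd user_quadlet_pod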
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@fuse.service - Load Kernel Module fuse. Dec 14 11:26:32 localhost systemd[1]: modprobe@loop.service: Deactivated successfully. Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@loop.service - Load Kernel Module loop. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-network-generator.service - Generate network units from Kernel command line. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-remount-fs.service - Remount Root and Kernel File Systems. Dec 14 11:26:32 localhost systemd-journald[523]: Journal started ░░ Subject: The journal has been started ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The system journal process has started up, opened the journal ░░ files for writing and is now ready to process requests. Dec 14 11:26:32 localhost systemd-journald[523]: Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is 8M, max 70.5M, 62.5M free. ░░ Subject: Disk space used by the journal ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is currently using 8M. ░░ Maximum allowed usage is set to 70.5M. ░░ Leaving at least 35.2M free (of currently available 689.3M of disk space). ░░ Enforced usage limit is thus 70.5M, of which 62.5M are still available. ░░ ░░ The limits controlling how much disk space is used by the journal may ░░ be configured with SystemMaxUse=, SystemKeepFree=, SystemMaxFileSize=, ░░ RuntimeMaxUse=, RuntimeKeepFree=, RuntimeMaxFileSize= settings in ░░ /etc/systemd/journald.conf. See journald.conf(5) for details. Dec 14 11:26:31 localhost systemd[1]: Queued start job for default target multi-user.target. Dec 14 11:26:31 localhost systemd[1]: systemd-journald.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-journald.service has successfully entered the 'dead' state. Dec 14 11:26:32 localhost systemd[1]: Started systemd-journald.service - Journal Service. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-sysctl.service - Apply Kernel Variables. ░░ Subject: A start job for unit systemd-sysctl.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-sysctl.service has finished successfully. ░░ ░░ The job identifier is 181. Dec 14 11:26:32 localhost systemd[1]: systemd-hwdb-update.service - Rebuild Hardware Database was skipped because of an unmet condition check (ConditionNeedsUpdate=/etc). ░░ Subject: A start job for unit systemd-hwdb-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hwdb-update.service has finished successfully. ░░ ░░ The job identifier is 177. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-journal-flush.service - Flush Journal to Persistent Storage... ░░ Subject: A start job for unit systemd-journal-flush.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-journal-flush.service has begun execution. ░░ ░░ The job identifier is 152. Dec 14 11:26:32 localhost systemd[1]: systemd-pstore.service - Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore). 
░░ Subject: A start job for unit systemd-pstore.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-pstore.service has finished successfully. ░░ ░░ The job identifier is 147. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-random-seed.service - Load/Save OS Random Seed... ░░ Subject: A start job for unit systemd-random-seed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-random-seed.service has begun execution. ░░ ░░ The job identifier is 137. Dec 14 11:26:32 localhost systemd[1]: systemd-repart.service - Repartition Root Disk was skipped because no trigger condition checks were met. ░░ Subject: A start job for unit systemd-repart.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-repart.service has finished successfully. ░░ ░░ The job identifier is 160. Dec 14 11:26:32 localhost systemd[1]: Starting systemd-tmpfiles-setup-dev-early.service - Create Static Device Nodes in /dev gracefully... ░░ Subject: A start job for unit systemd-tmpfiles-setup-dev-early.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-tmpfiles-setup-dev-early.service has begun execution. ░░ ░░ The job identifier is 198. Dec 14 11:26:32 localhost systemd[1]: systemd-tpm2-setup.service - TPM SRK Setup was skipped because of an unmet condition check (ConditionSecurity=measured-uki). ░░ Subject: A start job for unit systemd-tpm2-setup.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-tpm2-setup.service has finished successfully. ░░ ░░ The job identifier is 151. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-udev-load-credentials.service - Load udev Rules from Credentials. ░░ Subject: A start job for unit systemd-udev-load-credentials.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-udev-load-credentials.service has finished successfully. ░░ ░░ The job identifier is 173. Dec 14 11:26:32 localhost systemd[1]: Finished systemd-random-seed.service - Load/Save OS Random Seed. ░░ Subject: A start job for unit systemd-random-seed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-random-seed.service has finished successfully. ░░ ░░ The job identifier is 137. Dec 14 11:26:32 localhost systemd[1]: Finished lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling. ░░ Subject: A start job for unit lvm2-monitor.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit lvm2-monitor.service has finished successfully. ░░ ░░ The job identifier is 186. Dec 14 11:26:32 localhost systemd-journald[523]: Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is 8M, max 70.5M, 62.5M free. ░░ Subject: Disk space used by the journal ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Runtime Journal (/run/log/journal/ec2a8c5f24a2db6683c20bfae8cc5947) is currently using 8M. ░░ Maximum allowed usage is set to 70.5M. ░░ Leaving at least 35.2M free (of currently available 689.3M of disk space). 
░░ Enforced usage limit is thus 70.5M, of which 62.5M are still available.
░░
░░ The limits controlling how much disk space is used by the journal may
░░ be configured with SystemMaxUse=, SystemKeepFree=, SystemMaxFileSize=,
░░ RuntimeMaxUse=, RuntimeKeepFree=, RuntimeMaxFileSize= settings in
░░ /etc/systemd/journald.conf. See journald.conf(5) for details.
Dec 14 11:26:32 localhost systemd-journald[523]: Received client request to flush runtime journal.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-journal-flush.service - Flush Journal to Persistent Storage.
░░ Subject: A start job for unit systemd-journal-flush.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-journal-flush.service has finished successfully.
░░
░░ The job identifier is 152.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-udev-trigger.service - Coldplug All udev Devices.
░░ Subject: A start job for unit systemd-udev-trigger.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-udev-trigger.service has finished successfully.
░░
░░ The job identifier is 185.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-tmpfiles-setup-dev-early.service - Create Static Device Nodes in /dev gracefully.
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev-early.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev-early.service has finished successfully.
░░
░░ The job identifier is 198.
Dec 14 11:26:32 localhost systemd[1]: systemd-sysusers.service - Create System Users was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-sysusers.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-sysusers.service has finished successfully.
░░
░░ The job identifier is 182.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev...
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev.service has begun execution.
░░
░░ The job identifier is 145.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev.
░░ Subject: A start job for unit systemd-tmpfiles-setup-dev.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup-dev.service has finished successfully.
░░
░░ The job identifier is 145.
Dec 14 11:26:32 localhost systemd[1]: Reached target local-fs-pre.target - Preparation for Local File Systems.
░░ Subject: A start job for unit local-fs-pre.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit local-fs-pre.target has finished successfully.
░░
░░ The job identifier is 144.
Dec 14 11:26:32 localhost systemd[1]: Reached target local-fs.target - Local File Systems.
░░ Subject: A start job for unit local-fs.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit local-fs.target has finished successfully.
░░
░░ The job identifier is 142.
Dec 14 11:26:32 localhost systemd[1]: Listening on systemd-bootctl.socket - Boot Entries Service Socket.
░░ Subject: A start job for unit systemd-bootctl.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-bootctl.socket has finished successfully.
░░
░░ The job identifier is 213.
Dec 14 11:26:32 localhost systemd[1]: Listening on systemd-sysext.socket - System Extension Image Management.
░░ Subject: A start job for unit systemd-sysext.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-sysext.socket has finished successfully.
░░
░░ The job identifier is 220.
Dec 14 11:26:32 localhost systemd[1]: ldconfig.service - Rebuild Dynamic Linker Cache was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit ldconfig.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit ldconfig.service has finished successfully.
░░
░░ The job identifier is 146.
Dec 14 11:26:32 localhost systemd[1]: selinux-autorelabel-mark.service - Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).
░░ Subject: A start job for unit selinux-autorelabel-mark.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit selinux-autorelabel-mark.service has finished successfully.
░░
░░ The job identifier is 190.
Dec 14 11:26:32 localhost systemd[1]: systemd-binfmt.service - Set Up Additional Binary Formats was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-binfmt.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-binfmt.service has finished successfully.
░░
░░ The job identifier is 193.
Dec 14 11:26:32 localhost systemd[1]: systemd-boot-random-seed.service - Update Boot Loader Random Seed was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-boot-random-seed.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-boot-random-seed.service has finished successfully.
░░
░░ The job identifier is 179.
Dec 14 11:26:32 localhost systemd[1]: systemd-confext.service - Merge System Configuration Images into /etc/ was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-confext.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-confext.service has finished successfully.
░░
░░ The job identifier is 157.
Dec 14 11:26:32 localhost systemd[1]: systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/ was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-sysext.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-sysext.service has finished successfully.
░░
░░ The job identifier is 189.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-tmpfiles-setup.service - Create System Files and Directories...
░░ Subject: A start job for unit systemd-tmpfiles-setup.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup.service has begun execution.
░░
░░ The job identifier is 139.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-udevd.service - Rule-based Manager for Device Events and Files...
░░ Subject: A start job for unit systemd-udevd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-udevd.service has begun execution.
░░
░░ The job identifier is 172.
Dec 14 11:26:32 localhost systemd-udevd[562]: Using default interface naming scheme 'rhel-10.0-beta'.
Dec 14 11:26:32 localhost systemd[1]: Started systemd-udevd.service - Rule-based Manager for Device Events and Files.
░░ Subject: A start job for unit systemd-udevd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-udevd.service has finished successfully.
░░
░░ The job identifier is 172.
Dec 14 11:26:32 localhost systemd[1]: Starting modprobe@fuse.service - Load Kernel Module fuse...
░░ Subject: A start job for unit modprobe@fuse.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit modprobe@fuse.service has begun execution.
░░
░░ The job identifier is 294.
Dec 14 11:26:32 localhost systemd[1]: Starting modprobe@configfs.service - Load Kernel Module configfs...
░░ Subject: A start job for unit modprobe@configfs.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit modprobe@configfs.service has begun execution.
░░
░░ The job identifier is 302.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-tmpfiles-setup.service - Create System Files and Directories.
░░ Subject: A start job for unit systemd-tmpfiles-setup.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-setup.service has finished successfully.
░░
░░ The job identifier is 139.
Dec 14 11:26:32 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit modprobe@fuse.service has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@fuse.service - Load Kernel Module fuse.
░░ Subject: A start job for unit modprobe@fuse.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit modprobe@fuse.service has finished successfully.
░░
░░ The job identifier is 294.
Dec 14 11:26:32 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit modprobe@configfs.service has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: Finished modprobe@configfs.service - Load Kernel Module configfs.
░░ Subject: A start job for unit modprobe@configfs.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit modprobe@configfs.service has finished successfully.
░░
░░ The job identifier is 302.
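(Note: the journald catalog text earlier in this log names the journald.conf settings that bound journal disk usage. For reference, a minimal Ansible sketch of pinning one of them and restarting journald; the 64M value is an assumed example, not what this host uses.)

  - name: Cap runtime journal usage (RuntimeMaxUse value is illustrative)
    ansible.builtin.lineinfile:
      path: /etc/systemd/journald.conf
      regexp: '^#?RuntimeMaxUse='
      line: RuntimeMaxUse=64M

  - name: Restart journald so the new limit takes effect
    ansible.builtin.systemd_service:
      name: systemd-journald
      state: restarted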
Dec 14 11:26:32 localhost systemd[1]: Starting audit-rules.service - Load Audit Rules...
░░ Subject: A start job for unit audit-rules.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit audit-rules.service has begun execution.
░░
░░ The job identifier is 236.
Dec 14 11:26:32 localhost systemd[1]: Starting rpcbind.service - RPC Bind...
░░ Subject: A start job for unit rpcbind.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rpcbind.service has begun execution.
░░
░░ The job identifier is 253.
Dec 14 11:26:32 localhost systemd[1]: systemd-firstboot.service - First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).
░░ Subject: A start job for unit systemd-firstboot.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-firstboot.service has finished successfully.
░░
░░ The job identifier is 180.
Dec 14 11:26:32 localhost systemd[1]: first-boot-complete.target - First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).
░░ Subject: A start job for unit first-boot-complete.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit first-boot-complete.target has finished successfully.
░░
░░ The job identifier is 138.
Dec 14 11:26:32 localhost systemd[1]: systemd-journal-catalog-update.service - Rebuild Journal Catalog was skipped because of an unmet condition check (ConditionNeedsUpdate=/var).
░░ Subject: A start job for unit systemd-journal-catalog-update.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-journal-catalog-update.service has finished successfully.
░░
░░ The job identifier is 131.
Dec 14 11:26:32 localhost systemd[1]: Starting systemd-machine-id-commit.service - Save Transient machine-id to Disk...
░░ Subject: A start job for unit systemd-machine-id-commit.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-machine-id-commit.service has begun execution.
░░
░░ The job identifier is 194.
Dec 14 11:26:32 localhost systemd[1]: systemd-update-done.service - Update is Completed was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit systemd-update-done.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-update-done.service has finished successfully.
░░
░░ The job identifier is 168.
Dec 14 11:26:32 localhost systemd[1]: Condition check resulted in dev-ttyS0.device - /dev/ttyS0 being skipped.
░░ Subject: A start job for unit dev-ttyS0.device has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dev-ttyS0.device has finished successfully.
░░
░░ The job identifier is 232.
Dec 14 11:26:32 localhost systemd[1]: Finished systemd-machine-id-commit.service - Save Transient machine-id to Disk.
░░ Subject: A start job for unit systemd-machine-id-commit.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-machine-id-commit.service has finished successfully.
░░
░░ The job identifier is 194.
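(Note: several units above were skipped on condition checks such as ConditionFirstBoot= and ConditionNeedsUpdate=. When a playbook needs to see why a unit did not run, the condition outcome is exposed as a unit property; a read-only sketch, with systemd-firstboot.service chosen only as an example.)

  - name: Inspect why a unit was skipped (unit name is illustrative)
    ansible.builtin.command: systemctl show systemd-firstboot.service -p ConditionResult
    register: cond_result
    changed_when: false

  - name: Show the condition result
    ansible.builtin.debug:
      var: cond_result.stdout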
Dec 14 11:26:32 localhost systemd[1]: run-credentials-systemd\x2dnetwork\x2dgenerator.service.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit run-credentials-systemd\x2dnetwork\x2dgenerator.service.mount has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: run-credentials-systemd\x2dsysctl.service.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit run-credentials-systemd\x2dsysctl.service.mount has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: run-credentials-systemd\x2dudev\x2dload\x2dcredentials.service.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit run-credentials-systemd\x2dudev\x2dload\x2dcredentials.service.mount has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: etc-machine\x2did.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit etc-machine\x2did.mount has successfully entered the 'dead' state.
Dec 14 11:26:32 localhost systemd[1]: Mounting sys-fs-fuse-connections.mount - FUSE Control File System...
░░ Subject: A start job for unit sys-fs-fuse-connections.mount has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sys-fs-fuse-connections.mount has begun execution.
░░
░░ The job identifier is 166.
Dec 14 11:26:32 localhost systemd[1]: Mounting var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System...
░░ Subject: A start job for unit var-lib-nfs-rpc_pipefs.mount has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit var-lib-nfs-rpc_pipefs.mount has begun execution.
░░
░░ The job identifier is 247.
Dec 14 11:26:32 localhost systemd[1]: Mounted sys-fs-fuse-connections.mount - FUSE Control File System.
░░ Subject: A start job for unit sys-fs-fuse-connections.mount has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sys-fs-fuse-connections.mount has finished successfully.
░░
░░ The job identifier is 166.
Dec 14 11:26:32 localhost (udev-worker)[574]: Network interface NamePolicy= disabled on kernel command line.
Dec 14 11:26:32 localhost kernel: RPC: Registered named UNIX socket transport module.
Dec 14 11:26:32 localhost kernel: RPC: Registered udp transport module.
Dec 14 11:26:32 localhost kernel: RPC: Registered tcp transport module.
Dec 14 11:26:32 localhost kernel: RPC: Registered tcp-with-tls transport module.
Dec 14 11:26:32 localhost kernel: RPC: Registered tcp NFSv4.1 backchannel transport module.
Dec 14 11:26:32 localhost systemd[1]: Mounted var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System.
░░ Subject: A start job for unit var-lib-nfs-rpc_pipefs.mount has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit var-lib-nfs-rpc_pipefs.mount has finished successfully.
░░
░░ The job identifier is 247.
Dec 14 11:26:32 localhost systemd[1]: Reached target rpc_pipefs.target.
░░ Subject: A start job for unit rpc_pipefs.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rpc_pipefs.target has finished successfully.
░░
░░ The job identifier is 246.
Dec 14 11:26:33 localhost augenrules[584]: /sbin/augenrules: No change
Dec 14 11:26:33 localhost augenrules[615]: No rules
Dec 14 11:26:33 localhost augenrules[615]: enabled 0
Dec 14 11:26:33 localhost augenrules[615]: failure 1
Dec 14 11:26:33 localhost augenrules[615]: pid 0
Dec 14 11:26:33 localhost augenrules[615]: rate_limit 0
Dec 14 11:26:33 localhost augenrules[615]: backlog_limit 8192
Dec 14 11:26:33 localhost augenrules[615]: lost 0
Dec 14 11:26:33 localhost augenrules[615]: backlog 0
Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time 60000
Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time_actual 0
Dec 14 11:26:33 localhost augenrules[615]: enabled 0
Dec 14 11:26:33 localhost augenrules[615]: failure 1
Dec 14 11:26:33 localhost augenrules[615]: pid 0
Dec 14 11:26:33 localhost augenrules[615]: rate_limit 0
Dec 14 11:26:33 localhost augenrules[615]: backlog_limit 8192
Dec 14 11:26:33 localhost augenrules[615]: lost 0
Dec 14 11:26:33 localhost augenrules[615]: backlog 0
Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time 60000
Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time_actual 0
Dec 14 11:26:33 localhost augenrules[615]: enabled 0
Dec 14 11:26:33 localhost augenrules[615]: failure 1
Dec 14 11:26:33 localhost augenrules[615]: pid 0
Dec 14 11:26:33 localhost augenrules[615]: rate_limit 0
Dec 14 11:26:33 localhost augenrules[615]: backlog_limit 8192
Dec 14 11:26:33 localhost augenrules[615]: lost 0
Dec 14 11:26:33 localhost augenrules[615]: backlog 0
Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time 60000
Dec 14 11:26:33 localhost augenrules[615]: backlog_wait_time_actual 0
Dec 14 11:26:33 localhost systemd[1]: audit-rules.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit audit-rules.service has successfully entered the 'dead' state.
Dec 14 11:26:33 localhost systemd[1]: Finished audit-rules.service - Load Audit Rules.
░░ Subject: A start job for unit audit-rules.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit audit-rules.service has finished successfully.
░░
░░ The job identifier is 236.
Dec 14 11:26:33 localhost systemd[1]: Starting auditd.service - Security Audit Logging Service...
░░ Subject: A start job for unit auditd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit auditd.service has begun execution.
░░
░░ The job identifier is 235.
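(Note: the augenrules output above is a dump of the kernel audit status: enabled, failure mode, backlog_limit, and so on. The same counters can be read back at any time with auditctl; a small read-only sketch, assuming the audit userspace tools are installed.)

  - name: Read the kernel audit status (same fields augenrules printed)
    ansible.builtin.command: auditctl -s
    register: audit_status
    changed_when: false

  - name: Show the status fields
    ansible.builtin.debug:
      var: audit_status.stdout_lines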
Dec 14 11:26:33 localhost kernel: input: PC Speaker as /devices/platform/pcspkr/input/input5
Dec 14 11:26:33 localhost kernel: cirrus 0000:00:02.0: vgaarb: deactivate vga console
Dec 14 11:26:33 localhost kernel: Console: switching to colour dummy device 80x25
Dec 14 11:26:33 localhost kernel: [drm] Initialized cirrus 2.0.0 for 0000:00:02.0 on minor 0
Dec 14 11:26:33 localhost kernel: fbcon: cirrusdrmfb (fb0) is primary device
Dec 14 11:26:33 localhost kernel: Console: switching to colour frame buffer device 128x48
Dec 14 11:26:33 localhost kernel: cirrus 0000:00:02.0: [drm] fb0: cirrusdrmfb frame buffer device
Dec 14 11:26:33 localhost systemd[1]: Started auditd.service - Security Audit Logging Service.
░░ Subject: A start job for unit auditd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit auditd.service has finished successfully.
░░
░░ The job identifier is 235.
Dec 14 11:26:33 localhost auditd[625]: No plugins found, not dispatching events
Dec 14 11:26:33 localhost systemd[1]: Starting systemd-update-utmp.service - Record System Boot/Shutdown in UTMP...
░░ Subject: A start job for unit systemd-update-utmp.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-update-utmp.service has begun execution.
░░
░░ The job identifier is 258.
Dec 14 11:26:33 localhost auditd[625]: Init complete, auditd 4.0 listening for events (startup state enable)
Dec 14 11:26:33 localhost systemd[1]: Started rpcbind.service - RPC Bind.
░░ Subject: A start job for unit rpcbind.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rpcbind.service has finished successfully.
░░
░░ The job identifier is 253.
Dec 14 11:26:33 localhost systemd[1]: Finished systemd-update-utmp.service - Record System Boot/Shutdown in UTMP.
░░ Subject: A start job for unit systemd-update-utmp.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-update-utmp.service has finished successfully.
░░
░░ The job identifier is 258.
Dec 14 11:26:33 localhost systemd[1]: Reached target sysinit.target - System Initialization.
░░ Subject: A start job for unit sysinit.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sysinit.target has finished successfully.
░░
░░ The job identifier is 125.
Dec 14 11:26:33 localhost systemd[1]: Started dnf-makecache.timer - dnf makecache --timer.
░░ Subject: A start job for unit dnf-makecache.timer has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dnf-makecache.timer has finished successfully.
░░
░░ The job identifier is 202.
Dec 14 11:26:33 localhost kernel: RAPL PMU: API unit is 2^-32 Joules, 0 fixed counters, 655360 ms ovfl timer
Dec 14 11:26:33 localhost systemd[1]: Started fstrim.timer - Discard unused filesystem blocks once a week.
░░ Subject: A start job for unit fstrim.timer has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit fstrim.timer has finished successfully.
░░
░░ The job identifier is 201.
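(Note: dnf-makecache.timer and fstrim.timer are ordinary systemd timers, so ensuring one is enabled from a playbook is a single task; a sketch, with fstrim.timer chosen because it appears above.)

  - name: Ensure the weekly fstrim timer is enabled and running
    ansible.builtin.systemd_service:
      name: fstrim.timer
      enabled: true
      state: started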
Dec 14 11:26:33 localhost kernel: piix4_smbus 0000:00:01.3: SMBus base address uninitialized - upgrade BIOS or use force_addr=0xaddr
Dec 14 11:26:33 localhost systemd[1]: Started logrotate.timer - Daily rotation of log files.
░░ Subject: A start job for unit logrotate.timer has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit logrotate.timer has finished successfully.
░░
░░ The job identifier is 209.
Dec 14 11:26:33 localhost systemd[1]: Started systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories.
░░ Subject: A start job for unit systemd-tmpfiles-clean.timer has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-tmpfiles-clean.timer has finished successfully.
░░
░░ The job identifier is 210.
Dec 14 11:26:33 localhost systemd[1]: Reached target timers.target - Timer Units.
░░ Subject: A start job for unit timers.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit timers.target has finished successfully.
░░
░░ The job identifier is 200.
Dec 14 11:26:33 localhost systemd[1]: Listening on dbus.socket - D-Bus System Message Bus Socket.
░░ Subject: A start job for unit dbus.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dbus.socket has finished successfully.
░░
░░ The job identifier is 206.
Dec 14 11:26:33 localhost systemd[1]: Listening on pcscd.socket - PC/SC Smart Card Daemon Activation Socket.
░░ Subject: A start job for unit pcscd.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit pcscd.socket has finished successfully.
░░
░░ The job identifier is 222.
Dec 14 11:26:33 localhost systemd[1]: Listening on sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket.
░░ Subject: A start job for unit sssd-kcm.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sssd-kcm.socket has finished successfully.
░░
░░ The job identifier is 214.
Dec 14 11:26:33 localhost systemd[1]: Listening on systemd-hostnamed.socket - Hostname Service Socket.
░░ Subject: A start job for unit systemd-hostnamed.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-hostnamed.socket has finished successfully.
░░
░░ The job identifier is 223.
Dec 14 11:26:33 localhost systemd[1]: Reached target sockets.target - Socket Units.
░░ Subject: A start job for unit sockets.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sockets.target has finished successfully.
░░
░░ The job identifier is 211.
Dec 14 11:26:33 localhost systemd[1]: Starting dbus-broker.service - D-Bus System Message Bus...
░░ Subject: A start job for unit dbus-broker.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dbus-broker.service has begun execution.
░░
░░ The job identifier is 207.
Dec 14 11:26:33 localhost systemd[1]: systemd-pcrphase-sysinit.service - TPM PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionSecurity=measured-uki).
░░ Subject: A start job for unit systemd-pcrphase-sysinit.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-pcrphase-sysinit.service has finished successfully.
░░
░░ The job identifier is 135.
Dec 14 11:26:33 localhost systemd[1]: Starting systemd-vconsole-setup.service - Virtual Console Setup...
░░ Subject: A start job for unit systemd-vconsole-setup.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-vconsole-setup.service has begun execution.
░░
░░ The job identifier is 318.
Dec 14 11:26:33 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit systemd-vconsole-setup.service has successfully entered the 'dead' state.
Dec 14 11:26:33 localhost systemd[1]: Stopped systemd-vconsole-setup.service - Virtual Console Setup.
░░ Subject: A stop job for unit systemd-vconsole-setup.service has finished
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A stop job for unit systemd-vconsole-setup.service has finished.
░░
░░ The job identifier is 318 and the job result is done.
Dec 14 11:26:33 localhost systemd[1]: Starting systemd-vconsole-setup.service - Virtual Console Setup...
░░ Subject: A start job for unit systemd-vconsole-setup.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-vconsole-setup.service has begun execution.
░░
░░ The job identifier is 318.
Dec 14 11:26:33 localhost systemd[1]: Started dbus-broker.service - D-Bus System Message Bus.
░░ Subject: A start job for unit dbus-broker.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dbus-broker.service has finished successfully.
░░
░░ The job identifier is 207.
Dec 14 11:26:33 localhost systemd[1]: Reached target basic.target - Basic System.
░░ Subject: A start job for unit basic.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit basic.target has finished successfully.
░░
░░ The job identifier is 122.
Dec 14 11:26:33 localhost dbus-broker-launch[637]: Ready
Dec 14 11:26:33 localhost systemd[1]: Starting chronyd.service - NTP client/server...
░░ Subject: A start job for unit chronyd.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit chronyd.service has begun execution.
░░
░░ The job identifier is 268.
Dec 14 11:26:33 localhost systemd[1]: Starting cloud-init-local.service - Initial cloud-init job (pre-networking)...
░░ Subject: A start job for unit cloud-init-local.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-init-local.service has begun execution.
░░
░░ The job identifier is 275.
Dec 14 11:26:33 localhost systemd[1]: Starting dracut-shutdown.service - Restore /run/initramfs on shutdown...
░░ Subject: A start job for unit dracut-shutdown.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dracut-shutdown.service has begun execution.
░░
░░ The job identifier is 184.
Dec 14 11:26:33 localhost systemd[1]: Started irqbalance.service - irqbalance daemon.
░░ Subject: A start job for unit irqbalance.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit irqbalance.service has finished successfully.
░░
░░ The job identifier is 234.
Dec 14 11:26:33 localhost systemd[1]: Started rngd.service - Hardware RNG Entropy Gatherer Daemon.
░░ Subject: A start job for unit rngd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rngd.service has finished successfully.
░░
░░ The job identifier is 259.
Dec 14 11:26:33 localhost systemd[1]: Starting rsyslog.service - System Logging Service...
░░ Subject: A start job for unit rsyslog.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rsyslog.service has begun execution.
░░
░░ The job identifier is 272.
Dec 14 11:26:33 localhost systemd[1]: ssh-host-keys-migration.service - Update OpenSSH host key permissions was skipped because of an unmet condition check (ConditionPathExists=!/var/lib/.ssh-host-keys-migration).
░░ Subject: A start job for unit ssh-host-keys-migration.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit ssh-host-keys-migration.service has finished successfully.
░░
░░ The job identifier is 267.
Dec 14 11:26:33 localhost systemd[1]: sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
░░ Subject: A start job for unit sshd-keygen@ecdsa.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen@ecdsa.service has finished successfully.
░░
░░ The job identifier is 263.
Dec 14 11:26:33 localhost systemd[1]: sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
░░ Subject: A start job for unit sshd-keygen@ed25519.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen@ed25519.service has finished successfully.
░░
░░ The job identifier is 265.
Dec 14 11:26:33 localhost systemd[1]: sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
░░ Subject: A start job for unit sshd-keygen@rsa.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen@rsa.service has finished successfully.
░░
░░ The job identifier is 266.
Dec 14 11:26:33 localhost systemd[1]: Reached target sshd-keygen.target.
░░ Subject: A start job for unit sshd-keygen.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sshd-keygen.target has finished successfully.
░░
░░ The job identifier is 262.
Dec 14 11:26:33 localhost systemd[1]: sssd.service - System Security Services Daemon was skipped because no trigger condition checks were met.
░░ Subject: A start job for unit sssd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit sssd.service has finished successfully.
░░
░░ The job identifier is 237.
Dec 14 11:26:33 localhost systemd[1]: Reached target nss-user-lookup.target - User and Group Name Lookups.
░░ Subject: A start job for unit nss-user-lookup.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit nss-user-lookup.target has finished successfully.
░░
░░ The job identifier is 238.
Dec 14 11:26:33 localhost systemd[1]: Starting systemd-logind.service - User Login Management...
░░ Subject: A start job for unit systemd-logind.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-logind.service has begun execution.
░░
░░ The job identifier is 250.
Dec 14 11:26:33 localhost systemd[1]: Finished dracut-shutdown.service - Restore /run/initramfs on shutdown.
░░ Subject: A start job for unit dracut-shutdown.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit dracut-shutdown.service has finished successfully.
░░
░░ The job identifier is 184.
Dec 14 11:26:33 localhost (qbalance)[649]: irqbalance.service: Referenced but unset environment variable evaluates to an empty string: IRQBALANCE_ARGS
Dec 14 11:26:33 localhost systemd[1]: Started rsyslog.service - System Logging Service.
░░ Subject: A start job for unit rsyslog.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rsyslog.service has finished successfully.
░░
░░ The job identifier is 272.
Dec 14 11:26:33 localhost rsyslogd[651]: imjournal: filecreatemode is not set, using default 0644 [v8.2408.0-2.el10 try https://www.rsyslog.com/e/2186 ]
Dec 14 11:26:33 localhost rsyslogd[651]: [origin software="rsyslogd" swVersion="8.2408.0-2.el10" x-pid="651" x-info="https://www.rsyslog.com"] start
Dec 14 11:26:33 localhost systemd-logind[653]: New seat seat0.
░░ Subject: A new seat seat0 is now available
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░ Documentation: sd-login(3)
░░
░░ A new seat seat0 has been configured and is now available.
Dec 14 11:26:33 localhost systemd-logind[653]: Watching system buttons on /dev/input/event0 (Power Button)
Dec 14 11:26:33 localhost systemd-logind[653]: Watching system buttons on /dev/input/event1 (Sleep Button)
Dec 14 11:26:33 localhost systemd-logind[653]: Watching system buttons on /dev/input/event2 (AT Translated Set 2 keyboard)
Dec 14 11:26:33 localhost systemd[1]: Started systemd-logind.service - User Login Management.
░░ Subject: A start job for unit systemd-logind.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-logind.service has finished successfully.
░░
░░ The job identifier is 250.
Dec 14 11:26:33 localhost systemd[1]: Finished systemd-vconsole-setup.service - Virtual Console Setup.
░░ Subject: A start job for unit systemd-vconsole-setup.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-vconsole-setup.service has finished successfully.
░░
░░ The job identifier is 318.
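(Note: the irqbalance message above is harmless but noisy: the unit's EnvironmentFile, /etc/sysconfig/irqbalance on RHEL-family hosts, leaves IRQBALANCE_ARGS commented out. A hedged sketch that defines it explicitly; the empty value is an assumption about the intended behavior.)

  - name: Define IRQBALANCE_ARGS so the unit stops referencing an unset variable
    ansible.builtin.lineinfile:
      path: /etc/sysconfig/irqbalance
      regexp: '^#?IRQBALANCE_ARGS='
      line: IRQBALANCE_ARGS=""

  - name: Restart irqbalance to re-read the environment file
    ansible.builtin.systemd_service:
      name: irqbalance
      state: restarted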
Dec 14 11:26:33 localhost systemd[1]: run-credentials-systemd\x2dvconsole\x2dsetup.service.mount: Deactivated successfully.
░░ Subject: Unit succeeded
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ The unit run-credentials-systemd\x2dvconsole\x2dsetup.service.mount has successfully entered the 'dead' state.
Dec 14 11:26:33 localhost rsyslogd[651]: imjournal: journal files changed, reloading... [v8.2408.0-2.el10 try https://www.rsyslog.com/e/0 ]
Dec 14 11:26:33 localhost rngd[650]: Disabling 7: PKCS11 Entropy generator (pkcs11)
Dec 14 11:26:33 localhost rngd[650]: Disabling 5: NIST Network Entropy Beacon (nist)
Dec 14 11:26:33 localhost rngd[650]: Disabling 9: Qrypt quantum entropy beacon (qrypt)
Dec 14 11:26:33 localhost rngd[650]: Disabling 10: Named pipe entropy input (namedpipe)
Dec 14 11:26:33 localhost rngd[650]: Initializing available sources
Dec 14 11:26:33 localhost rngd[650]: [hwrng ]: Initialization Failed
Dec 14 11:26:33 localhost rngd[650]: [rdrand]: Enabling RDRAND rng support
Dec 14 11:26:33 localhost rngd[650]: [rdrand]: Initialized
Dec 14 11:26:33 localhost rngd[650]: [jitter]: JITTER timeout set to 5 sec
Dec 14 11:26:33 localhost chronyd[664]: chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG)
Dec 14 11:26:33 localhost rngd[650]: [jitter]: Initializing AES buffer
Dec 14 11:26:33 localhost chronyd[664]: Frequency 0.000 +/- 1000000.000 ppm read from /var/lib/chrony/drift
Dec 14 11:26:33 localhost systemd[1]: Started chronyd.service - NTP client/server.
░░ Subject: A start job for unit chronyd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit chronyd.service has finished successfully.
░░
░░ The job identifier is 268.
Dec 14 11:26:33 localhost chronyd[664]: Loaded seccomp filter (level 2)
Dec 14 11:26:36 localhost cloud-init[671]: Cloud-init v. 24.1.4-21.el10 running 'init-local' at Sat, 14 Dec 2024 16:26:36 +0000. Up 12.60 seconds.
Dec 14 11:26:36 localhost dhcpcd[673]: dhcpcd-10.0.6 starting
Dec 14 11:26:36 localhost kernel: 8021q: 802.1Q VLAN Support v1.8
Dec 14 11:26:36 localhost systemd[1]: Listening on systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch.
░░ Subject: A start job for unit systemd-rfkill.socket has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-rfkill.socket has finished successfully.
░░
░░ The job identifier is 328.
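(Note: chronyd starts here with only a drift file and, further down, learns its NTP sources from DHCP/cloud-init. Pinning a static source from a playbook is a two-task change; a sketch with an example pool name, not one of the sources this host actually uses.)

  - name: Add a static NTP source (pool name is an example)
    ansible.builtin.lineinfile:
      path: /etc/chrony.conf
      line: pool 2.pool.ntp.org iburst

  - name: Restart chronyd to load the new source
    ansible.builtin.systemd_service:
      name: chronyd
      state: restarted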
Dec 14 11:26:37 localhost kernel: cfg80211: Loading compiled-in X.509 certificates for regulatory database
Dec 14 11:26:37 localhost kernel: Loaded X.509 cert 'sforshee: 00b28ddf47aef9cea7'
Dec 14 11:26:37 localhost kernel: Loaded X.509 cert 'wens: 61c038651aabdcf94bd0ac7ff06c7248db18c600'
Dec 14 11:26:37 localhost dhcpcd[676]: DUID 00:01:00:01:2e:f0:6e:3d:0e:03:6a:4a:4d:55
Dec 14 11:26:37 localhost dhcpcd[676]: eth0: IAID 6a:4a:4d:55
Dec 14 11:26:37 localhost kernel: platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
Dec 14 11:26:37 localhost kernel: cfg80211: failed to load regulatory.db
Dec 14 11:26:38 localhost rngd[650]: [jitter]: Unable to obtain AES key, disabling JITTER source
Dec 14 11:26:38 localhost rngd[650]: [jitter]: Initialization Failed
Dec 14 11:26:38 localhost rngd[650]: Process privileges have been dropped to 2:2
Dec 14 11:26:38 localhost dhcpcd[676]: eth0: soliciting a DHCP lease
Dec 14 11:26:38 localhost dhcpcd[676]: eth0: offered 10.31.43.117 from 10.31.40.1
Dec 14 11:26:38 localhost dhcpcd[676]: eth0: leased 10.31.43.117 for 3600 seconds
Dec 14 11:26:38 localhost dhcpcd[676]: eth0: adding route to 10.31.40.0/22
Dec 14 11:26:38 localhost dhcpcd[676]: eth0: adding default route via 10.31.40.1
Dec 14 11:26:38 localhost dhcpcd[676]: control command: /usr/sbin/dhcpcd --dumplease --ipv4only eth0
Dec 14 11:26:38 localhost systemd[1]: Starting systemd-hostnamed.service - Hostname Service...
░░ Subject: A start job for unit systemd-hostnamed.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-hostnamed.service has begun execution.
░░
░░ The job identifier is 337.
Dec 14 11:26:38 localhost systemd[1]: Started systemd-hostnamed.service - Hostname Service.
░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-hostnamed.service has finished successfully.
░░
░░ The job identifier is 337.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-hostnamed[696]: Hostname set to <ip-10-31-43-117.us-east-1.aws.redhat.com> (static)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init-local.service - Initial cloud-init job (pre-networking).
░░ Subject: A start job for unit cloud-init-local.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-init-local.service has finished successfully.
░░
░░ The job identifier is 275.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target network-pre.target - Preparation for Network.
░░ Subject: A start job for unit network-pre.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit network-pre.target has finished successfully.
░░
░░ The job identifier is 156.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager.service - Network Manager...
░░ Subject: A start job for unit NetworkManager.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager.service has begun execution.
░░
░░ The job identifier is 205.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7758] NetworkManager (version 1.51.4-1.el10) is starting... (boot:38eff4b5-157f-400c-9c9a-01c5bd7302d2)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7760] Read config: /etc/NetworkManager/NetworkManager.conf, /etc/NetworkManager/conf.d/30-cloud-init-ip6-addr-gen-mode.conf
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7889] manager[0x557f01cc5a10]: monitoring kernel firmware directory '/lib/firmware'.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7922] hostname: hostname: using hostnamed
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7922] hostname: static hostname changed from (none) to "ip-10-31-43-117.us-east-1.aws.redhat.com"
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7926] dns-mgr: init: dns=default,systemd-resolved rc-manager=symlink (auto)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7930] manager[0x557f01cc5a10]: rfkill: Wi-Fi hardware radio set enabled
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7930] manager[0x557f01cc5a10]: rfkill: WWAN hardware radio set enabled
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7986] manager: rfkill: Wi-Fi enabled by radio killswitch; enabled by state file
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7987] manager: rfkill: WWAN enabled by radio killswitch; enabled by state file
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.7987] manager: Networking is enabled by state file
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8012] settings: Loaded settings plugin: keyfile (internal)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service...
░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager-dispatcher.service has begun execution.
░░
░░ The job identifier is 415.
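(Note: NetworkManager reported reading drop-ins from /etc/NetworkManager/conf.d/ at startup, which is how cloud-init injected its ip6-addr-gen-mode setting. Shipping such a drop-in from Ansible looks roughly like this; the file name and keys are illustrative, not taken from this run.)

  - name: Install a NetworkManager drop-in (name and content are examples)
    ansible.builtin.copy:
      dest: /etc/NetworkManager/conf.d/90-example.conf
      mode: "0644"
      content: |
        [main]
        dns=default

  - name: Ask NetworkManager to re-read its configuration
    ansible.builtin.command: nmcli general reload
    changed_when: true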
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8087] dhcp: init: Using DHCP client 'internal'
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8090] manager: (lo): new Loopback device (/org/freedesktop/NetworkManager/Devices/1)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8101] device (lo): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8125] device (lo): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8131] device (lo): Activation: starting connection 'lo' (77f275e6-4c01-4392-ab9b-e140983cfde9)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8137] manager: (eth0): new Ethernet device (/org/freedesktop/NetworkManager/Devices/2)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8141] device (eth0): state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager.service - Network Manager.
░░ Subject: A start job for unit NetworkManager.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager.service has finished successfully.
░░
░░ The job identifier is 205.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8174] bus-manager: acquired D-Bus service "org.freedesktop.NetworkManager"
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target network.target - Network.
░░ Subject: A start job for unit network.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit network.target has finished successfully.
░░
░░ The job identifier is 208.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8196] device (lo): state change: disconnected -> prepare (reason 'none', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8198] device (lo): state change: prepare -> config (reason 'none', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8200] device (lo): state change: config -> ip-config (reason 'none', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8201] device (eth0): carrier: link connected
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8203] device (lo): state change: ip-config -> ip-check (reason 'none', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8218] device (eth0): state change: unavailable -> disconnected (reason 'carrier-changed', managed-type: 'full')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting NetworkManager-wait-online.service - Network Manager Wait Online...
░░ Subject: A start job for unit NetworkManager-wait-online.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager-wait-online.service has begun execution.
░░
░░ The job identifier is 204.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8237] policy: auto-activating connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8242] device (eth0): Activation: starting connection 'cloud-init eth0' (1dd9a779-d327-56e1-8454-c65e2556c12c)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8245] device (eth0): state change: disconnected -> prepare (reason 'none', managed-type: 'full')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8248] manager: NetworkManager state is now CONNECTING
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8252] device (eth0): state change: prepare -> config (reason 'none', managed-type: 'full')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8261] device (eth0): state change: config -> ip-config (reason 'none', managed-type: 'full')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8272] dhcp4 (eth0): activation: beginning transaction (timeout in 45 seconds)
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting gssproxy.service - GSSAPI Proxy Daemon...
░░ Subject: A start job for unit gssproxy.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit gssproxy.service has begun execution.
░░
░░ The job identifier is 244.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.8299] dhcp4 (eth0): state changed new lease, address=10.31.43.117, acd pending
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started gssproxy.service - GSSAPI Proxy Daemon.
░░ Subject: A start job for unit gssproxy.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit gssproxy.service has finished successfully.
░░
░░ The job identifier is 244.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: rpc-gssd.service - RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).
░░ Subject: A start job for unit rpc-gssd.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit rpc-gssd.service has finished successfully.
░░
░░ The job identifier is 245.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target nfs-client.target - NFS client services.
░░ Subject: A start job for unit nfs-client.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit nfs-client.target has finished successfully.
░░
░░ The job identifier is 241.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs-pre.target - Preparation for Remote File Systems.
░░ Subject: A start job for unit remote-fs-pre.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit remote-fs-pre.target has finished successfully.
░░
░░ The job identifier is 249.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target remote-cryptsetup.target - Remote Encrypted Volumes.
░░ Subject: A start job for unit remote-cryptsetup.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit remote-cryptsetup.target has finished successfully.
░░
░░ The job identifier is 260.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target remote-fs.target - Remote File Systems.
░░ Subject: A start job for unit remote-fs.target has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit remote-fs.target has finished successfully.
░░
░░ The job identifier is 271.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: systemd-pcrphase.service - TPM PCR Barrier (User) was skipped because of an unmet condition check (ConditionSecurity=measured-uki).
░░ Subject: A start job for unit systemd-pcrphase.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit systemd-pcrphase.service has finished successfully.
░░
░░ The job identifier is 170.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service.
░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager-dispatcher.service has finished successfully.
░░
░░ The job identifier is 415.
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9503] device (lo): state change: ip-check -> secondaries (reason 'none', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9512] device (lo): state change: secondaries -> activated (reason 'none', managed-type: 'external')
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9539] device (lo): Activation: successful, device activated.
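(Note: NetworkManager-dispatcher.service, started above, runs executables from /etc/NetworkManager/dispatcher.d/ on interface events, passing the interface and action as arguments. A minimal sketch that installs one such hook; the script body is an example, not part of this run.)

  - name: Install a dispatcher hook (illustrative; logs interface events)
    ansible.builtin.copy:
      dest: /etc/NetworkManager/dispatcher.d/90-log-events
      mode: "0755"
      content: |
        #!/bin/sh
        # $1 = interface name, $2 = action (up, down, dhcp4-change, ...)
        logger -t nm-dispatch "interface=$1 action=$2"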
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9964] dhcp4 (eth0): state changed new lease, address=10.31.43.117
Dec 14 11:26:38 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193598.9975] policy: set 'cloud-init eth0' (eth0) as default for IPv4 routing and DNS
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0371] device (eth0): state change: ip-config -> ip-check (reason 'none', managed-type: 'full')
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0438] device (eth0): state change: ip-check -> secondaries (reason 'none', managed-type: 'full')
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0445] device (eth0): state change: secondaries -> activated (reason 'none', managed-type: 'full')
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0450] manager: NetworkManager state is now CONNECTED_SITE
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0460] device (eth0): Activation: successful, device activated.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0466] manager: NetworkManager state is now CONNECTED_GLOBAL
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com NetworkManager[703]: [1734193599.0468] manager: startup complete
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished NetworkManager-wait-online.service - Network Manager Wait Online.
░░ Subject: A start job for unit NetworkManager-wait-online.service has finished successfully
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit NetworkManager-wait-online.service has finished successfully.
░░
░░ The job identifier is 204.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting cloud-init.service - Initial cloud-init job (metadata service crawler)...
░░ Subject: A start job for unit cloud-init.service has begun execution
░░ Defined-By: systemd
░░ Support: https://access.redhat.com/support
░░
░░ A start job for unit cloud-init.service has begun execution.
░░
░░ The job identifier is 274.
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.11.160.238
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.18.100.10
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.2.32.37
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Added source 10.2.32.38
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Cloud-init v. 24.1.4-21.el10 running 'init' at Sat, 14 Dec 2024 16:26:39 +0000. Up 15.52 seconds.
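(Note: once 'cloud-init eth0' is the default for IPv4 routing and DNS, the lease NetworkManager holds can be read back in terse form. A read-only sketch; the field list is an example.)

  - name: Read the IPv4 address and gateway NetworkManager holds for eth0
    ansible.builtin.command: nmcli -g IP4.ADDRESS,IP4.GATEWAY device show eth0
    register: eth0_ipv4
    changed_when: false

  - name: Show the values
    ansible.builtin.debug:
      var: eth0_ipv4.stdout_lines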
Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: ++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++++ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | Device | Up | Address | Mask | Scope | Hw-Address | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | eth0 | True | 10.31.43.117 | 255.255.252.0 | global | 0e:03:6a:4a:4d:55 | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | eth0 | True | fe80::c03:6aff:fe4a:4d55/64 | . | link | 0e:03:6a:4a:4d:55 | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | lo | True | 127.0.0.1 | 255.0.0.0 | host | . | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | lo | True | ::1/128 | . | host | . | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +--------+------+-----------------------------+---------------+--------+-------------------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: ++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+------------+---------------+-----------+-------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | Route | Destination | Gateway | Genmask | Interface | Flags | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+------------+---------------+-----------+-------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 0 | 0.0.0.0 | 10.31.40.1 | 0.0.0.0 | eth0 | UG | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 1 | 10.31.40.0 | 0.0.0.0 | 255.255.252.0 | eth0 | U | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+------------+---------------+-----------+-------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +++++++++++++++++++Route IPv6 info+++++++++++++++++++ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+---------+-----------+-------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | Route | Destination | Gateway | Interface | Flags | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+---------+-----------+-------+ Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 0 | fe80::/64 | :: | eth0 | U | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: | 2 | multicast | :: | eth0 | U | Dec 14 11:26:39 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: ci-info: +-------+-------------+---------+-----------+-------+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Generating public/private rsa key pair. 
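[Annotation] The ci-info tables are cloud-init's snapshot of interface and routing state and should agree with the kernel's view, which can be reproduced with generic iproute2 commands (shown for reference):
    ip -br addr show   # device/address/mask, as in the "Net device info" table
    ip route show      # default via 10.31.40.1 dev eth0, as in the IPv4 route table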
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key fingerprint is: Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: SHA256:4kPeOc6dyUInlbtyxVsYnSTAiNqxtK7A/xVLG9s/emE root@ip-10-31-43-117.us-east-1.aws.redhat.com Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key's randomart image is: Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +---[RSA 3072]----+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . o.. | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | + . . . . | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | + + . + . | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . + o . o | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . .o S. o o | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o +.+oOo E . | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o .+.Oo.+ + | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o =o+o++ | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | .. o+*o.. | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +----[SHA256]-----+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Generating public/private ecdsa key pair. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key fingerprint is: Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: SHA256:RdoWwMOSgw51mBD28FuLabo0FGe7XvtI9kDaq60uA5s root@ip-10-31-43-117.us-east-1.aws.redhat.com Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key's randomart image is: Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +---[ECDSA 256]---+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | =+.+.+..o | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | ..++.+ ++ . | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | .o= .o..+ | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | +.* . o | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . * o S | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |.. o = | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | ++ o * | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |E.o+ = * | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | .+=o=.o | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +----[SHA256]-----+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Generating public/private ed25519 key pair. 
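[Annotation] cloud-init generates the RSA, ECDSA, and ed25519 host keys on first boot via ssh-keygen; the fingerprints it logs can be re-derived at any time from the public key files (sketch, assuming the default key paths used above):
    ssh-keygen -l -f /etc/ssh/ssh_host_rsa_key.pub      # 3072 SHA256:4kPe... (RSA)
    ssh-keygen -l -f /etc/ssh/ssh_host_ecdsa_key.pub    # 256 SHA256:RdoW... (ECDSA)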
Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key fingerprint is: Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: SHA256:v1uLzZ9r22pXF2QsO+gHOGERom4ErsGOiGgKtl7LE5E root@ip-10-31-43-117.us-east-1.aws.redhat.com Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: The key's randomart image is: Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +--[ED25519 256]--+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . . oo . | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |. . . . .o . + | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | o ..o . o . = | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |* oEo o o o . | Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |*= .o S o . . .| Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |= ... . . . o| Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |.. .. . o o| Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: |. o.. * .oo.| Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: | . o. +.++*=.| Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[790]: +----[SHA256]-----+ Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-init.service - Initial cloud-init job (metadata service crawler). ░░ Subject: A start job for unit cloud-init.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.service has finished successfully. ░░ ░░ The job identifier is 274. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-config.target - Cloud-config availability. ░░ Subject: A start job for unit cloud-config.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.target has finished successfully. ░░ ░░ The job identifier is 277. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target network-online.target - Network is Online. ░░ Subject: A start job for unit network-online.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit network-online.target has finished successfully. ░░ ░░ The job identifier is 203. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting cloud-config.service - Apply the settings specified in cloud-config... ░░ Subject: A start job for unit cloud-config.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.service has begun execution. ░░ ░░ The job identifier is 276. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting kdump.service - Crash recovery kernel arming... ░░ Subject: A start job for unit kdump.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit kdump.service has begun execution. 
░░ ░░ The job identifier is 256. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting restraintd.service - The restraint harness.... ░░ Subject: A start job for unit restraintd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit restraintd.service has begun execution. ░░ ░░ The job identifier is 239. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting rpc-statd-notify.service - Notify NFS peers of a restart... ░░ Subject: A start job for unit rpc-statd-notify.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-statd-notify.service has begun execution. ░░ ░░ The job identifier is 242. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon... ░░ Subject: A start job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 261. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com sm-notify[872]: Version 2.7.1 starting Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started rpc-statd-notify.service - Notify NFS peers of a restart. ░░ Subject: A start job for unit rpc-statd-notify.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit rpc-statd-notify.service has finished successfully. ░░ ░░ The job identifier is 242. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com (sshd)[873]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started restraintd.service - The restraint harness.. ░░ Subject: A start job for unit restraintd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit restraintd.service has finished successfully. ░░ ░░ The job identifier is 239. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[873]: Server listening on 0.0.0.0 port 22. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[873]: Server listening on :: port 22. Dec 14 11:26:40 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon. ░░ Subject: A start job for unit sshd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has finished successfully. ░░ ░░ The job identifier is 261. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[916]: Cloud-init v. 24.1.4-21.el10 running 'modules:config' at Sat, 14 Dec 2024 16:26:41 +0000. Up 17.21 seconds. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[873]: Received signal 15; terminating. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopping sshd.service - OpenSSH server daemon... ░░ Subject: A stop job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 507. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd.service: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit sshd.service has successfully entered the 'dead' state. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopped sshd.service - OpenSSH server daemon. ░░ Subject: A stop job for unit sshd.service has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd.service has finished. ░░ ░░ The job identifier is 507 and the job result is done. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopped target sshd-keygen.target. ░░ Subject: A stop job for unit sshd-keygen.target has finished ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd-keygen.target has finished. ░░ ░░ The job identifier is 591 and the job result is done. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Stopping sshd-keygen.target... ░░ Subject: A stop job for unit sshd-keygen.target has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A stop job for unit sshd-keygen.target has begun execution. ░░ ░░ The job identifier is 591. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: ssh-host-keys-migration.service - Update OpenSSH host key permissions was skipped because of an unmet condition check (ConditionPathExists=!/var/lib/.ssh-host-keys-migration). ░░ Subject: A start job for unit ssh-host-keys-migration.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit ssh-host-keys-migration.service has finished successfully. ░░ ░░ The job identifier is 590. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ecdsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ecdsa.service has finished successfully. ░░ ░░ The job identifier is 586. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@ed25519.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@ed25519.service has finished successfully. ░░ ░░ The job identifier is 588. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target). ░░ Subject: A start job for unit sshd-keygen@rsa.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen@rsa.service has finished successfully. ░░ ░░ The job identifier is 589. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target sshd-keygen.target. 
░░ Subject: A start job for unit sshd-keygen.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd-keygen.target has finished successfully. ░░ ░░ The job identifier is 591. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting sshd.service - OpenSSH server daemon... ░░ Subject: A start job for unit sshd.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has begun execution. ░░ ░░ The job identifier is 507. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com (sshd)[920]: sshd.service: Referenced but unset environment variable evaluates to an empty string: OPTIONS Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[920]: Server listening on 0.0.0.0 port 22. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com sshd[920]: Server listening on :: port 22. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started sshd.service - OpenSSH server daemon. ░░ Subject: A start job for unit sshd.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit sshd.service has finished successfully. ░░ ░░ The job identifier is 507. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com restraintd[877]: Listening on http://localhost:8081 Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-config.service - Apply the settings specified in cloud-config. ░░ Subject: A start job for unit cloud-config.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-config.service has finished successfully. ░░ ░░ The job identifier is 276. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting cloud-final.service - Execute cloud user/final scripts... ░░ Subject: A start job for unit cloud-final.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-final.service has begun execution. ░░ ░░ The job identifier is 278. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting systemd-user-sessions.service - Permit User Sessions... ░░ Subject: A start job for unit systemd-user-sessions.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-user-sessions.service has begun execution. ░░ ░░ The job identifier is 240. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished systemd-user-sessions.service - Permit User Sessions. ░░ Subject: A start job for unit systemd-user-sessions.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-user-sessions.service has finished successfully. ░░ ░░ The job identifier is 240. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started crond.service - Command Scheduler. ░░ Subject: A start job for unit crond.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit crond.service has finished successfully. ░░ ░░ The job identifier is 255. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started getty@tty1.service - Getty on tty1. 
░░ Subject: A start job for unit getty@tty1.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit getty@tty1.service has finished successfully. ░░ ░░ The job identifier is 227. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started serial-getty@ttyS0.service - Serial Getty on ttyS0. ░░ Subject: A start job for unit serial-getty@ttyS0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit serial-getty@ttyS0.service has finished successfully. ░░ ░░ The job identifier is 231. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target getty.target - Login Prompts. ░░ Subject: A start job for unit getty.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit getty.target has finished successfully. ░░ ░░ The job identifier is 226. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target multi-user.target - Multi-User System. ░░ Subject: A start job for unit multi-user.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit multi-user.target has finished successfully. ░░ ░░ The job identifier is 121. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) STARTUP (1.7.0) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP... ░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp-runlevel.service has begun execution. ░░ ░░ The job identifier is 257. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) INFO (Syslog will be used instead of sendmail.) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 98% if used.) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com crond[926]: (CRON) INFO (running with inotify support) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-update-utmp-runlevel.service has successfully entered the 'dead' state. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP. ░░ Subject: A start job for unit systemd-update-utmp-runlevel.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-update-utmp-runlevel.service has finished successfully. ░░ ░░ The job identifier is 257. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Detected change(s) in the following file(s): /etc/fstab Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1020]: Cloud-init v. 24.1.4-21.el10 running 'modules:final' at Sat, 14 Dec 2024 16:26:41 +0000. Up 17.71 seconds. 
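[Annotation] Note the sshd stop/start pair a few entries earlier: sshd came up once from sshd-keygen.target, was terminated while cloud-init's 'modules:config' stage ran, then relaunched — the usual pattern when cloud-init (re)writes SSH configuration or host keys. The per-unit sequence is easiest to read filtered out of the journal (generic command, not part of the captured log):
    journalctl -b -u sshd.service --no-pager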
Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1076]: ############################################################# Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1077]: -----BEGIN SSH HOST KEY FINGERPRINTS----- Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1079]: 256 SHA256:RdoWwMOSgw51mBD28FuLabo0FGe7XvtI9kDaq60uA5s root@ip-10-31-43-117.us-east-1.aws.redhat.com (ECDSA) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1085]: 256 SHA256:v1uLzZ9r22pXF2QsO+gHOGERom4ErsGOiGgKtl7LE5E root@ip-10-31-43-117.us-east-1.aws.redhat.com (ED25519) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1090]: 3072 SHA256:4kPeOc6dyUInlbtyxVsYnSTAiNqxtK7A/xVLG9s/emE root@ip-10-31-43-117.us-east-1.aws.redhat.com (RSA) Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1092]: -----END SSH HOST KEY FINGERPRINTS----- Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1095]: ############################################################# Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com cloud-init[1020]: Cloud-init v. 24.1.4-21.el10 finished at Sat, 14 Dec 2024 16:26:41 +0000. Datasource DataSourceEc2Local. Up 17.88 seconds Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished cloud-final.service - Execute cloud user/final scripts. ░░ Subject: A start job for unit cloud-final.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-final.service has finished successfully. ░░ ░░ The job identifier is 278. Dec 14 11:26:41 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Reached target cloud-init.target - Cloud-init target. ░░ Subject: A start job for unit cloud-init.target has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit cloud-init.target has finished successfully. ░░ ░░ The job identifier is 273. 
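[Annotation] With cloud-init.target reached, the staged run ('init', 'modules:config', 'modules:final') is complete against the DataSourceEc2Local datasource. cloud-init can summarize this after the fact (reference commands; the timings above came from the journal itself):
    cloud-init status --long
    cloud-init analyze show    # per-stage boot timing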
Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 0 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 0 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 48 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 48 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 49 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 49 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 50 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 50 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 51 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 51 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 52 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 52 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 53 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 53 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 54 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 54 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 55 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 55 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 56 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 56 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 57 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 57 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 58 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 58 affinity is now unmanaged Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: Cannot change IRQ 59 affinity: Permission denied Dec 14 11:26:43 ip-10-31-43-117.us-east-1.aws.redhat.com irqbalance[649]: IRQ 59 affinity is now unmanaged Dec 14 11:26:44 ip-10-31-43-117.us-east-1.aws.redhat.com kernel: block xvda: the capability attribute has been deprecated. 
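[Annotation] The irqbalance "Permission denied" messages above are common on Xen/EC2 guests: the hypervisor fixes certain interrupt vectors, the affinity files are therefore not writable, and irqbalance marks those IRQs unmanaged. This is generally benign; if the noise is unwanted, the IRQs can be excluded explicitly (a sketch for a RHEL-style sysconfig file, assuming the IRQ numbers seen above):
    # /etc/sysconfig/irqbalance
    IRQBALANCE_ARGS="--banirq=48 --banirq=49 --banirq=50"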
Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Selected source 10.2.32.38 Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Rebuilding /boot/initramfs-6.12.0-31.el10.x86_64kdump.img Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1384]: dracut-103-1.el10 Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1387]: Executing: /usr/bin/dracut --list-modules Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1458]: dracut-103-1.el10 Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Executing: /usr/bin/dracut --add kdumpbase --quiet --hostonly --hostonly-cmdline --hostonly-i18n --hostonly-mode strict --hostonly-nics --aggressive-strip --omit "rdma plymouth resume ifcfg earlykdump" --mount "/dev/disk/by-uuid/f3bb1e80-fac3-4b5e-93f6-d763469176c6 /sysroot xfs rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota" --add squash-squashfs --squash-compressor zstd --no-hostonly-default-device -f /boot/initramfs-6.12.0-31.el10.x86_64kdump.img 6.12.0-31.el10.x86_64 Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-networkd' will not be installed, because command '/usr/lib/systemd/systemd-networkd-wait-online' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'busybox' will not be installed, because command 'busybox' could not be found! Dec 14 11:26:45 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmanctl' could not be found! 
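[Annotation] kdumpctl noticed the /etc/fstab change recorded earlier and is rebuilding the kdump initramfs with a host-only dracut invocation (note the --omit list and --squash-compressor zstd). The equivalent manual operations are (reference commands):
    kdumpctl status     # reports whether the kdump kernel is loaded
    kdumpctl rebuild    # regenerates the kdump initramfs image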
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'ifcfg' will not be installed, because it's in the list to be omitted! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'plymouth' will not be installed, because it's in the list to be omitted! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'multipath' will not be installed, because command 'multipath' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'nvmf' will not be installed, because command 'nvme' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'resume' will not be installed, because it's in the list to be omitted! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'biosdevname' will not be installed, because command 'biosdevname' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'earlykdump' will not be installed, because it's in the list to be omitted! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-bsod' will not be installed, because command '/usr/lib/systemd/systemd-bsod' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-pcrphase' will not be installed, because command '/usr/lib/systemd/systemd-pcrphase' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command 'portablectl' could not be found! 
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-portabled' will not be installed, because command '/usr/lib/systemd/systemd-portabled' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-resolved' will not be installed, because command '/usr/lib/systemd/systemd-resolved' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-timesyncd' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'systemd-timesyncd' will not be installed, because command '/usr/lib/systemd/systemd-time-wait-sync' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'busybox' will not be installed, because command 'busybox' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dbus-daemon' will not be installed, because command 'dbus-daemon' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmanctl' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'connman' will not be installed, because command 'connmand-wait-online' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: 62bluetooth: Could not find any command of '/usr/lib/bluetooth/bluetoothd /usr/libexec/bluetooth/bluetoothd'! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'btrfs' will not be installed, because command 'btrfs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'dmraid' will not be installed, because command 'dmraid' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'mdraid' will not be installed, because command 'mdadm' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'multipath' will not be installed, because command 'multipath' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'cifs' will not be installed, because command 'mount.cifs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsi-iname' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsiadm' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'iscsi' will not be installed, because command 'iscsid' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'nvmf' will not be installed, because command 'nvme' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'mkfs.erofs' could not be found! Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Module 'squash-erofs' will not be installed, because command 'fsck.erofs' could not be found! 
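[Annotation] The long run of "Module ... will not be installed" lines is dracut probing every optional module and dropping those whose binaries are absent or that were explicitly omitted; the list repeats because dracut evaluates the module set in more than one pass during a build. What a given host's dracut can include is listed with:
    dracut --list-modules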
Dec 14 11:26:46 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fips *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fips-crypto-policies *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-ask-password *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-initrd *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-journald *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-modules-load *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-sysctl *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-sysusers *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-tmpfiles *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: systemd-udevd *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: rngd *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: i18n *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: drm *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: prefixdevname *** Dec 14 11:26:47 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: kernel-modules *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: kernel-modules-extra *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: configuration source "/run/depmod.d" does not exist Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: configuration source "/lib/depmod.d" does not exist Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: parsing configuration file "/etc/depmod.d/dist.conf" Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: kernel-modules-extra: /etc/depmod.d/dist.conf: added "updates extra built-in weak-updates" to the list of search directories Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: pcmcia *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Skipping udev rule: 60-pcmcia.rules Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fstab-sys *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: hwdb *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: rootfs-block *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: squash-squashfs *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: terminfo *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: udev-rules *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: dracut-systemd ***
Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: usrmount *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: base *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: fs-lib *** Dec 14 11:26:48 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: kdumpbase *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: memstrack *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: microcode_ctl-fw_dir_override *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl module: mangling fw_dir Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: reset fw_dir to "/lib/firmware/updates /lib/firmware" Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel"... Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: intel: caveats check for kernel version "6.12.0-31.el10.x86_64" passed, adding "/usr/share/microcode_ctl/ucode_with_caveats/intel" to fw_dir variable Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: processing data directory "/usr/share/microcode_ctl/ucode_with_caveats/intel-06-4f-01"... Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: configuration "intel-06-4f-01" is ignored Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: microcode_ctl: final fw_dir: "/usr/share/microcode_ctl/ucode_with_caveats/intel /lib/firmware/updates /lib/firmware" Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: shutdown *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including module: squash-lib *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Including modules done *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Installing kernel module dependencies *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Installing kernel module dependencies done *** Dec 14 11:26:49 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Resolving executable dependencies *** Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Resolving executable dependencies done *** Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Hardlinking files *** Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Mode: real Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Method: sha256 Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Files: 537 Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Linked: 25 files Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Compared: 0 xattrs Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Compared: 48 files Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Saved: 13.58 MiB
Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Duration: 0.163206 seconds Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Hardlinking files done *** Dec 14 11:26:50 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Generating early-microcode cpio image *** Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Constructing GenuineIntel.bin *** Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Constructing GenuineIntel.bin *** Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Store current command line parameters *** Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: Stored kernel commandline: Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: No dracut internal kernel commandline stored in the initramfs Dec 14 11:26:51 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Squashing the files inside the initramfs *** Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Squashing the files inside the initramfs done *** Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Creating image file '/boot/initramfs-6.12.0-31.el10.x86_64kdump.img' *** Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com dracut[1461]: *** Creating initramfs image file '/boot/initramfs-6.12.0-31.el10.x86_64kdump.img' done *** Dec 14 11:26:59 ip-10-31-43-117.us-east-1.aws.redhat.com kernel: PKCS7: Message signed outside of X.509 validity window Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: kexec: loaded kdump kernel Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Starting kdump: [OK] Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com kdumpctl[879]: kdump: Notice: No vmcore creation test performed! Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished kdump.service - Crash recovery kernel arming. ░░ Subject: A start job for unit kdump.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit kdump.service has finished successfully. ░░ ░░ The job identifier is 256. Dec 14 11:27:00 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Startup finished in 1.005s (kernel) + 3.724s (initrd) + 31.760s (userspace) = 36.490s. ░░ Subject: System start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ All system services necessary queued for starting at boot have been ░░ started. Note that this does not mean that the machine is now idle as services ░░ might still be busy with completing start-up. ░░ ░░ Kernel start-up required 1005350 microseconds. ░░ ░░ Initrd start-up required 3724049 microseconds. ░░ ░░ Userspace start-up required 31760904 microseconds. Dec 14 11:27:08 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: systemd-hostnamed.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state.
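[Annotation] The "Startup finished" summary above sums the exact microsecond figures (1005350 + 3724049 + 31760904 = 36490303 µs ≈ 36.490 s), which is why it can differ slightly from adding the rounded per-phase values (1.005 + 3.724 + 31.760 = 36.489). The same breakdown, plus the slowest units, is available from systemd-analyze (generic commands, shown for reference):
    systemd-analyze time
    systemd-analyze blame | head -5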
Dec 14 11:27:51 ip-10-31-43-117.us-east-1.aws.redhat.com chronyd[664]: Selected source 216.66.48.42 (2.centos.pool.ntp.org) Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: Accepted publickey for root from 10.30.34.106 port 52592 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4322) opened. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Created slice user-0.slice - User Slice of UID 0. ░░ Subject: A start job for unit user-0.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-0.slice has finished successfully. ░░ ░░ The job identifier is 602. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting user-runtime-dir@0.service - User Runtime Directory /run/user/0... ░░ Subject: A start job for unit user-runtime-dir@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@0.service has begun execution. ░░ ░░ The job identifier is 601. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 1 of user root. ░░ Subject: A new session 1 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 1 has been created for the user root. ░░ ░░ The leading process of the session is 4322. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Finished user-runtime-dir@0.service - User Runtime Directory /run/user/0. ░░ Subject: A start job for unit user-runtime-dir@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user-runtime-dir@0.service has finished successfully. ░░ ░░ The job identifier is 601. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting user@0.service - User Manager for UID 0... ░░ Subject: A start job for unit user@0.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has begun execution. ░░ ░░ The job identifier is 681. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 2 of user root. ░░ Subject: A new session 2 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 2 has been created for the user root. ░░ ░░ The leading process of the session is 4327. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com (systemd)[4327]: pam_unix(systemd-user:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Queued start job for default target default.target. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Created slice app.slice - User Application Slice. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 5. 
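[Annotation] This is the standard logind path for the first root SSH login: user-0.slice is created, user-runtime-dir@0.service sets up /run/user/0, and user@0.service starts a per-user systemd instance (PID 4327) alongside the session scope. The resulting session and user state can be inspected with (sketch; not captured in this run):
    loginctl list-sessions
    loginctl user-status root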
Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: grub-boot-success.timer - Mark boot as successful after the user session has run 2 minutes was skipped because of an unmet condition check (ConditionUser=!@system). ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 10. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Started systemd-tmpfiles-clean.timer - Daily Cleanup of User's Temporary Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 11. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target paths.target - Paths. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 12. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target timers.target - Timers. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 9. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Starting dbus.socket - D-Bus User Message Bus Socket... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 4. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Starting systemd-tmpfiles-setup.service - Create User Files and Directories... ░░ Subject: A start job for unit UNIT has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has begun execution. ░░ ░░ The job identifier is 8. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Finished systemd-tmpfiles-setup.service - Create User Files and Directories. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 8. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Listening on dbus.socket - D-Bus User Message Bus Socket. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 4. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target sockets.target - Sockets. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 3. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target basic.target - Basic System. 
░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 2. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Reached target default.target - Main User Target. ░░ Subject: A start job for unit UNIT has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit UNIT has finished successfully. ░░ ░░ The job identifier is 1. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[4327]: Startup finished in 127ms. ░░ Subject: User manager start-up is now complete ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The user manager instance for user 0 has been started. All services queued ░░ for starting have been started. Note that other services might still be starting ░░ up or be started at any later time. ░░ ░░ Startup of the manager took 127888 microseconds. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started user@0.service - User Manager for UID 0. ░░ Subject: A start job for unit user@0.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit user@0.service has finished successfully. ░░ ░░ The job identifier is 681. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started session-1.scope - Session 1 of User root. ░░ Subject: A start job for unit session-1.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-1.scope has finished successfully. ░░ ░░ The job identifier is 762. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4338]: Received disconnect from 10.30.34.106 port 52592:11: disconnected by user Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4338]: Disconnected from user root 10.30.34.106 port 52592 Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4322) opened. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4322]: pam_unix(sshd:session): session closed for user root Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: session-1.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-1.scope has successfully entered the 'dead' state. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Session 1 logged out. Waiting for processes to exit. Dec 14 11:29:16 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Removed session 1. ░░ Subject: Session 1 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 1 has been terminated. 
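[Annotation] Session 1 is opened and torn down within the same second; that short-lived connect/disconnect pattern is typical of an automation controller (here, the Ansible test harness) probing the host over SSH. Each session gets its own scope unit, so its journal slice can be isolated afterwards (generic command):
    journalctl -b -u session-1.scope --no-pager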
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4376]: Accepted publickey for root from 10.31.8.152 port 43942 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: Accepted publickey for root from 10.31.8.152 port 43954 ssh2: RSA SHA256:W3cSdmPJK+d9RwU97ardijPXIZnxHswrpTHWW9oYtEU Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4376]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4376) opened. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4377) opened. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 3 of user root. ░░ Subject: A new session 3 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 3 has been created for the user root. ░░ ░░ The leading process of the session is 4376. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started session-3.scope - Session 3 of User root. ░░ Subject: A start job for unit session-3.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-3.scope has finished successfully. ░░ ░░ The job identifier is 844. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: New session 4 of user root. ░░ Subject: A new session 4 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 4 has been created for the user root. ░░ ░░ The leading process of the session is 4377. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started session-4.scope - Session 4 of User root. ░░ Subject: A start job for unit session-4.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-4.scope has finished successfully. ░░ ░░ The job identifier is 926. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4376]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4383]: Received disconnect from 10.31.8.152 port 43954:11: disconnected by user Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4383]: Disconnected from user root 10.31.8.152 port 43954 Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-4377) opened. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com sshd-session[4377]: pam_unix(sshd:session): session closed for user root Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: session-4.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit session-4.scope has successfully entered the 'dead' state. Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Session 4 logged out. Waiting for processes to exit. 
Dec 14 11:29:23 ip-10-31-43-117.us-east-1.aws.redhat.com systemd-logind[653]: Removed session 4. ░░ Subject: Session 4 has been terminated ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A session with the ID 4 has been terminated. Dec 14 11:29:28 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Starting systemd-hostnamed.service - Hostname Service... ░░ Subject: A start job for unit systemd-hostnamed.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has begun execution. ░░ ░░ The job identifier is 1008. Dec 14 11:29:28 ip-10-31-43-117.us-east-1.aws.redhat.com systemd[1]: Started systemd-hostnamed.service - Hostname Service. ░░ Subject: A start job for unit systemd-hostnamed.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit systemd-hostnamed.service has finished successfully. ░░ ░░ The job identifier is 1008. Dec 14 11:29:28 managed-node1 systemd-hostnamed[5857]: Hostname set to <managed-node1> (static) Dec 14 11:29:28 managed-node1 NetworkManager[703]: [1734193768.6492] hostname: static hostname changed from "ip-10-31-43-117.us-east-1.aws.redhat.com" to "managed-node1" Dec 14 11:29:28 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1086. Dec 14 11:29:28 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1086. Dec 14 11:29:38 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Dec 14 11:29:58 managed-node1 systemd[1]: systemd-hostnamed.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit systemd-hostnamed.service has successfully entered the 'dead' state. Dec 14 11:30:05 managed-node1 sshd-session[6523]: Accepted publickey for root from 10.31.13.174 port 42640 ssh2: RSA SHA256:9j1blwt3wcrRiGYZQ7ZGu9axm3cDklH6/z4c+Ee8CzE Dec 14 11:30:05 managed-node1 sshd-session[6523]: pam_systemd(sshd:session): New sd-bus connection (system-bus-pam-systemd-6523) opened. Dec 14 11:30:05 managed-node1 systemd-logind[653]: New session 5 of user root. ░░ Subject: A new session 5 has been created for user root ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ Documentation: sd-login(3) ░░ ░░ A new session with the ID 5 has been created for the user root. ░░ ░░ The leading process of the session is 6523. Dec 14 11:30:05 managed-node1 systemd[1]: Started session-5.scope - Session 5 of User root.
░░ Subject: A start job for unit session-5.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit session-5.scope has finished successfully. ░░ ░░ The job identifier is 1165. Dec 14 11:30:05 managed-node1 sshd-session[6523]: pam_unix(sshd:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:30:06 managed-node1 python3.12[6679]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Dec 14 11:30:08 managed-node1 python3.12[6839]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:30:08 managed-node1 python3.12[6970]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:30:10 managed-node1 sudo[7232]: root : TTY=pts/0 ; PWD=/root ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-qkiobsjfqecahznjrwohrybwqhyobeje ; /usr/bin/python3.12 /root/.ansible/tmp/ansible-tmp-1734193809.9793763-6984-204031990791383/AnsiballZ_dnf.py' Dec 14 11:30:10 managed-node1 sudo[7232]: pam_systemd(sudo:session): New sd-bus connection (system-bus-pam-systemd-7232) opened. Dec 14 11:30:10 managed-node1 sudo[7232]: pam_unix(sudo:session): session opened for user root(uid=0) by root(uid=0) Dec 14 11:30:10 managed-node1 python3.12[7235]: ansible-ansible.legacy.dnf Invoked with name=['iptables-nft', 'podman', 'shadow-utils-subid'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Dec 14 11:30:27 managed-node1 kernel: SELinux: Converting 384 SID table entries... Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability open_perms=1 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability always_check_network=0 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Dec 14 11:30:27 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Dec 14 11:30:34 managed-node1 kernel: SELinux: Converting 385 SID table entries... 
Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability open_perms=1 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability always_check_network=0 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Dec 14 11:30:34 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Dec 14 11:30:42 managed-node1 kernel: SELinux: Converting 385 SID table entries... Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability open_perms=1 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability always_check_network=0 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Dec 14 11:30:42 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Dec 14 11:30:43 managed-node1 setsebool[7309]: The virt_use_nfs policy boolean was changed to 1 by root Dec 14 11:30:43 managed-node1 setsebool[7309]: The virt_sandbox_use_all_caps policy boolean was changed to 1 by root Dec 14 11:30:52 managed-node1 kernel: SELinux: Converting 388 SID table entries... Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability open_perms=1 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability always_check_network=0 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Dec 14 11:30:52 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Dec 14 11:31:00 managed-node1 kernel: SELinux: Converting 388 SID table entries... 
Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability network_peer_controls=1 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability open_perms=1 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability extended_socket_class=1 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability always_check_network=0 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability cgroup_seclabel=1 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability nnp_nosuid_transition=1 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability genfs_seclabel_symlinks=1 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability ioctl_skip_cloexec=0 Dec 14 11:31:00 managed-node1 kernel: SELinux: policy capability userspace_initial_context=0 Dec 14 11:31:17 managed-node1 systemd[1]: Started run-rdcb31fbbad404dfd86db5482f938d0b1.service - /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-rdcb31fbbad404dfd86db5482f938d0b1.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-rdcb31fbbad404dfd86db5482f938d0b1.service has finished successfully. ░░ ░░ The job identifier is 1247. Dec 14 11:31:17 managed-node1 systemd[1]: Reload requested from client PID 8034 ('systemctl') (unit session-5.scope)... Dec 14 11:31:17 managed-node1 systemd[1]: Reloading... Dec 14 11:31:17 managed-node1 systemd[1]: Reloading finished in 190 ms. Dec 14 11:31:17 managed-node1 systemd[1]: Starting man-db-cache-update.service... ░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1325. Dec 14 11:31:17 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units… Dec 14 11:31:18 managed-node1 sudo[7232]: pam_unix(sudo:session): session closed for user root Dec 14 11:31:18 managed-node1 python3.12[8229]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:19 managed-node1 python3.12[8367]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Dec 14 11:31:19 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Dec 14 11:31:19 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1325. Dec 14 11:31:19 managed-node1 systemd[1]: run-rdcb31fbbad404dfd86db5482f938d0b1.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-rdcb31fbbad404dfd86db5482f938d0b1.service has successfully entered the 'dead' state. 
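The dnf invocation above installs the role's package set (iptables-nft, podman, shadow-utils-subid), which is what triggers the SELinux policy rebuilds, the virt_use_nfs/virt_sandbox_use_all_caps boolean flips, and the man-db cache updates that follow. A minimal shell sketch of the same step outside Ansible, with package names and probes taken from the logged invocations:

  # Same package set the role's dnf task requested
  dnf -y install iptables-nft podman shadow-utils-subid
  # The next tasks in the log probe the result like this
  podman --version
  stat /usr/bin/getsubids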
Dec 14 11:31:20 managed-node1 python3.12[8503]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:31:22 managed-node1 python3.12[8636]: ansible-tempfile Invoked with prefix=lsr_ suffix=_podman state=directory path=None Dec 14 11:31:22 managed-node1 python3.12[8767]: ansible-file Invoked with path=/tmp/lsr_6ehua9m0_podman/auth state=directory mode=0700 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:23 managed-node1 python3.12[8898]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:31:24 managed-node1 python3.12[9029]: ansible-ansible.legacy.dnf Invoked with name=['python3-pyasn1', 'python3-cryptography', 'python3-dbus'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Dec 14 11:31:26 managed-node1 python3.12[9165]: ansible-ansible.legacy.dnf Invoked with name=['certmonger', 'python3-packaging'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Dec 14 11:31:27 managed-node1 dbus-broker-launch[637]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically when ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Dec 14 11:31:27 managed-node1 dbus-broker-launch[637]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload.
░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically when ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Dec 14 11:31:27 managed-node1 dbus-broker-launch[637]: Noticed file-system modification, trigger reload. ░░ Subject: A configuration directory was written to ░░ Defined-By: dbus-broker ░░ Support: https://groups.google.com/forum/#!forum/bus1-devel ░░ ░░ A write was detected to one of the directories containing D-Bus configuration ░░ files, triggering a configuration reload. ░░ ░░ This functionality exists for backwards compatibility to pick up changes to ░░ D-Bus configuration without an explicit reload request. Typically when ░░ installing or removing third-party software causes D-Bus configuration files ░░ to be added or removed. ░░ ░░ It is worth noting that this may cause partial configuration to be loaded in ░░ case dispatching this notification races with the writing of the configuration ░░ files. However, a future notification will then cause the configuration to be ░░ reloaded again. Dec 14 11:31:27 managed-node1 systemd[1]: Reload requested from client PID 9173 ('systemctl') (unit session-5.scope)... Dec 14 11:31:27 managed-node1 systemd[1]: Reloading... Dec 14 11:31:28 managed-node1 systemd[1]: Reloading finished in 183 ms. Dec 14 11:31:28 managed-node1 systemd[1]: Started run-r3d4cf19d1fc24d23b770a4063f70f37f.service - /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-r3d4cf19d1fc24d23b770a4063f70f37f.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-r3d4cf19d1fc24d23b770a4063f70f37f.service has finished successfully. ░░ ░░ The job identifier is 1407. Dec 14 11:31:28 managed-node1 systemd[1]: Starting man-db-cache-update.service... ░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1485. Dec 14 11:31:28 managed-node1 systemd[1]: Reload requested from client PID 9234 ('systemctl') (unit session-5.scope)... Dec 14 11:31:28 managed-node1 systemd[1]: Reloading... Dec 14 11:31:28 managed-node1 systemd[1]: Reloading finished in 291 ms. Dec 14 11:31:28 managed-node1 systemd[1]: Queuing reload/restart jobs for marked units… Dec 14 11:31:29 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Dec 14 11:31:29 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1485. Dec 14 11:31:29 managed-node1 systemd[1]: run-r3d4cf19d1fc24d23b770a4063f70f37f.service: Deactivated successfully.
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-r3d4cf19d1fc24d23b770a4063f70f37f.service has successfully entered the 'dead' state. Dec 14 11:31:29 managed-node1 python3.12[9425]: ansible-file Invoked with name=/etc/certmonger//pre-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//pre-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:29 managed-node1 python3.12[9556]: ansible-file Invoked with name=/etc/certmonger//post-scripts owner=root group=root mode=0700 state=directory path=/etc/certmonger//post-scripts recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:30 managed-node1 python3.12[9687]: ansible-ansible.legacy.systemd Invoked with name=certmonger state=started enabled=True daemon_reload=False daemon_reexec=False scope=system no_block=False force=None masked=None Dec 14 11:31:30 managed-node1 systemd[1]: Reload requested from client PID 9690 ('systemctl') (unit session-5.scope)... Dec 14 11:31:30 managed-node1 systemd[1]: Reloading... Dec 14 11:31:30 managed-node1 systemd[1]: Reloading finished in 186 ms. Dec 14 11:31:30 managed-node1 systemd[1]: Starting fstrim.service - Discard unused blocks on filesystems from /etc/fstab... ░░ Subject: A start job for unit fstrim.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.service has begun execution. ░░ ░░ The job identifier is 1563. Dec 14 11:31:31 managed-node1 systemd[1]: Starting certmonger.service - Certificate monitoring and PKI enrollment... ░░ Subject: A start job for unit certmonger.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit certmonger.service has begun execution. ░░ ░░ The job identifier is 1641. Dec 14 11:31:31 managed-node1 (rtmonger)[9745]: certmonger.service: Referenced but unset environment variable evaluates to an empty string: OPTS Dec 14 11:31:31 managed-node1 systemd[1]: fstrim.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit fstrim.service has successfully entered the 'dead' state. Dec 14 11:31:31 managed-node1 systemd[1]: Finished fstrim.service - Discard unused blocks on filesystems from /etc/fstab. ░░ Subject: A start job for unit fstrim.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit fstrim.service has finished successfully. ░░ ░░ The job identifier is 1563. Dec 14 11:31:31 managed-node1 systemd[1]: Started certmonger.service - Certificate monitoring and PKI enrollment. ░░ Subject: A start job for unit certmonger.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit certmonger.service has finished successfully. ░░ ░░ The job identifier is 1641. 
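With certmonger.service now running, the next entries show the certificate role requesting a self-signed certificate for the test registry. A rough getcert equivalent of that request, assuming certmonger's stock SelfSign CA nickname is available (verify with `getcert list-cas`; the role itself drives certmonger differently):

  # Request and track a self-signed cert for the registry
  getcert request -c SelfSign -w \
      -f /etc/pki/tls/certs/podman_registry.crt \
      -k /etc/pki/tls/private/podman_registry.key \
      -D localhost -D 127.0.0.1
  # The teardown later in this log stops tracking before deleting the files
  getcert stop-tracking -f /etc/pki/tls/certs/podman_registry.crt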
Dec 14 11:31:31 managed-node1 python3.12[9904]: ansible-fedora.linux_system_roles.certificate_request Invoked with name=podman_registry dns=['localhost', '127.0.0.1'] directory=/etc/pki/tls wait=True ca=self-sign __header=# # Ansible managed # # system_role:certificate provider_config_directory=/etc/certmonger provider=certmonger key_usage=['digitalSignature', 'keyEncipherment'] extended_key_usage=['id-kp-serverAuth', 'id-kp-clientAuth'] auto_renew=True ip=None email=None common_name=None country=None state=None locality=None organization=None organizational_unit=None contact_email=None key_size=None owner=None group=None mode=None principal=None run_before=None run_after=None Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:31 managed-node1 certmonger[9745]: 2024-12-14 11:31:31 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 
2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 certmonger[9919]: Certificate in file "/etc/pki/tls/certs/podman_registry.crt" issued by CA and saved. Dec 14 11:31:32 managed-node1 certmonger[9745]: 2024-12-14 11:31:32 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:32 managed-node1 python3.12[10050]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt Dec 14 11:31:33 managed-node1 python3.12[10181]: ansible-slurp Invoked with path=/etc/pki/tls/private/podman_registry.key src=/etc/pki/tls/private/podman_registry.key Dec 14 11:31:33 managed-node1 python3.12[10312]: ansible-slurp Invoked with path=/etc/pki/tls/certs/podman_registry.crt src=/etc/pki/tls/certs/podman_registry.crt Dec 14 11:31:34 managed-node1 python3.12[10443]: ansible-ansible.legacy.command Invoked with _raw_params=getcert stop-tracking -f /etc/pki/tls/certs/podman_registry.crt _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:34 managed-node1 certmonger[9745]: 2024-12-14 11:31:34 [9745] Wrote to /var/lib/certmonger/requests/20241214163131 Dec 14 11:31:34 managed-node1 python3.12[10575]: ansible-file Invoked with path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:34 managed-node1 python3.12[10706]: ansible-file Invoked with path=/etc/pki/tls/private/podman_registry.key state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:35 managed-node1 python3.12[10837]: ansible-file Invoked with path=/etc/pki/tls/certs/podman_registry.crt state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:35 managed-node1 python3.12[10968]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_6ehua9m0_podman/auth/registry_cert.crt 
follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Dec 14 11:31:36 managed-node1 python3.12[11073]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_6ehua9m0_podman/auth/registry_cert.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1734193895.4098308-8404-275762393983029/.source.crt _original_basename=.fdte2xv6 follow=False checksum=a56753cb72985d5015d277aab9534d583f3099c2 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:36 managed-node1 python3.12[11204]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_6ehua9m0_podman/auth/registry_key.pem follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Dec 14 11:31:36 managed-node1 python3.12[11309]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_6ehua9m0_podman/auth/registry_key.pem mode=0600 src=/root/.ansible/tmp/ansible-tmp-1734193896.2446203-8452-271283031401177/.source.pem _original_basename=.b248p56a follow=False checksum=3c4bd2383044d864f778448dd3788c2bdf7f63a0 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:37 managed-node1 python3.12[11440]: ansible-ansible.legacy.stat Invoked with path=/tmp/lsr_6ehua9m0_podman/auth/ca.crt follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Dec 14 11:31:37 managed-node1 python3.12[11545]: ansible-ansible.legacy.copy Invoked with dest=/tmp/lsr_6ehua9m0_podman/auth/ca.crt mode=0600 src=/root/.ansible/tmp/ansible-tmp-1734193896.9596615-8488-86842030413265/.source.crt _original_basename=.5xr0fb34 follow=False checksum=a56753cb72985d5015d277aab9534d583f3099c2 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:31:38 managed-node1 python3.12[11676]: ansible-ansible.legacy.dnf Invoked with name=['httpd-tools', 'skopeo'] state=present allow_downgrade=False allowerasing=False autoremove=False bugfix=False cacheonly=False disable_gpg_check=False disable_plugin=[] disablerepo=[] download_only=False enable_plugin=[] enablerepo=[] exclude=[] installroot=/ install_repoquery=True install_weak_deps=True security=False skip_broken=False update_cache=False update_only=False validate_certs=True sslverify=True lock_timeout=30 use_backend=auto best=None conf_file=None disable_excludes=None download_dir=None list=None nobest=None releasever=None Dec 14 11:31:42 managed-node1 systemd[1]: Started run-ra2bab39c1da445c09f883f3d116af994.service - /usr/bin/systemctl start man-db-cache-update. ░░ Subject: A start job for unit run-ra2bab39c1da445c09f883f3d116af994.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit run-ra2bab39c1da445c09f883f3d116af994.service has finished successfully. ░░ ░░ The job identifier is 1720. Dec 14 11:31:42 managed-node1 systemd[1]: Starting man-db-cache-update.service... 
░░ Subject: A start job for unit man-db-cache-update.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has begun execution. ░░ ░░ The job identifier is 1798. Dec 14 11:31:42 managed-node1 systemd[1]: man-db-cache-update.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit man-db-cache-update.service has successfully entered the 'dead' state. Dec 14 11:31:42 managed-node1 systemd[1]: Finished man-db-cache-update.service. ░░ Subject: A start job for unit man-db-cache-update.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit man-db-cache-update.service has finished successfully. ░░ ░░ The job identifier is 1798. Dec 14 11:31:42 managed-node1 systemd[1]: run-ra2bab39c1da445c09f883f3d116af994.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-ra2bab39c1da445c09f883f3d116af994.service has successfully entered the 'dead' state. Dec 14 11:31:43 managed-node1 python3.12[12190]: ansible-ansible.legacy.command Invoked with _raw_params=podman run -d -p 127.0.0.1:5000:5000 --name podman_registry -v /tmp/lsr_6ehua9m0_podman/auth:/auth:Z -e REGISTRY_AUTH=htpasswd -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/registry_cert.crt -e REGISTRY_HTTP_TLS_KEY=/auth/registry_key.pem quay.io/libpod/registry:2.8.2 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:44 managed-node1 systemd[1]: var-lib-containers-storage-overlay-compat1989105179-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-compat1989105179-merged.mount has successfully entered the 'dead' state. Dec 14 11:31:44 managed-node1 kernel: evm: overlay not supported Dec 14 11:31:44 managed-node1 systemd[1]: var-lib-containers-storage-overlay-metacopy\x2dcheck2016384165-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-metacopy\x2dcheck2016384165-merged.mount has successfully entered the 'dead' state. Dec 14 11:31:44 managed-node1 podman[12191]: 2024-12-14 11:31:44.032711562 -0500 EST m=+0.082834242 system refresh Dec 14 11:31:45 managed-node1 podman[12191]: 2024-12-14 11:31:45.721997956 -0500 EST m=+1.772120507 volume create 174bec1e6ec18bfefedd3e5fc8a18b56102b9fb6012f8e02b781fd81b243eef3 Dec 14 11:31:45 managed-node1 podman[12191]: 2024-12-14 11:31:45.701531903 -0500 EST m=+1.751654658 image pull 0030ba3d620c647159c935ee778991c68ef3e51a274703753b0bc530104ef5e5 quay.io/libpod/registry:2.8.2 Dec 14 11:31:45 managed-node1 podman[12191]: 2024-12-14 11:31:45.732182371 -0500 EST m=+1.782304936 container create 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:31:45 managed-node1 kernel: bridge: filtering via arp/ip/ip6tables is no longer available by default. Update your scripts to load br_netfilter if you need this. 
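The podman run above is the heart of the test fixture: a registry:2 container bound to 127.0.0.1:5000 with htpasswd authentication and TLS served from the :Z-relabeled auth volume. The same command reflowed for readability, with a hypothetical credential step (the log never shows how auth/htpasswd was generated; testuser/testpass are placeholders):

  AUTH=/tmp/lsr_6ehua9m0_podman/auth
  # registry:2 requires bcrypt entries for htpasswd auth
  htpasswd -Bbc "$AUTH/htpasswd" testuser testpass
  podman run -d -p 127.0.0.1:5000:5000 --name podman_registry \
      -v "$AUTH:/auth:Z" \
      -e REGISTRY_AUTH=htpasswd \
      -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
      -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/registry_cert.crt \
      -e REGISTRY_HTTP_TLS_KEY=/auth/registry_key.pem \
      quay.io/libpod/registry:2.8.2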
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7779] manager: (podman0): new Bridge device (/org/freedesktop/NetworkManager/Devices/3) Dec 14 11:31:45 managed-node1 (udev-worker)[12280]: Network interface NamePolicy= disabled on kernel command line. Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Dec 14 11:31:45 managed-node1 kernel: veth0: entered allmulticast mode Dec 14 11:31:45 managed-node1 kernel: veth0: entered promiscuous mode Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered blocking state Dec 14 11:31:45 managed-node1 kernel: podman0: port 1(veth0) entered forwarding state Dec 14 11:31:45 managed-node1 (udev-worker)[12198]: Network interface NamePolicy= disabled on kernel command line. Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7970] device (veth0): carrier: link connected Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7973] manager: (veth0): new Veth device (/org/freedesktop/NetworkManager/Devices/4) Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.7998] device (podman0): carrier: link connected Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8066] device (podman0): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8071] device (podman0): state change: unavailable -> disconnected (reason 'connection-assumed', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8078] device (podman0): Activation: starting connection 'podman0' (08e2f206-5ac2-4e2f-8306-ac90b232dcf4) Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8080] device (podman0): state change: disconnected -> prepare (reason 'none', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8083] device (podman0): state change: prepare -> config (reason 'none', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8086] device (podman0): state change: config -> ip-config (reason 'none', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8089] device (podman0): state change: ip-config -> ip-check (reason 'none', managed-type: 'external') Dec 14 11:31:45 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1877. Dec 14 11:31:45 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1877. 
Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8659] device (podman0): state change: ip-check -> secondaries (reason 'none', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8662] device (podman0): state change: secondaries -> activated (reason 'none', managed-type: 'external') Dec 14 11:31:45 managed-node1 NetworkManager[703]: [1734193905.8667] device (podman0): Activation: successful, device activated. Dec 14 11:31:46 managed-node1 systemd[1]: Created slice machine.slice - Slice /machine. ░░ Subject: A start job for unit machine.slice has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit machine.slice has finished successfully. ░░ ░░ The job identifier is 1957. Dec 14 11:31:46 managed-node1 systemd[1]: Started libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope. ░░ Subject: A start job for unit libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has finished successfully. ░░ ░░ The job identifier is 1956. Dec 14 11:31:46 managed-node1 systemd[1]: Started libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope - libcrun container. ░░ Subject: A start job for unit libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has finished successfully. ░░ ░░ The job identifier is 1962. 
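The conmon and container scopes above land under machine.slice, which systemd creates on demand the first time a container starts (the "Created slice machine.slice" entry). To see that layout on a live system, the cgroup tree makes it explicit:

  # Show the per-container libpod/conmon scopes systemd tracks for podman
  systemd-cgls /machine.slice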
Dec 14 11:31:46 managed-node1 podman[12191]: 2024-12-14 11:31:46.056393753 -0500 EST m=+2.106516450 container init 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:31:46 managed-node1 podman[12191]: 2024-12-14 11:31:46.060003186 -0500 EST m=+2.110125831 container start 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:31:46 managed-node1 python3.12[12486]: ansible-wait_for Invoked with port=5000 host=127.0.0.1 timeout=300 connect_timeout=5 delay=0 active_connection_states=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT'] state=started sleep=1 path=None search_regex=None exclude_hosts=None msg=None Dec 14 11:31:47 managed-node1 python3.12[12617]: ansible-ansible.legacy.command Invoked with _raw_params=podman logs podman_registry _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:47 managed-node1 python3.12[12755]: ansible-ansible.legacy.command Invoked with _raw_params=podman pull quay.io/libpod/testimage:20210610; podman push --authfile="/tmp/lsr_6ehua9m0_podman/auth/auth.json" --cert-dir="/tmp/lsr_6ehua9m0_podman/auth" quay.io/libpod/testimage:20210610 docker://localhost:5000/libpod/testimage:20210610 _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:49 managed-node1 podman[12757]: 2024-12-14 11:31:49.445577203 -0500 EST m=+1.809759385 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f quay.io/libpod/testimage:20210610 Dec 14 11:31:50 managed-node1 podman[12756]: 2024-12-14 11:31:49.47108663 -0500 EST m=+0.016538804 image push 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f docker://localhost:5000/libpod/testimage:20210610 Dec 14 11:31:50 managed-node1 python3.12[12918]: ansible-ansible.legacy.command Invoked with _raw_params=skopeo inspect --authfile="/tmp/lsr_6ehua9m0_podman/auth/auth.json" --cert-dir="/tmp/lsr_6ehua9m0_podman/auth" docker://localhost:5000/libpod/testimage:20210610 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:53 managed-node1 python3.12[13187]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:31:54 managed-node1 python3.12[13324]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:31:55 managed-node1 systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. 
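The seeding sequence above, condensed: pull the public test image, push it into the local registry with the generated credentials and CA, then verify with skopeo. All paths and flags are taken directly from the logged commands:

  AUTH=/tmp/lsr_6ehua9m0_podman/auth
  podman pull quay.io/libpod/testimage:20210610
  podman push --authfile="$AUTH/auth.json" --cert-dir="$AUTH" \
      quay.io/libpod/testimage:20210610 \
      docker://localhost:5000/libpod/testimage:20210610
  skopeo inspect --authfile="$AUTH/auth.json" --cert-dir="$AUTH" \
      docker://localhost:5000/libpod/testimage:20210610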
Dec 14 11:31:57 managed-node1 python3.12[13458]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:31:58 managed-node1 python3.12[13591]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:00 managed-node1 podman[13732]: 2024-12-14 11:32:00.132917471 -0500 EST m=+0.121086648 image pull-error localhost:5000/libpod/testimage:20210610 initializing source docker://localhost:5000/libpod/testimage:20210610: reading manifest 20210610 in localhost:5000/libpod/testimage: authentication required Dec 14 11:32:02 managed-node1 python3.12[14001]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:04 managed-node1 python3.12[14138]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:07 managed-node1 python3.12[14271]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:08 managed-node1 python3.12[14404]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:09 managed-node1 podman[14543]: 2024-12-14 11:32:09.204081254 -0500 EST m=+0.145458147 image pull-error localhost:5000/libpod/testimage:20210610 initializing source docker://localhost:5000/libpod/testimage:20210610: pinging container registry localhost:5000: Get "https://localhost:5000/v2/": tls: failed to verify certificate: x509: certificate signed by unknown authority Dec 14 11:32:12 managed-node1 python3.12[14812]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:14 managed-node1 python3.12[14949]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:16 managed-node1 python3.12[15082]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:17 managed-node1 python3.12[15215]: ansible-file Invoked with path=/etc/containers/certs.d/localhost:5000 state=directory owner=root group=0 mode=0700 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:20 managed-node1 python3.12[15582]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True 
get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:21 managed-node1 python3.12[15715]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:22 managed-node1 podman[15856]: 2024-12-14 11:32:22.830406841 -0500 EST m=+0.198941135 image pull 9f9ec7f2fdef9168f74e9d057f307955db14d782cff22ded51d277d74798cb2f localhost:5000/libpod/testimage:20210610 Dec 14 11:32:23 managed-node1 python3.12[16001]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:23 managed-node1 python3.12[16132]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:24 managed-node1 python3.12[16263]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Dec 14 11:32:24 managed-node1 python3.12[16368]: ansible-ansible.legacy.copy Invoked with dest=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml owner=root group=0 mode=0644 src=/root/.ansible/tmp/ansible-tmp-1734193943.8234994-10632-256821299067559/.source.yml _original_basename=.h1hpyflq follow=False checksum=fb0097683a2e5c8909a8037d64ddc1b350aed0be backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:25 managed-node1 python3.12[16499]: ansible-containers.podman.podman_play Invoked with state=started kube_file=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Dec 14 11:32:25 managed-node1 python3.12[16643]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:28 managed-node1 python3.12[16906]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:29 managed-node1 python3.12[17043]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:32 managed-node1 python3.12[17176]: ansible-stat Invoked with 
path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:34 managed-node1 python3.12[17309]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:36 managed-node1 python3.12[17442]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:37 managed-node1 python3.12[17575]: ansible-ansible.legacy.command Invoked with _raw_params=systemd-escape --template podman-kube@.service /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:38 managed-node1 python3.12[17707]: ansible-systemd Invoked with name=podman-kube@-etc-containers-ansible\x2dkubernetes.d-auth_test_1_kube.yml.service scope=system state=stopped enabled=False daemon_reload=False daemon_reexec=False no_block=False force=None masked=None Dec 14 11:32:38 managed-node1 python3.12[17840]: ansible-stat Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:39 managed-node1 python3.12[17973]: ansible-containers.podman.podman_play Invoked with state=absent kube_file=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml executable=podman annotation=None kube_file_content=None authfile=None build=None cert_dir=None configmap=None context_dir=None seccomp_profile_root=None username=None password=NOT_LOGGING_PARAMETER log_driver=None log_opt=None network=None tls_verify=None debug=None quiet=None recreate=None userns=None log_level=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None Dec 14 11:32:39 managed-node1 python3.12[17973]: ansible-containers.podman.podman_play version: 5.3.1, kube file /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml Dec 14 11:32:40 managed-node1 python3.12[18117]: ansible-file Invoked with path=/etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:41 managed-node1 python3.12[18248]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:43 managed-node1 python3.12[18381]: ansible-systemd Invoked with name=auth_test_1_quadlet.service scope=system state=stopped enabled=False force=True daemon_reload=False daemon_reexec=False no_block=False masked=None Dec 14 11:32:43 managed-node1 python3.12[18513]: ansible-stat Invoked with path=/etc/containers/systemd/auth_test_1_quadlet.container follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:44 managed-node1 python3.12[18644]: ansible-file Invoked with path=/etc/containers/systemd/auth_test_1_quadlet.container state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None 
src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:46 managed-node1 python3.12[18775]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:49 managed-node1 python3.12[19170]: ansible-file Invoked with path=/root/.config/containers state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:50 managed-node1 python3.12[19301]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:32:55 managed-node1 python3.12[20220]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:56 managed-node1 python3.12[20357]: ansible-getent Invoked with database=passwd key=auth_test_user1 fail_key=False service=None split=None Dec 14 11:32:57 managed-node1 python3.12[20489]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:58 managed-node1 python3.12[20621]: ansible-user Invoked with name=auth_test_user1 state=absent non_unique=False force=False remove=False create_home=True system=False move_home=False append=False ssh_key_bits=0 ssh_key_type=rsa ssh_key_comment=ansible-generated on managed-node1 update_password=always uid=None group=None groups=None comment=None home=None shell=None password=NOT_LOGGING_PARAMETER login_class=None password_expire_max=None password_expire_min=None password_expire_warn=None hidden=None seuser=None skeleton=None generate_ssh_key=None ssh_key_file=None ssh_key_passphrase=NOT_LOGGING_PARAMETER expires=None password_lock=None local=None profile=None authorization=None role=None umask=None Dec 14 11:32:58 managed-node1 python3.12[20753]: ansible-file Invoked with path=/home/auth_test_user1 state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:32:58 managed-node1 rsyslogd[651]: imjournal: journal files changed, reloading... 
[v8.2408.0-2.el10 try https://www.rsyslog.com/e/0 ] Dec 14 11:32:59 managed-node1 python3.12[20885]: ansible-ansible.legacy.command Invoked with _raw_params=podman inspect podman_registry --format '{{range .}}{{range .Mounts}}{{if eq .Type "volume"}}{{.Name}}{{end}}{{end}}{{end}}' _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:59 managed-node1 python3.12[21023]: ansible-ansible.legacy.command Invoked with _raw_params=podman rm -f podman_registry _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:32:59 managed-node1 systemd[1]: libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 podman[21024]: 2024-12-14 11:32:59.789115628 -0500 EST m=+0.041007423 container died 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:32:59 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Dec 14 11:32:59 managed-node1 kernel: veth0 (unregistering): left allmulticast mode Dec 14 11:32:59 managed-node1 kernel: veth0 (unregistering): left promiscuous mode Dec 14 11:32:59 managed-node1 kernel: podman0: port 1(veth0) entered disabled state Dec 14 11:32:59 managed-node1 NetworkManager[703]: [1734193979.8303] device (podman0): state change: activated -> unmanaged (reason 'unmanaged', managed-type: 'removed') Dec 14 11:32:59 managed-node1 systemd[1]: Starting NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service... ░░ Subject: A start job for unit NetworkManager-dispatcher.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has begun execution. ░░ ░░ The job identifier is 1969. Dec 14 11:32:59 managed-node1 systemd[1]: Started NetworkManager-dispatcher.service - Network Manager Script Dispatcher Service. ░░ Subject: A start job for unit NetworkManager-dispatcher.service has finished successfully ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit NetworkManager-dispatcher.service has finished successfully. ░░ ░░ The job identifier is 1969. Dec 14 11:32:59 managed-node1 systemd[1]: run-netns-netns\x2d85bf4814\x2dacfa\x2db691\x2da100\x2d70dfbf3554ab.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit run-netns-netns\x2d85bf4814\x2dacfa\x2db691\x2da100\x2d70dfbf3554ab.mount has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 systemd[1]: var-lib-containers-storage-overlay\x2dcontainers-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a-userdata-shm.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay\x2dcontainers-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a-userdata-shm.mount has successfully entered the 'dead' state. 
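Annotation: the entries above between 11:32:37 and 11:32:40 show the teardown order the role applies to a kube spec: derive the templated unit name with systemd-escape, stop the podman-kube@ instance, remove the play with podman_play state=absent, then delete the spec file. A minimal sketch of that sequence, using the same paths the log shows; the __kube_unit register name is illustrative, not the role's actual variable:

- name: Get the escaped podman-kube@ unit name for the kube spec
  ansible.builtin.command:
    argv:
      - systemd-escape
      - --template
      - podman-kube@.service
      - /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml
  register: __kube_unit
  changed_when: false

- name: Stop and disable the templated unit
  ansible.builtin.systemd:
    name: "{{ __kube_unit.stdout }}"
    scope: system
    state: stopped
    enabled: false

- name: Remove the objects created by podman kube play
  containers.podman.podman_play:
    kube_file: /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml
    state: absent

- name: Remove the kube spec file itself
  ansible.builtin.file:
    path: /etc/containers/ansible-kubernetes.d/auth_test_1_kube.yml
    state: absent

The stdout of the first task is the escaped instance name seen in the log, podman-kube@-etc-containers-ansible\x2dkubernetes.d-auth_test_1_kube.yml.service.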
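Annotation: the registry teardown logged above uses a Go template passed to podman inspect to extract any named volumes from the container's mounts, force-removes the container, and then deletes the volume (the volume removal appears in the next entry, at 11:33:00). Sketched as tasks under the same assumptions; !unsafe keeps Jinja2 from interpreting the Go template braces, and the register name is illustrative:

- name: Collect named volumes mounted by the registry container
  ansible.builtin.command:
    argv:
      - podman
      - inspect
      - podman_registry
      - --format
      - !unsafe '{{range .}}{{range .Mounts}}{{if eq .Type "volume"}}{{.Name}}{{end}}{{end}}{{end}}'
  register: __registry_volumes
  changed_when: false

- name: Force-remove the registry container
  ansible.builtin.command:
    argv: [podman, rm, -f, podman_registry]

- name: Remove the volume it used
  ansible.builtin.command:
    argv: [podman, volume, rm, "{{ __registry_volumes.stdout }}"]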
Dec 14 11:32:59 managed-node1 systemd[1]: var-lib-containers-storage-overlay-caa7ce3c9d2ffccfe015c47869a4faf042de89e661c34084c6c65ad743feba41-merged.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay-caa7ce3c9d2ffccfe015c47869a4faf042de89e661c34084c6c65ad743feba41-merged.mount has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 podman[21024]: 2024-12-14 11:32:59.914889749 -0500 EST m=+0.166781463 container remove 013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a (image=quay.io/libpod/registry:2.8.2, name=podman_registry) Dec 14 11:32:59 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:32:59 managed-node1 systemd[1]: libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit libpod-conmon-013623ab41c550d739cb44f590fc1fde1e9557fa6fbbb3a443ef1d0bf0f6f57a.scope has successfully entered the 'dead' state. Dec 14 11:33:00 managed-node1 python3.12[21191]: ansible-ansible.legacy.command Invoked with _raw_params=podman volume rm 174bec1e6ec18bfefedd3e5fc8a18b56102b9fb6012f8e02b781fd81b243eef3 _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:00 managed-node1 podman[21192]: 2024-12-14 11:33:00.361517439 -0500 EST m=+0.026007234 volume remove 174bec1e6ec18bfefedd3e5fc8a18b56102b9fb6012f8e02b781fd81b243eef3 Dec 14 11:33:00 managed-node1 python3.12[21330]: ansible-file Invoked with path=/tmp/lsr_6ehua9m0_podman state=absent recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:33:03 managed-node1 python3.12[21504]: ansible-ansible.legacy.setup Invoked with gather_subset=['all'] gather_timeout=10 filter=[] fact_path=/etc/ansible/facts.d Dec 14 11:33:04 managed-node1 python3.12[21664]: ansible-stat Invoked with path=/run/ostree-booted follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:05 managed-node1 python3.12[21795]: ansible-stat Invoked with path=/sbin/transactional-update follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:07 managed-node1 python3.12[22057]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:09 managed-node1 python3.12[22195]: ansible-getent Invoked with database=passwd key=root fail_key=False service=None split=None Dec 14 11:33:09 managed-node1 python3.12[22327]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:09 managed-node1 
systemd[1]: NetworkManager-dispatcher.service: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit NetworkManager-dispatcher.service has successfully entered the 'dead' state. Dec 14 11:33:13 managed-node1 python3.12[22461]: ansible-stat Invoked with path=/usr/bin/getsubids follow=False get_checksum=True get_mime=True get_attributes=True checksum_algorithm=sha1 Dec 14 11:33:15 managed-node1 python3.12[22594]: ansible-file Invoked with path=/etc/containers/systemd state=directory owner=root group=0 mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:33:15 managed-node1 python3.12[22725]: ansible-ansible.legacy.stat Invoked with path=/etc/containers/systemd/quadlet-pod-pod.pod follow=False get_checksum=True get_size=False checksum_algorithm=sha1 get_mime=True get_attributes=True Dec 14 11:33:16 managed-node1 python3.12[22830]: ansible-ansible.legacy.copy Invoked with src=/root/.ansible/tmp/ansible-tmp-1734193995.4057543-13488-229431513263554/.source.pod dest=/etc/containers/systemd/quadlet-pod-pod.pod owner=root group=0 mode=0644 follow=False _original_basename=systemd.j2 checksum=1884c880482430d8bf2e944b003734fb8b7a462d backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None Dec 14 11:33:17 managed-node1 python3.12[22961]: ansible-systemd Invoked with daemon_reload=True scope=system daemon_reexec=False no_block=False name=None state=None enabled=None force=None masked=None Dec 14 11:33:17 managed-node1 systemd[1]: Reload requested from client PID 22962 ('systemctl') (unit session-5.scope)... Dec 14 11:33:17 managed-node1 systemd[1]: Reloading... Dec 14 11:33:17 managed-node1 systemd[1]: Reloading finished in 198 ms. Dec 14 11:33:18 managed-node1 python3.12[23145]: ansible-systemd Invoked with name=quadlet-pod-pod-pod.service scope=system state=started daemon_reload=False daemon_reexec=False no_block=False enabled=None force=None masked=None Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2048. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23149]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23149]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:18 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. 
░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23158]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23158]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:18 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:18 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2048 and the job result is failed. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 1. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2132. 
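Annotation: the quadlet phase logged between 11:33:15 and 11:33:18 follows the standard pattern: drop a .pod quadlet into /etc/containers/systemd, reload systemd so the quadlet generator emits a service unit, then start it. Note the naming: a file called quadlet-pod-pod.pod yields quadlet-pod-pod-pod.service, because the generator appends -pod to a pod quadlet's basename. A sketch assuming a minimal unit body; the rendered file content is not shown in the log, so the PodName value below is hypothetical:

- name: Install the pod quadlet
  ansible.builtin.copy:
    dest: /etc/containers/systemd/quadlet-pod-pod.pod
    owner: root
    group: root
    mode: "0644"
    content: |
      # hypothetical minimal body; the real rendered content is not in the log
      [Pod]
      PodName=quadlet-pod

- name: Reload systemd so the quadlet generator runs
  ansible.builtin.systemd:
    daemon_reload: true

- name: Start the generated unit
  ansible.builtin.systemd:
    name: quadlet-pod-pod-pod.service
    scope: system
    state: started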
Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23187]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23187]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23215]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23215]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:18 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2132 and the job result is failed. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 2. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. 
░░ ░░ The job identifier is 2216. Dec 14 11:33:18 managed-node1 python3.12[23312]: ansible-ansible.legacy.command Invoked with _raw_params=set -x set -o pipefail exec 1>&2 #podman volume rm --all #podman network prune -f podman volume ls podman network ls podman secret ls podman container ls podman pod ls podman images systemctl list-units | grep quadlet systemctl list-unit-files | grep quadlet ls -alrtF /etc/containers/systemd /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log _uses_shell=True expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23313]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23313]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23329]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23329]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:18 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2216 and the job result is failed. 
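Annotation: the Debug3 task captured above runs a multi-line shell script that the journal flattens into a single line. Reconstructed in readable form (same script, line breaks inferred from the command boundaries):

- name: Debug3
  ansible.builtin.shell: |
    set -x
    set -o pipefail
    exec 1>&2
    #podman volume rm --all
    #podman network prune -f
    podman volume ls
    podman network ls
    podman secret ls
    podman container ls
    podman pod ls
    podman images
    systemctl list-units | grep quadlet
    systemctl list-unit-files | grep quadlet
    ls -alrtF /etc/containers/systemd
    /usr/libexec/podman/quadlet -dryrun -v -no-kmsg-log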
Dec 14 11:33:18 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 3. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:18 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2300. Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23377]: time="2024-12-14T11:33:18-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:18 managed-node1 quadlet-pod-pod-pod[23377]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:19 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23386]: time="2024-12-14T11:33:19-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23386]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:19 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. 
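Annotation: the failure driving this restart loop is identical on every attempt: ExecStartPre cannot create the pod because podman, needing to build a local pause image for the pod's infra container, finds no catatonit binary on $PATH; the ExecStopPost cleanup then fails too, since the pod ID file the failed start never wrote does not exist. A hypothetical remediation, not something this run performs, would be to ensure the pause binary is installed before starting pod quadlets (catatonit is the package name on Fedora/CentOS):

# Hypothetical fix sketch: this test run does not install the package;
# it is shown only to make the failure mode concrete.
- name: Ensure catatonit is available for pod infra containers
  ansible.builtin.package:
    name: catatonit
    state: present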
Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:19 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2300 and the job result is failed. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 4. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:19 managed-node1 systemd[1]: Starting quadlet-pod-pod-pod.service... ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has begun execution ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has begun execution. ░░ ░░ The job identifier is 2384. Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23488]: time="2024-12-14T11:33:19-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23488]: Error: building local pause image: finding pause binary: exec: "catatonit": executable file not found in $PATH Dec 14 11:33:19 managed-node1 systemd[1]: var-lib-containers-storage-overlay.mount: Deactivated successfully. ░░ Subject: Unit succeeded ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit var-lib-containers-storage-overlay.mount has successfully entered the 'dead' state. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStartPre= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. 
Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23519]: time="2024-12-14T11:33:19-05:00" level=warning msg="Failed to decode the keys [\"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options\" \"storage.options.overlay.pull_options.enable_partial_images\" \"storage.options.overlay.pull_options.use_hard_links\" \"storage.options.overlay.pull_options.ostree_repos\" \"storage.options.overlay.pull_options.convert_images\"] from \"/usr/share/containers/storage.conf\"" Dec 14 11:33:19 managed-node1 quadlet-pod-pod-pod[23519]: Error: reading pod ID file: open /run/quadlet-pod-pod-pod.pod-id: no such file or directory Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Control process exited, code=exited, status=125/n/a ░░ Subject: Unit process exited ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ An ExecStopPost= process belonging to unit quadlet-pod-pod-pod.service has exited. ░░ ░░ The process' exit code is 'exited' and its exit status is 125. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:19 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2384 and the job result is failed. Dec 14 11:33:19 managed-node1 python3.12[23544]: ansible-ansible.legacy.command Invoked with _raw_params=grep type=AVC /var/log/audit/audit.log _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Scheduled restart job, restart counter is at 5. ░░ Subject: Automatic restarting of a unit has been scheduled ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ Automatic restarting of the unit quadlet-pod-pod-pod.service has been scheduled, as the result for ░░ the configured Restart= setting for the unit. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Start request repeated too quickly. Dec 14 11:33:19 managed-node1 systemd[1]: quadlet-pod-pod-pod.service: Failed with result 'exit-code'. ░░ Subject: Unit failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ The unit quadlet-pod-pod-pod.service has entered the 'failed' state with result 'exit-code'. Dec 14 11:33:19 managed-node1 systemd[1]: Failed to start quadlet-pod-pod-pod.service. ░░ Subject: A start job for unit quadlet-pod-pod-pod.service has failed ░░ Defined-By: systemd ░░ Support: https://access.redhat.com/support ░░ ░░ A start job for unit quadlet-pod-pod-pod.service has finished with a failure. ░░ ░░ The job identifier is 2468 and the job result is failed. 
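Annotation: after the fifth failed start systemd refuses further restarts ("Start request repeated too quickly"); units default to StartLimitBurst=5 within StartLimitIntervalSec=10s, which matches the restart counter above. Purely to make those knobs concrete, and not something this run does, a drop-in could relax the limit:

- name: Create a drop-in directory for the generated unit (illustrative only)
  ansible.builtin.file:
    path: /etc/systemd/system/quadlet-pod-pod-pod.service.d
    state: directory
    mode: "0755"

- name: Relax start-rate limiting (hypothetical; not done in this run)
  ansible.builtin.copy:
    dest: /etc/systemd/system/quadlet-pod-pod-pod.service.d/limits.conf
    mode: "0644"
    content: |
      [Unit]
      StartLimitIntervalSec=60
      StartLimitBurst=10

- name: Reload systemd to pick up the drop-in
  ansible.builtin.systemd:
    daemon_reload: true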
Dec 14 11:33:20 managed-node1 python3.12[23678]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:22 managed-node1 python3.12[23941]: ansible-ansible.legacy.command Invoked with _raw_params=podman --version _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None Dec 14 11:33:23 managed-node1 python3.12[24078]: ansible-getent Invoked with database=passwd key=user_quadlet_pod fail_key=False service=None split=None Dec 14 11:33:24 managed-node1 python3.12[24210]: ansible-ansible.legacy.command Invoked with _raw_params=journalctl -ex _uses_shell=False expand_argument_vars=True stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None

PLAY RECAP *********************************************************************
managed-node1              : ok=49   changed=1    unreachable=0    failed=2    skipped=67   rescued=2    ignored=0

TASKS RECAP ********************************************************************
Saturday 14 December 2024  11:33:24 -0500 (0:00:00.445)       0:00:21.958 *****
===============================================================================
Gathering Facts --------------------------------------------------------- 1.46s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:9
fedora.linux_system_roles.podman : Gather the package facts ------------- 1.25s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Reload systemctl --------------------- 1.19s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:82
Debug3 ------------------------------------------------------------------ 1.11s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:127
fedora.linux_system_roles.podman : Ensure quadlet file is present ------- 1.10s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:70
fedora.linux_system_roles.podman : Gather the package facts ------------- 0.84s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:6
fedora.linux_system_roles.podman : Start service ------------------------ 0.75s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:110
fedora.linux_system_roles.podman : Get podman version ------------------- 0.66s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46
fedora.linux_system_roles.podman : Check if system is ostree ------------ 0.60s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:11
fedora.linux_system_roles.podman : Get user information ----------------- 0.60s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
fedora.linux_system_roles.podman : Ensure the quadlet directory is present --- 0.52s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/create_update_quadlet_spec.yml:39
Check AVCs -------------------------------------------------------------- 0.51s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:146
fedora.linux_system_roles.podman : Get podman version ------------------- 0.49s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:46
fedora.linux_system_roles.podman : See if getsubids exists -------------- 0.47s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:28
Dump journal ------------------------------------------------------------ 0.47s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:151
fedora.linux_system_roles.podman : Get user information ----------------- 0.46s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:2
Dump journal ------------------------------------------------------------ 0.45s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/tests/podman/tests_quadlet_pod.yml:194
fedora.linux_system_roles.podman : See if getsubids exists -------------- 0.44s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/handle_user_group.yml:28
fedora.linux_system_roles.podman : Check if transactional-update exists in /sbin --- 0.41s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/set_vars.yml:23
fedora.linux_system_roles.podman : Podman package version must be 4.2 or later --- 0.21s
/tmp/collections-9ny/ansible_collections/fedora/linux_system_roles/roles/podman/tasks/main.yml:56