Logreduce

For each file of the job under analysis, the report gives the number of anomalous lines found (Count) and the baseline files it was compared against:

Count | Filename | Compared to
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/nova/nova-api.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/nova/nova-api.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/nova/nova-api.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/nova/nova-api.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/nova/nova-api.log.txt.gz
3 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/tempest_output.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/home/jenkins/tempest_output.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/home/jenkins/tempest_output.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/mistral/executor.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/mistral/executor.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/mistral/executor.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/mistral/executor.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/mistral/executor.log.txt.gz
9 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/messages.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/messages.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/messages.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/messages.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/messages.txt.gz
9 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/journal.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/journal.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/journal.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/journal.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/journal.txt.gz
7 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/host_info.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/host_info.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/host_info.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/host_info.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/host_info.txt.gz
8 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/console.html | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/console.html /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/console.html
3 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/overcloud_deploy.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/home/jenkins/overcloud_deploy.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/home/jenkins/overcloud_deploy.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/tempest/tempest.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/home/jenkins/tempest/tempest.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/home/jenkins/tempest/tempest.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/ps.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/unbound_log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/listen53.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/deprecations.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/syslog.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/sudoers.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/iptables.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/resolv_conf.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/rpm-qa.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/ps.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/pip2-freeze.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/unbound_log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/listen53.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/deprecations.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/syslog.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/sudoers.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/iptables.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/resolv_conf.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/rpm-qa.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/ps.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/pip2-freeze.txt.gz
4 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/dmesg.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/dmesg.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/dmesg.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/dmesg.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/dmesg.txt.gz
4 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/dmesg.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/dmesg.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/dmesg.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/dmesg.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/dmesg.txt.gz
13 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/messages.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/messages.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/messages.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/messages.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/messages.txt.gz
14 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/journal.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/journal.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/journal.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/journal.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/journal.txt.gz
3 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/secure.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/secure.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/secure.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/secure.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/secure.txt.gz
2 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/heat/heat-engine.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/heat/heat-engine.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/heat/heat-engine.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/heat/heat-engine.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/heat/heat-engine.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/audit/audit.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/audit/audit.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/audit/audit.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/audit/audit.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/audit/audit.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/audit/audit.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/audit/audit.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/audit/audit.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/audit/audit.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/audit/audit.log.txt.gz
6 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/repo_setup.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/home/jenkins/repo_setup.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/home/jenkins/repo_setup.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/home/jenkins/repo_setup.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/home/jenkins/repo_setup.log.txt.gz
16 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/home/jenkins/repo_setup.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/home/jenkins/repo_setup.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/home/jenkins/repo_setup.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/home/jenkins/repo_setup.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/home/jenkins/repo_setup.log.txt.gz
4 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/postci.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/postci.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/postci.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/bootstrap-subnodes.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/bootstrap-subnodes.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/bootstrap-subnodes.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/cluster/corosync.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/cluster/corosync.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/cluster/corosync.log.txt.gz
1 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/heat-deploy-times.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/heat-deploy-times.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/heat-deploy-times.log.txt.gz
76 | /tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/mistral/engine.log.txt.gz | /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/undercloud/var/log/mistral/engine.log.txt.gz /tmp//logs.openstack.org/43/497543/11/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/9ee12c3/logs/subnode-2/var/log/mistral/engine.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/undercloud/var/log/mistral/engine.log.txt.gz /tmp//logs.openstack.org/24/504124/1/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/da8f491/logs/subnode-2/var/log/mistral/engine.log.txt.gz
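
Each per-file section below prints one line per anomaly, prefixed with a score between 0.000 (an identical line exists in the baseline) and 1.000 (nothing similar was ever seen) and the line number in the source file. Conceptually the scoring is a nearest-neighbor distance over vectorized log lines; the snippet below is a minimal illustrative sketch of that idea with scikit-learn, not logreduce's actual implementation, and the sample log lines are invented:

    # Score each target line by cosine distance to its nearest baseline line.
    # A distance of 0.0 means the line also appears in the baseline; values
    # near 1.0 flag lines the baseline runs never produced.
    from sklearn.feature_extraction.text import HashingVectorizer
    from sklearn.neighbors import NearestNeighbors

    baseline = [
        "DEBUG oslo_concurrency.lockutils Lock acquired",
        "INFO nova.api.openstack.requestlog POST /v2.1/servers status: 202",
    ]
    target = [
        "DEBUG oslo_concurrency.lockutils Lock acquired",
        "ERROR nova.api.openstack.extensions ParsingError: /etc/my.cnf",
    ]

    vectorizer = HashingVectorizer(n_features=2 ** 12)
    model = NearestNeighbors(n_neighbors=1, metric="cosine")
    model.fit(vectorizer.transform(baseline))

    distances, _ = model.kneighbors(vectorizer.transform(target))
    for score, line in zip(distances.ravel(), target):
        print("%0.3f | %s" % (score, line))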
/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/nova/nova-api.log.txt.gz
0.000 | 3850: 2017-09-14 19:47:41.993 57559 DEBUG nova.api.openstack.wsgi [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] Action: 'create', calling method: <bound method ServersController.create of <nova.api.openstack.compute.servers.ServersController object at 0x7f66029064d0>>, body: {"server": {"name": "tempest-TestNetworkBasicOps-server-966396963", "imageRef": "6c689e91-3408-451e-bc58-7c03eddd0028", "key_name": "tempest-TestNetworkBasicOps-198610858", "flavorRef": "8f773c57-4f7b-49f3-bef7-4fda99ada582", "networks": [{"uuid": "87338d55-159e-4fd4-9429-080e9bfff009"}], "security_groups": [{"name": "tempest-secgroup-smoke-1905469331"}]}} _process_stack /usr/lib/python2.7/site-packages/nova/api/openstack/wsgi.py:609
0.000 | 3851: 2017-09-14 19:47:42.005 57559 DEBUG oslo_concurrency.lockutils [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] Lock "00000000-0000-0000-0000-000000000000" acquired by "nova.context.get_or_set_cached_cell_and_set_connections" :: waited 0.000s inner /usr/lib/python2.7/site-packages/oslo_concurrency/lockutils.py:270
0.000 | 3852: 2017-09-14 19:47:42.006 57559 DEBUG oslo_concurrency.lockutils [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] Lock "00000000-0000-0000-0000-000000000000" released by "nova.context.get_or_set_cached_cell_and_set_connections" :: held 0.002s inner /usr/lib/python2.7/site-packages/oslo_concurrency/lockutils.py:282
0.429 | 3853: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] Unexpected exception in API method: ParsingError: File contains parsing errors: /etc/my.cnf
0.569 | 3854: [line 17]: '!includedir /etc/my.cnf.d
0.569 | 3854: '
0.429 | 3855: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions Traceback (most recent call last):
0.365 | 3856: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/openstack/extensions.py", line 336, in wrapped
0.523 | 3857: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return f(*args, **kwargs)
0.533 | 3858: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3859: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.533 | 3860: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3861: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.533 | 3862: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3863: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.533 | 3864: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3865: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.533 | 3866: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3867: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.533 | 3868: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3869: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.533 | 3870: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/validation/__init__.py", line 108, in wrapper
0.523 | 3871: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return func(*args, **kwargs)
0.580 | 3872: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/api/openstack/compute/servers.py", line 494, in create
0.440 | 3873: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions nova_context.get_admin_context(), ['nova-compute'])
0.578 | 3874: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/objects/service.py", line 447, in get_minimum_version_all_cells
0.429 | 3875: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions cctxt, binaries)
0.622 | 3876: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_versionedobjects/base.py", line 184, in wrapper
0.597 | 3877: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions result = fn(cls, context, *args, **kwargs)
0.578 | 3878: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/objects/service.py", line 413, in get_minimum_version_multi
0.469 | 3879: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions context, binaries, use_slave=use_slave)
0.565 | 3880: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/nova/db/sqlalchemy/api.py", line 234, in wrapper
0.517 | 3881: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions with reader_mode.using(context):
0.525 | 3882: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
0.523 | 3883: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return self.gen.next()
0.592 | 3884: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 1028, in _transaction_scope
0.605 | 3885: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions context=context) as resource:
0.525 | 3886: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
0.523 | 3887: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return self.gen.next()
0.592 | 3888: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 633, in _session
0.567 | 3889: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions bind=self.connection, mode=self.mode)
0.592 | 3890: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 398, in _create_session
0.523 | 3891: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions self._start()
0.592 | 3892: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 484, in _start
0.429 | 3893: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions engine_args, maker_args)
0.592 | 3894: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 508, in _setup_for_connection
0.429 | 3895: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions sql_connection=sql_connection, **engine_kwargs)
0.577 | 3896: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/engines.py", line 179, in create_engine
0.568 | 3897: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions test_conn = _test_connection(engine, max_retries, retry_interval)
0.577 | 3898: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/engines.py", line 357, in _test_connection
0.429 | 3899: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return engine.connect()
0.604 | 3900: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2091, in connect
0.470 | 3901: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return self._connection_cls(self, **kwargs)
0.604 | 3902: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 90, in __init__
0.526 | 3903: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions if connection is not None else engine.raw_connection()
0.604 | 3904: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2177, in raw_connection
0.523 | 3905: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions self.pool.unique_connection, _connection)
0.604 | 3906: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2147, in _wrap_pool_connect
0.429 | 3907: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return fn()
0.562 | 3908: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 328, in unique_connection
0.523 | 3909: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return _ConnectionFairy._checkout(self)
0.562 | 3910: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 766, in _checkout
0.429 | 3911: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions fairy = _ConnectionRecord.checkout(pool)
0.562 | 3912: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 516, in checkout
0.429 | 3913: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions rec = pool._do_get()
0.562 | 3914: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 1138, in _do_get
0.523 | 3915: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions self._dec_overflow()
0.562 | 3916: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/util/langhelpers.py", line 66, in __exit__
0.429 | 3917: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions compat.reraise(exc_type, exc_value, exc_tb)
0.562 | 3918: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 1135, in _do_get
0.523 | 3919: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return self._create_connection()
0.562 | 3920: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 333, in _create_connection
0.523 | 3921: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return _ConnectionRecord(self)
0.562 | 3922: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 461, in __init__
0.545 | 3923: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions self.__connect(first_connect_check=True)
0.562 | 3924: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 651, in __connect
0.575 | 3925: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions connection = pool._invoke_creator(self)
0.562 | 3926: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/strategies.py", line 105, in connect
0.429 | 3927: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return dialect.connect(*cargs, **cparams)
0.485 | 3928: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 393, in connect
0.523 | 3929: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return self.dbapi.connect(*cargs, **cparams)
0.531 | 3930: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/pymysql/__init__.py", line 90, in Connect
0.523 | 3931: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions return Connection(*args, **kwargs)
0.531 | 3932: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib/python2.7/site-packages/pymysql/connections.py", line 618, in __init__
0.429 | 3933: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions cfg.read(os.path.expanduser(read_default_file))
0.525 | 3934: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/ConfigParser.py", line 305, in read
0.523 | 3935: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions self._read(fp, filename)
0.525 | 3936: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions File "/usr/lib64/python2.7/ConfigParser.py", line 546, in _read
0.429 | 3937: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions raise e
0.490 | 3938: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions ParsingError: File contains parsing errors: /etc/my.cnf
0.561 | 3939: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions [line 17]: '!includedir /etc/my.cnf.d
0.561 | 3939: '
0.429 | 3940: 2017-09-14 19:47:42.010 57559 ERROR nova.api.openstack.extensions
0.502 | 3941: 2017-09-14 19:47:42.021 57559 INFO nova.api.openstack.wsgi [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] HTTP exception thrown: Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible.
0.641 | 3942: <class 'ConfigParser.ParsingError'>
0.521 | 3943: 2017-09-14 19:47:42.022 57559 DEBUG nova.api.openstack.wsgi [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] Returning 500 to user: Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible.
0.630 | 3944: <class 'ConfigParser.ParsingError'> __call__ /usr/lib/python2.7/site-packages/nova/api/openstack/wsgi.py:1029
0.000 | 3945: 2017-09-14 19:47:42.023 57559 INFO nova.api.openstack.requestlog [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 6172973c61924f49acf4fd5cbbf58e03 57da950fcf4e4a689e10172f79e6cfb2 - default default] 192.168.24.3 "POST /v2.1/servers" status: 500 len: 199 microversion: 2.1 time: 0.034854
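
The extracted traceback points at the root cause directly: pymysql loads the client option file (read_default_file=/etc/my.cnf) with Python 2.7's ConfigParser, which has no notion of MySQL's !includedir directive, so any line that is neither a [section] header nor a key/value pair aborts the parse. A minimal repro sketch of that failure mode, with inline sample content mirroring line 17 of /etc/my.cnf above:

    # Python 2.7, matching the traceback above: ConfigParser raises
    # ParsingError on "!includedir" because the line is neither a
    # "[section]" header nor an "option = value" pair.
    import io
    import ConfigParser

    parser = ConfigParser.RawConfigParser()
    try:
        parser.readfp(io.BytesIO(b"[mysqld]\n!includedir /etc/my.cnf.d\n"))
    except ConfigParser.ParsingError as err:
        print(err)  # File contains parsing errors: ... '!includedir /etc/my.cnf.d\n'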

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/tempest_output.log.txt.gz
0.000 | 1945: 2017-09-14 19:47:48 | {0} tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops [17.706163s] ... FAILED
0.000 | 1946: 2017-09-14 19:47:51 |
0.000 | 1947: 2017-09-14 19:47:51 | ==============================
0.293 | 1948: 2017-09-14 19:47:51 | Failed 1 tests - output below:
0.000 | 1949: 2017-09-14 19:47:51 | ==============================

0.000 | 1951: 2017-09-14 19:47:51 | tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
0.000 | 1952: 2017-09-14 19:47:51 | -------------------------------------------------------------------------------------------------------------------------------------------------
0.000 | 1953: 2017-09-14 19:47:51 |
1.000 | 1954: 2017-09-14 19:47:51 | Captured traceback:
0.000 | 1955: 2017-09-14 19:47:51 | ~~~~~~~~~~~~~~~~~~~
1.000 | 1956: 2017-09-14 19:47:51 | Traceback (most recent call last):
0.399 | 1957: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/common/utils/__init__.py", line 89, in wrapper
1.000 | 1958: 2017-09-14 19:47:51 | return f(self, *func_args, **func_kwargs)
0.244 | 1959: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/scenario/test_network_basic_ops.py", line 398, in test_network_basic_ops
1.000 | 1960: 2017-09-14 19:47:51 | self._setup_network_and_servers()
0.422 | 1961: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/scenario/test_network_basic_ops.py", line 119, in _setup_network_and_servers
0.468 | 1962: 2017-09-14 19:47:51 | server = self._create_server(self.network, port_id)
0.422 | 1963: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/scenario/test_network_basic_ops.py", line 166, in _create_server
0.592 | 1964: 2017-09-14 19:47:51 | security_groups=security_groups)
0.375 | 1965: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/scenario/manager.py", line 205, in create_server
1.000 | 1966: 2017-09-14 19:47:51 | image_id=image_id, **kwargs)
0.412 | 1967: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/common/compute.py", line 189, in create_test_server
0.000 | 1968: 2017-09-14 19:47:51 | **kwargs)
0.347 | 1969: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/lib/services/compute/servers_client.py", line 89, in create_server
0.536 | 1970: 2017-09-14 19:47:51 | resp, body = self.post('servers', post_body)
0.443 | 1971: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/lib/common/rest_client.py", line 270, in post
0.550 | 1972: 2017-09-14 19:47:51 | return self.request('POST', url, extra_headers, headers, body, chunked)
0.347 | 1973: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/lib/services/compute/base_compute_client.py", line 48, in request
1.000 | 1974: 2017-09-14 19:47:51 | method, url, extra_headers, headers, body, chunked)
0.443 | 1975: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/lib/common/rest_client.py", line 659, in request
1.000 | 1976: 2017-09-14 19:47:51 | self._error_checker(resp, resp_body)
0.443 | 1977: 2017-09-14 19:47:51 | File "/usr/lib/python2.7/site-packages/tempest/lib/common/rest_client.py", line 836, in _error_checker
1.000 | 1978: 2017-09-14 19:47:51 | message=message)
0.642 | 1979: 2017-09-14 19:47:51 | tempest.lib.exceptions.ServerFault: Got server fault
0.615 | 1980: 2017-09-14 19:47:51 | Details: Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible.
1.000 | 1981: 2017-09-14 19:47:51 | <class 'ConfigParser.ParsingError'>
0.000 | 1982: 2017-09-14 19:47:51 |
0.000 | 1983: 2017-09-14 19:47:51 |
1.000 | 1984: 2017-09-14 19:47:51 | Captured pythonlogging:
0.000 | 1985: 2017-09-14 19:47:51 | ~~~~~~~~~~~~~~~~~~~~~~~
0.252 | 1986: 2017-09-14 19:47:51 | 2017-09-14 19:47:31,768 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/networks 0.880s
0.343 | 1987: 2017-09-14 19:47:51 | 2017-09-14 19:47:31,768 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.455 | 1988: 2017-09-14 19:47:51 | Body: {"network": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "name": "tempest-network-smoke--1596822762"}}
0.553 | 1989: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '591', 'content-location': 'http://192.168.24.9:9696/v2.0/networks', u'date': 'Thu, 14 Sep 2017 19:47:31 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-c24396af-ab61-4575-904a-2c6490d44f54'}
0.468 | 1990: 2017-09-14 19:47:51 | Body: {"network":{"ipv6_address_scope":null,"revision_number":3,"port_security_enabled":true,"id":"87338d55-159e-4fd4-9429-080e9bfff009","router:external":false,"availability_zone_hints":[],"availability_zones":[],"ipv4_address_scope":null,"shared":false,"project_id":"57da950fcf4e4a689e10172f79e6cfb2","status":"ACTIVE","subnets":[],"description":"","tags":[],"updated_at":"2017-09-14T19:47:31Z","is_default":false,"name":"tempest-network-smoke--1596822762","qos_policy_id":null,"admin_state_up":true,"tenant_id":"57da950fcf4e4a689e10172f79e6cfb2","created_at":"2017-09-14T19:47:31Z","mtu":1450}}
0.223 | 1991: 2017-09-14 19:47:51 | 2017-09-14 19:47:31,940 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/routers 0.170s
0.343 | 1992: 2017-09-14 19:47:51 | 2017-09-14 19:47:31,940 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.572 | 1993: 2017-09-14 19:47:51 | Body: {"router": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "name": "tempest-router-smoke-1111684309", "admin_state_up": true}}
0.521 | 1994: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '494', 'content-location': 'http://192.168.24.9:9696/v2.0/routers', u'date': 'Thu, 14 Sep 2017 19:47:31 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-9f111872-835a-486f-9b1a-122f09be8327'}
0.486 | 1995: 2017-09-14 19:47:51 | Body: {"router": {"status": "ACTIVE", "external_gateway_info": null, "availability_zone_hints": [], "availability_zones": [], "description": "", "tags": [], "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:31Z", "admin_state_up": true, "updated_at": "2017-09-14T19:47:31Z", "flavor_id": null, "revision_number": 1, "routes": [], "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea", "name": "tempest-router-smoke-1111684309"}}
0.271 | 1996: 2017-09-14 19:47:51 | 2017-09-14 19:47:34,624 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 PUT http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea 2.682s
0.343 | 1997: 2017-09-14 19:47:51 | 2017-09-14 19:47:34,624 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.418 | 1998: 2017-09-14 19:47:51 | Body: {"router": {"external_gateway_info": {"network_id": "4dbcb14a-ce6a-4a93-a502-0ec748998fdd"}}}
0.521 | 1999: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '676', 'content-location': 'http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea', u'date': 'Thu, 14 Sep 2017 19:47:34 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-a210894a-36cd-4f6e-8d88-e648848c09be'}
0.546 | 2000: 2017-09-14 19:47:51 | Body: {"router": {"status": "ACTIVE", "external_gateway_info": {"network_id": "4dbcb14a-ce6a-4a93-a502-0ec748998fdd", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "7f008882-6ada-4850-a62a-a1b870f18e63", "ip_address": "192.168.24.107"}]}, "availability_zone_hints": [], "availability_zones": [], "description": "", "tags": [], "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:31Z", "admin_state_up": true, "updated_at": "2017-09-14T19:47:34Z", "flavor_id": null, "revision_number": 3, "routes": [], "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea", "name": "tempest-router-smoke-1111684309"}}
0.271 | 2001: 2017-09-14 19:47:51 | 2017-09-14 19:47:35,001 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 GET http://192.168.24.9:9696/v2.0/subnets?tenant_id=57da950fcf4e4a689e10172f79e6cfb2&cidr=10.100.0.0%2F28 0.376s
0.343 | 2002: 2017-09-14 19:47:51 | 2017-09-14 19:47:35,002 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2003: 2017-09-14 19:47:51 | Body: None
0.521 | 2004: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '14', 'content-location': 'http://192.168.24.9:9696/v2.0/subnets?tenant_id=57da950fcf4e4a689e10172f79e6cfb2&cidr=10.100.0.0%2F28', u'date': 'Thu, 14 Sep 2017 19:47:34 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-dce49065-4973-4928-88b5-a3f3ba97a47f'}
1.000 | 2005: 2017-09-14 19:47:51 | Body: {"subnets":[]}
0.223 | 2006: 2017-09-14 19:47:51 | 2017-09-14 19:47:35,797 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/subnets 0.794s
0.343 | 2007: 2017-09-14 19:47:51 | 2017-09-14 19:47:35,798 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.052 | 2008: 2017-09-14 19:47:51 | Body: {"subnet": {"ip_version": 4, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "cidr": "10.100.0.0/28", "network_id": "87338d55-159e-4fd4-9429-080e9bfff009", "name": "tempest-subnet-smoke-2115807622"}}
0.521 | 2009: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '633', 'content-location': 'http://192.168.24.9:9696/v2.0/subnets', u'date': 'Thu, 14 Sep 2017 19:47:35 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-55c202c7-fe21-480c-bd1b-08948e265133'}
0.475 | 2010: 2017-09-14 19:47:51 | Body: {"subnet":{"service_types":[],"description":"","enable_dhcp":true,"tags":[],"network_id":"87338d55-159e-4fd4-9429-080e9bfff009","tenant_id":"57da950fcf4e4a689e10172f79e6cfb2","created_at":"2017-09-14T19:47:35Z","dns_nameservers":[],"updated_at":"2017-09-14T19:47:35Z","gateway_ip":"10.100.0.1","ipv6_ra_mode":null,"allocation_pools":[{"start":"10.100.0.2","end":"10.100.0.14"}],"host_routes":[],"revision_number":0,"ip_version":4,"ipv6_address_mode":null,"cidr":"10.100.0.0/28","project_id":"57da950fcf4e4a689e10172f79e6cfb2","id":"091aa358-a678-40cf-9eda-4ed8cc3211b2","subnetpool_id":null,"name":"tempest-subnet-smoke-2115807622"}}
0.271 | 2011: 2017-09-14 19:47:51 | 2017-09-14 19:47:38,745 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 PUT http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea/add_router_interface 2.946s
0.343 | 2012: 2017-09-14 19:47:51 | 2017-09-14 19:47:38,745 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2013: 2017-09-14 19:47:51 | Body: {"subnet_id": "091aa358-a678-40cf-9eda-4ed8cc3211b2"}
0.521 | 2014: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '309', 'content-location': 'http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea/add_router_interface', u'date': 'Thu, 14 Sep 2017 19:47:38 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-7a8a6c27-0bfe-4f01-828d-f4b1a9b84421'}
1.000 | 2015: 2017-09-14 19:47:51 | Body: {"network_id": "87338d55-159e-4fd4-9429-080e9bfff009", "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "subnet_id": "091aa358-a678-40cf-9eda-4ed8cc3211b2", "subnet_ids": ["091aa358-a678-40cf-9eda-4ed8cc3211b2"], "port_id": "33d9dd04-d3c4-4720-b267-9f920dd38dfb", "id": "0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea"}
0.199 | 2016: 2017-09-14 19:47:51 | 2017-09-14 19:47:38,898 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 GET http://192.168.24.9:9696/v2.0/networks 0.152s
0.343 | 2017: 2017-09-14 19:47:51 | 2017-09-14 19:47:38,899 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2018: 2017-09-14 19:47:51 | Body: None
0.553 | 2019: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '1414', 'content-location': 'http://192.168.24.9:9696/v2.0/networks', u'date': 'Thu, 14 Sep 2017 19:47:38 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-7c2e06b8-6e8e-4d76-b0df-f695e5c9e822'}
0.155 | 2020: 2017-09-14 19:47:51 | Body: {"networks":[{"provider:physical_network":"datacentre","ipv6_address_scope":null,"revision_number":5,"port_security_enabled":true,"mtu":1500,"id":"4dbcb14a-ce6a-4a93-a502-0ec748998fdd","router:external":true,"availability_zone_hints":[],"availability_zones":["nova"],"provider:segmentation_id":null,"ipv4_address_scope":null,"shared":false,"project_id":"40476ed3b5af448e80c6a8d4eb3fb482","status":"ACTIVE","subnets":["7f008882-6ada-4850-a62a-a1b870f18e63"],"description":"","tags":[],"updated_at":"2017-09-14T19:46:34Z","is_default":false,"qos_policy_id":null,"name":"public","admin_state_up":true,"tenant_id":"40476ed3b5af448e80c6a8d4eb3fb482","created_at":"2017-09-14T19:46:27Z","provider:network_type":"flat"},{"provider:physical_network":null,"ipv6_address_scope":null,"revision_number":4,"port_security_enabled":true,"mtu":1450,"id":"87338d55-159e-4fd4-9429-080e9bfff009","router:external":false,"availability_zone_hints":[],"availability_zones":["nova"],"provider:segmentation_id":51,"ipv4_address_scope":null,"shared":false,"project_id":"57da950fcf4e4a689e10172f79e6cfb2","status":"ACTIVE","subnets":["091aa358-a678-40cf-9eda-4ed8cc3211b2"],"description":"","tags":[],"updated_at":"2017-09-14T19:47:35Z","qos_policy_id":null,"name":"tempest-network-smoke--1596822762","admin_state_up":true,"tenant_id":"57da950fcf4e4a689e10172f79e6cfb2","created_at":"2017-09-14T19:47:31Z","provider:network_type":"vxlan"}]}
0.271 | 2021: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,012 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 GET http://192.168.24.9:9696/v2.0/subnets 0.112s
0.343 | 2022: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,012 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2023: 2017-09-14 19:47:51 | Body: None
0.521 | 2024: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '1250', 'content-location': 'http://192.168.24.9:9696/v2.0/subnets', u'date': 'Thu, 14 Sep 2017 19:47:39 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-d9fe860c-88ca-4379-9807-5f439f6a43f9'}
0.362 | 2025: 2017-09-14 19:47:51 | Body: {"subnets":[{"service_types":[],"description":"","enable_dhcp":true,"tags":[],"network_id":"87338d55-159e-4fd4-9429-080e9bfff009","tenant_id":"57da950fcf4e4a689e10172f79e6cfb2","created_at":"2017-09-14T19:47:35Z","dns_nameservers":[],"updated_at":"2017-09-14T19:47:35Z","ipv6_ra_mode":null,"allocation_pools":[{"start":"10.100.0.2","end":"10.100.0.14"}],"gateway_ip":"10.100.0.1","revision_number":0,"ipv6_address_mode":null,"ip_version":4,"host_routes":[],"cidr":"10.100.0.0/28","project_id":"57da950fcf4e4a689e10172f79e6cfb2","id":"091aa358-a678-40cf-9eda-4ed8cc3211b2","subnetpool_id":null,"name":"tempest-subnet-smoke-2115807622"},{"service_types":[],"description":"","enable_dhcp":false,"tags":[],"network_id":"4dbcb14a-ce6a-4a93-a502-0ec748998fdd","tenant_id":"40476ed3b5af448e80c6a8d4eb3fb482","created_at":"2017-09-14T19:46:34Z","dns_nameservers":[],"updated_at":"2017-09-14T19:46:34Z","ipv6_ra_mode":null,"allocation_pools":[{"start":"192.168.24.100","end":"192.168.24.120"}],"gateway_ip":"192.168.24.1","revision_number":0,"ipv6_address_mode":null,"ip_version":4,"host_routes":[],"cidr":"192.168.24.0/24","project_id":"40476ed3b5af448e80c6a8d4eb3fb482","id":"7f008882-6ada-4850-a62a-a1b870f18e63","subnetpool_id":null,"name":"ext-subnet"}]}
0.271 | 2026: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,131 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 GET http://192.168.24.9:9696/v2.0/routers 0.118s
0.343 | 2027: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,132 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2028: 2017-09-14 19:47:51 | Body: None
0.521 | 2029: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '698', 'content-location': 'http://192.168.24.9:9696/v2.0/routers', u'date': 'Thu, 14 Sep 2017 19:47:39 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-d905ced8-1daf-498f-8a8d-0cdfb7bd8130'}
0.543 | 2030: 2017-09-14 19:47:51 | Body: {"routers": [{"status": "ACTIVE", "external_gateway_info": {"network_id": "4dbcb14a-ce6a-4a93-a502-0ec748998fdd", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "7f008882-6ada-4850-a62a-a1b870f18e63", "ip_address": "192.168.24.107"}]}, "availability_zone_hints": [], "availability_zones": ["nova"], "description": "", "tags": [], "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:31Z", "admin_state_up": true, "updated_at": "2017-09-14T19:47:37Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "flavor_id": null, "revision_number": 4, "routes": [], "ha": false, "id": "0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea", "name": "tempest-router-smoke-1111684309"}]}
0.269 | 2031: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,601 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 200 POST http://192.168.24.9:8774/v2.1/os-keypairs 0.468s
0.343 | 2032: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,602 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.462 | 2033: 2017-09-14 19:47:51 | Body: {"keypair": {"name": "tempest-TestNetworkBasicOps-198610858"}}
0.743 | 2034: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '2316', 'content-location': 'http://192.168.24.9:8774/v2.1/os-keypairs', u'x-compute-request-id': 'req-c3875f0c-bee1-4db7-9c5a-760040ab8fc7', u'vary': 'OpenStack-API-Version,X-OpenStack-Nova-API-Version,Accept-Encoding', u'server': 'Apache', u'openstack-api-version': 'compute 2.1', u'connection': 'close', u'x-openstack-nova-api-version': '2.1', u'date': 'Thu, 14 Sep 2017 19:47:39 GMT', u'content-type': 'application/json', u'x-openstack-request-id': 'req-c3875f0c-bee1-4db7-9c5a-760040ab8fc7'}
0.462 | 2035: 2017-09-14 19:47:51 | Body: {"keypair": {"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD4Z0mwo76ucyvkbIzRwX41cMX48kv4UGNjOF0UuY9Qxu1uXZLsEVobmjnHc9UTWxz2VBxZroubEKXmdSVwCu+DRNH2FK84LQGvVFjZp8acAnp5MDbCRb6BEZAPB0g4AGY/TXsWn+mnKtpuDGgzx08GQKVMit4GI6WO9cEtbCLgCdNZg3ormfmb6mcQzHm74VzvOJjbUVBrHtzin1rxtBvHXESjJR8tVHia5JuZlQlvVKJrpp3tT/7zSAKFSPptKO4E0IMIbbZEIK2IPPWb2PvI5m6jPv8ABk+AUqMA3O51Bj7IymmOAnCwrMYZLlVwilOOz1+HOVDKl+Bd/KKe6Jjp Generated-by-Nova", "private_key": "-----BEGIN RSA PRIVATE KEY-----
0.462 | 2035: MIIEpQIBAAKCAQEA+GdJsKO+rnMr5GyM0cF+NXDF+PJL+FBjYzhdFLmPUMbtbl2S
0.462 | 2035: 7BFaG5o5x3PVE1sc9lQcWa6LmxCl5nUlcArvg0TR9hSvOC0Br1RY2afGnAJ6eTA2
0.462 | 2035: wkW+gRGQDwdIOABmP017Fp/ppyrabgxoM8dPBkClTIreBiOljvXBLWwi4AnTWYN6
0.462 | 2035: K5n5m+pnEMx5u+Fc7ziY21FQax7c4p9a8bQbx1xEoyUfLVR4muSbmZUJb1Sia6ad
0.462 | 2035: 7U/+80gChUj6bSjuBNCDCG22RCCtiDz1m9j7yOZuoz7/AAZPgFKjANzudQY+yMpp
0.462 | 2035: jgJwsKzGGS5VcIpTjs9fhzlQypfgXfyinuiY6QIDAQABAoIBAQCO1i3cgbvi6l0h
0.462 | 2035: 431yHBZrumZMiV8/8VHFOyLjCPStGUZsQSh70FqFJsD0vNmYYLsdti37L9AdIYyU
0.462 | 2035: IWaer9scG5W0MB61acPfQtw4hC5DhXVRfHTwb1RlOWkiCyju8uPYoMAXuWqM8qpZ
0.462 | 2035: v46nv4LkcIXcprmWxCauNwiXCuhe9IVsa3QmTtr1YydYmkqsNMZPqHc8sBLMuSEC
0.462 | 2035: 5tu/iHTw2cPb5LeMpyZqcc2+aFrfqQ0QG43fwa9nIObjgrLlO5RdQ5Rq/y2lITwO
0.462 | 2035: xhnj/ZfWzNvFU2qqaO+X6ckCxWvE4KBH+4iHXcrK3kDiyJfQPyEYDqzZWcN1CdkX
0.462 | 2035: HjlH0mDlAoGBAP1q5Xv6V3vdjcfWpfBE+sELL2lXA8ifSJu+oG6X6MUSPkGGnRTy
0.462 | 2035: IeQu1YTKTkj60B7s9XHqFA2XPp52n8xkdx0e6VD8TQXN9CjY2jJ4UexktOaHeXGt
0.462 | 2035: BNjjB2V0A3Ah0yhJH7ej9dy54Qqm385DFlks2h1ny5B038JZf0CKb8J/AoGBAPrv
0.462 | 2035: T5bbSRlfg9jzQnfyxVwX47jaWz4VyaXuMCfV840LOPLwdBlbaao82FQFdvX8LyjY
0.462 | 2035: kW83FtufaLuOnNqyLjjF8tBfGh2vx9lqgkjm8yIxMUo/r5HIH1UzLqmCj9Mn53Or
0.462 | 2035: YcmRliUi1ChK/5ExFnfNqkF2sjpEOYofv6JQ0yCXAoGAHLf3h8RIE8Z44dPGfGMk
0.462 | 2035: 06mz1slAqYS7ksLYpY/bljHKElzyAkSqaJSSJ/lRQ94ApfCOpDk1ypCATS7+Twh3
0.462 | 2035: ozo0kLP2+phrehGRMLUH+7Ux9woVTjegW//mf63VEekTZ/ZZfNM88qWyBiYBKPvb
0.462 | 2035: VYHKuhhgLl/AV+MSk7rcjXsCgYEApfmsFwgsLZAz0Uf1awk06VkT0qz7NPGMUyhc
0.462 | 2035: ybyGP4Bwxp+s6yi/l05veAO1T7biKo40g+9YOyQRTh1iOnUohXzIwnJk/8XI1lHQ
0.462 | 2035: ow+/OZp4E7hUFITd1Kjp8EzffLckLCnroDufqCiFU7FnoU1qowtx1aMA9/Ewbd/a
0.462 | 2035: DS6QtzkCgYEAtj+bE5cGC/QN91tV5WM32pDCCJ5EtFchMoxHYp6fuzsWv5DHmLtR
0.462 | 2035: Kxw8oFWpuSv2Q4XGB5jbvGAfdwQPnsDWnX3megILUY05wln3AJeBl7+EDalwTLyj
0.462 | 2035: hqu5DLNxNfAm230W6eQ2wSumr/Lic/m7boWeBIu7SXQG1ttE4OmVXg4=
0.462 | 2035: -----END RSA PRIVATE KEY-----
0.462 | 2035: ", "user_id": "6172973c61924f49acf4fd5cbbf58e03", "name": "tempest-TestNetworkBasicOps-198610858", "fingerprint": "26:26:77:b4:35:8a:41:71:78:39:e2:d5:e8:b0:6f:36"}}
0.223 | 2036: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,922 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-groups 0.317s
0.343 | 2037: 2017-09-14 19:47:51 | 2017-09-14 19:47:39,923 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.317 | 2038: 2017-09-14 19:47:51 | Body: {"security_group": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "description": "tempest-secgroup-smoke-1905469331 description", "name": "tempest-secgroup-smoke-1905469331"}}
0.521 | 2039: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '1410', 'content-location': 'http://192.168.24.9:9696/v2.0/security-groups', u'date': 'Thu, 14 Sep 2017 19:47:39 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-74560587-0d2c-4d8f-9853-3d403ddcbc71'}
0.317 | 2040: 2017-09-14 19:47:51 | Body: {"security_group": {"description": "tempest-secgroup-smoke-1905469331 description", "tags": [], "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:39Z", "updated_at": "2017-09-14T19:47:39Z", "security_group_rules": [{"direction": "egress", "protocol": null, "description": null, "tags": [], "port_range_max": null, "updated_at": "2017-09-14T19:47:39Z", "revision_number": 0, "id": "86f39474-8d10-40eb-9236-fa459a346bb0", "remote_group_id": null, "remote_ip_prefix": null, "created_at": "2017-09-14T19:47:39Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "port_range_min": null, "ethertype": "IPv4", "project_id": "57da950fcf4e4a689e10172f79e6cfb2"}, {"direction": "egress", "protocol": null, "description": null, "tags": [], "port_range_max": null, "updated_at": "2017-09-14T19:47:39Z", "revision_number": 0, "id": "f4fd3e85-2ca3-4750-85fd-e17ac8536c4a", "remote_group_id": null, "remote_ip_prefix": null, "created_at": "2017-09-14T19:47:39Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "port_range_min": null, "ethertype": "IPv6", "project_id": "57da950fcf4e4a689e10172f79e6cfb2"}], "revision_number": 2, "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "name": "tempest-secgroup-smoke-1905469331"}}
0.223 | 2041: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,170 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-group-rules 0.246s
0.343 | 2042: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,170 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2043: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"direction": "ingress", "protocol": "tcp", "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "port_range_max": 22, "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": 22}}
0.521 | 2044: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '507', 'content-location': 'http://192.168.24.9:9696/v2.0/security-group-rules', u'date': 'Thu, 14 Sep 2017 19:47:40 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-f86e0094-8d3c-42f0-96f9-a7447745648b'}
1.000 | 2045: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"remote_group_id": null, "direction": "ingress", "protocol": "tcp", "description": "", "ethertype": "IPv4", "remote_ip_prefix": null, "port_range_max": 22, "updated_at": "2017-09-14T19:47:40Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": 22, "revision_number": 0, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:40Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "bfcb77b1-c1f3-41b3-b292-3c9ac033878e"}}
0.223 | 2046: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,419 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-group-rules 0.248s
0.343 | 2047: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,420 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2048: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"direction": "egress", "protocol": "tcp", "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "port_range_max": 22, "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": 22}}
0.521 | 2049: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '506', 'content-location': 'http://192.168.24.9:9696/v2.0/security-group-rules', u'date': 'Thu, 14 Sep 2017 19:47:40 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-834fe135-d9ef-4b29-9758-01d91184a03c'}
1.000 | 2050: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"remote_group_id": null, "direction": "egress", "protocol": "tcp", "description": "", "ethertype": "IPv4", "remote_ip_prefix": null, "port_range_max": 22, "updated_at": "2017-09-14T19:47:40Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": 22, "revision_number": 0, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:40Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "ce8c2d6f-8ae6-4325-a567-840a973f039a"}}
0.223 | 2051: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,636 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-group-rules 0.215s
0.343 | 2052: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,637 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2053: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "direction": "ingress", "protocol": "icmp", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62"}}
0.521 | 2054: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '512', 'content-location': 'http://192.168.24.9:9696/v2.0/security-group-rules', u'date': 'Thu, 14 Sep 2017 19:47:40 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-047071b2-5a06-445a-b362-0f9f49f58ee0'}
1.000 | 2055: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"remote_group_id": null, "direction": "ingress", "protocol": "icmp", "description": "", "ethertype": "IPv4", "remote_ip_prefix": null, "port_range_max": null, "updated_at": "2017-09-14T19:47:40Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": null, "revision_number": 0, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:40Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "d5f7e912-4452-4708-ba66-919e1ea7c6fd"}}
0.223 | 2056: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,926 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-group-rules 0.287s
0.343 | 2057: 2017-09-14 19:47:51 | 2017-09-14 19:47:40,926 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2058: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "direction": "egress", "protocol": "icmp", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62"}}
0.521 | 2059: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '511', 'content-location': 'http://192.168.24.9:9696/v2.0/security-group-rules', u'date': 'Thu, 14 Sep 2017 19:47:40 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-c48a3255-cff2-4475-a23a-7a2153ec23bd'}
1.000 | 2060: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"remote_group_id": null, "direction": "egress", "protocol": "icmp", "description": "", "ethertype": "IPv4", "remote_ip_prefix": null, "port_range_max": null, "updated_at": "2017-09-14T19:47:40Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": null, "revision_number": 0, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:40Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "462252a6-7902-455b-85cf-02cb0886bd0f"}}
0.223 | 2061: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,199 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-group-rules 0.272s
0.343 | 2062: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,199 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2063: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "direction": "ingress", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "protocol": "icmp", "ethertype": "IPv6"}}
0.521 | 2064: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '512', 'content-location': 'http://192.168.24.9:9696/v2.0/security-group-rules', u'date': 'Thu, 14 Sep 2017 19:47:41 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-7500bae6-d656-4b98-9a4a-09352b6bb1b4'}
1.000 | 2065: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"remote_group_id": null, "direction": "ingress", "protocol": "icmp", "description": "", "ethertype": "IPv6", "remote_ip_prefix": null, "port_range_max": null, "updated_at": "2017-09-14T19:47:41Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": null, "revision_number": 0, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:41Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "7656768f-771b-4460-ba81-2efa3bb7831f"}}
0.223 | 2066: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,385 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:9696/v2.0/security-group-rules 0.185s
0.343 | 2067: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,386 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2068: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "direction": "egress", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "protocol": "icmp", "ethertype": "IPv6"}}
0.521 | 2069: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '511', 'content-location': 'http://192.168.24.9:9696/v2.0/security-group-rules', u'date': 'Thu, 14 Sep 2017 19:47:41 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-5856c48f-cc12-4c82-b65f-8935fee85c6c'}
1.000 | 2070: 2017-09-14 19:47:51 | Body: {"security_group_rule": {"remote_group_id": null, "direction": "egress", "protocol": "icmp", "description": "", "ethertype": "IPv6", "remote_ip_prefix": null, "port_range_max": null, "updated_at": "2017-09-14T19:47:41Z", "security_group_id": "433b71ba-9604-4c12-9b4e-f63b1b9a2b62", "port_range_min": null, "revision_number": 0, "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "created_at": "2017-09-14T19:47:41Z", "project_id": "57da950fcf4e4a689e10172f79e6cfb2", "id": "1b5244db-c9dc-4fbd-a1f2-9f427eb17afa"}}
0.223 | 2071: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,980 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 201 POST http://192.168.24.9:5000/v3/auth/tokens
0.343 | 2072: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,981 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json'}
1.000 | 2073: 2017-09-14 19:47:51 | Body: <omitted>
0.521 | 2074: 2017-09-14 19:47:51 | Response - Headers: {'status': '201', u'content-length': '5872', 'content-location': 'http://192.168.24.9:5000/v3/auth/tokens', u'x-subject-token': '<omitted>', u'vary': 'X-Auth-Token', u'server': 'Apache', u'connection': 'close', u'date': 'Thu, 14 Sep 2017 19:47:41 GMT', u'content-type': 'application/json', u'x-openstack-request-id': 'req-0642980a-491e-4909-8ed3-bb5da2a05e50'}
0.631 | 2075: 2017-09-14 19:47:51 | Body: {"token": {"is_domain": false, "methods": ["password"], "roles": [{"id": "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_"}], "expires_at": "2017-09-14T20:47:41.000000Z", "project": {"domain": {"id": "default", "name": "Default"}, "id": "57da950fcf4e4a689e10172f79e6cfb2", "name": "tempest-TestNetworkBasicOps-1890248812"}, "catalog": [{"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:8778/placement", "region": "regionOne", "interface": "public", "id": "34026a9bf56b43199210011704d180d5"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8778/placement", "region": "regionOne", "interface": "admin", "id": "8dbff4d588b54fc6a631e4ab70b2ee00"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8778/placement", "region": "regionOne", "interface": "internal", "id": "d00a709786af4d38bed4704b407e7457"}], "type": "placement", "id": "1d3f04d21606403e840df63ba2b612a4", "name": "placement"}, {"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:8004/v1/57da950fcf4e4a689e10172f79e6cfb2", "region": "regionOne", "interface": "public", "id": "39249de3d8484e78b4cc50d163dbc62a"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8004/v1/57da950fcf4e4a689e10172f79e6cfb2", "region": "regionOne", "interface": "internal", "id": "577d3d79b75c40f2b08dac259094bf7e"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8004/v1/57da950fcf4e4a689e10172f79e6cfb2", "region": "regionOne", "interface": "admin", "id": "bfebb66923e041c3905a4a9a5d491b58"}], "type": "orchestration", "id": "4233bab1986e4416ab3d8ffc0e582b66", "name": "heat"}, {"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:8386/v1.1/57da950fcf4e4a689e10172f79e6cfb2", "region": "regionOne", "interface": "internal", "id": "71bd102e879243e6b0fe4deda8fea471"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8386/v1.1/57da950fcf4e4a689e10172f79e6cfb2", "region": "regionOne", "interface": "admin", "id": "c80c5063a9e54739842a017639551fac"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8386/v1.1/57da950fcf4e4a689e10172f79e6cfb2", "region": "regionOne", "interface": "public", "id": "f2cea234cdd0408a86eb49c9312bf36c"}], "type": "data-processing", "id": "82097b0c52ea44579d0625c17d91f458", "name": "sahara"}, {"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:35357", "region": "regionOne", "interface": "admin", "id": "34e18f00565643029169cabb2d3aec0f"}, {"region_id": "regionOne", "url": "http://192.168.24.9:5000", "region": "regionOne", "interface": "internal", "id": "50b0d00b820b4e3f8901494f210322a3"}, {"region_id": "regionOne", "url": "http://192.168.24.9:5000", "region": "regionOne", "interface": "public", "id": "c5bed01265664a1da1a6f25d4860a060"}], "type": "identity", "id": "b1e9bf19fd784ce4917a3e264a612ce5", "name": "keystone"}, {"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:9696", "region": "regionOne", "interface": "admin", "id": "0c0e2413fde749fb8ac24edd1bf2b94f"}, {"region_id": "regionOne", "url": "http://192.168.24.9:9696", "region": "regionOne", "interface": "public", "id": "419b8cf6124347c187c6020ed9e0916a"}, {"region_id": "regionOne", "url": "http://192.168.24.9:9696", "region": "regionOne", "interface": "internal", "id": "4b09e9c7eedc46c090264d024351459f"}], "type": "network", "id": "b2d02ef6ec8648bf90b160f42fb77609", "name": "neutron"}, {"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:8774/v2.1", "region": "regionOne", "interface": "public", "id": "127443b5e9134c7db7a46c16624be019"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8774/v2.1", "region": "regionOne", "interface": "admin", "id": "3d5d8960d6994bdcb3f811a09fa180c6"}, {"region_id": "regionOne", "url": "http://192.168.24.9:8774/v2.1", "region": "regionOne", "interface": "internal", "id": "6c7dfcf455894402a46a12d30df3fc80"}], "type": "compute", "id": "c7b95b582e82475ba80de4b3b139a121", "name": "nova"}, {"endpoints": [{"region_id": "regionOne", "url": "http://192.168.24.9:8000/v1", "region": "regionOne", "interface": "public", "id": "2d10b1755fb74159a373b11d59a5caca"},
0.563 | 2076: 2017-09-14 19:47:51 | 2017-09-14 19:47:41,984 11888 INFO [tempest.lib.common.fixed_network] (TestNetworkBasicOps:test_network_basic_ops) Found network None available for tenant
0.239 | 2077: 2017-09-14 19:47:51 | 2017-09-14 19:47:42,028 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:test_network_basic_ops): 500 POST http://192.168.24.9:8774/v2.1/servers 0.042s
0.343 | 2078: 2017-09-14 19:47:51 | 2017-09-14 19:47:42,028 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.527 | 2079: 2017-09-14 19:47:51 | Body: {"server": {"name": "tempest-TestNetworkBasicOps-server-966396963", "imageRef": "6c689e91-3408-451e-bc58-7c03eddd0028", "key_name": "tempest-TestNetworkBasicOps-198610858", "flavorRef": "8f773c57-4f7b-49f3-bef7-4fda99ada582", "networks": [{"uuid": "87338d55-159e-4fd4-9429-080e9bfff009"}], "security_groups": [{"name": "tempest-secgroup-smoke-1905469331"}]}}
0.741 | 2080: 2017-09-14 19:47:51 | Response - Headers: {'status': '500', u'content-length': '199', 'content-location': 'http://192.168.24.9:8774/v2.1/servers', u'x-compute-request-id': 'req-6e55fff0-75a9-4060-ac98-fed799b3fbe1', u'vary': 'OpenStack-API-Version,X-OpenStack-Nova-API-Version', u'server': 'Apache', u'openstack-api-version': 'compute 2.1', u'connection': 'close', u'x-openstack-nova-api-version': '2.1', u'date': 'Thu, 14 Sep 2017 19:47:41 GMT', u'content-type': 'application/json; charset=UTF-8', u'x-openstack-request-id': 'req-6e55fff0-75a9-4060-ac98-fed799b3fbe1'}
0.615 | 2081: 2017-09-14 19:47:51 | Body: {"computeFault": {"message": "Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible.
0.615 | 2081: <class 'ConfigParser.ParsingError'>", "code": 500}}
0.195 | 2082: 2017-09-14 19:47:51 | 2017-09-14 19:47:42,281 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:_run_cleanups): 204 DELETE http://192.168.24.9:9696/v2.0/security-groups/433b71ba-9604-4c12-9b4e-f63b1b9a2b62 0.243s
0.343 | 2083: 2017-09-14 19:47:51 | 2017-09-14 19:47:42,281 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2084: 2017-09-14 19:47:51 | Body: None
0.521 | 2085: 2017-09-14 19:47:51 | Response - Headers: {'status': '204', u'content-length': '0', 'content-location': 'http://192.168.24.9:9696/v2.0/security-groups/433b71ba-9604-4c12-9b4e-f63b1b9a2b62', u'date': 'Thu, 14 Sep 2017 19:47:42 GMT', u'connection': 'close', u'x-openstack-request-id': 'req-e6cfac6b-6693-4c88-9229-5e03d99e3158'}
0.000 | 2086: 2017-09-14 19:47:51 | Body:
0.406 | 2087: 2017-09-14 19:47:51 | 2017-09-14 19:47:42,313 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:_run_cleanups): 202 DELETE http://192.168.24.9:8774/v2.1/os-keypairs/tempest-TestNetworkBasicOps-198610858 0.031s
0.343 | 2088: 2017-09-14 19:47:51 | 2017-09-14 19:47:42,314 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2089: 2017-09-14 19:47:51 | Body: None
0.748 | 2090: 2017-09-14 19:47:51 | Response - Headers: {'status': '202', u'content-length': '0', 'content-location': 'http://192.168.24.9:8774/v2.1/os-keypairs/tempest-TestNetworkBasicOps-198610858', u'x-compute-request-id': 'req-e1738cc0-416f-407a-a4d8-03c0a9c492d9', u'vary': 'OpenStack-API-Version,X-OpenStack-Nova-API-Version', u'server': 'Apache', u'openstack-api-version': 'compute 2.1', u'connection': 'close', u'x-openstack-nova-api-version': '2.1', u'date': 'Thu, 14 Sep 2017 19:47:42 GMT', u'content-type': 'application/json', u'x-openstack-request-id': 'req-e1738cc0-416f-407a-a4d8-03c0a9c492d9'}
0.000 | 2091: 2017-09-14 19:47:51 | Body:
0.195 | 2092: 2017-09-14 19:47:51 | 2017-09-14 19:47:44,408 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:_run_cleanups): 200 PUT http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea/remove_router_interface 2.093s
0.343 | 2093: 2017-09-14 19:47:51 | 2017-09-14 19:47:44,409 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
1.000 | 2094: 2017-09-14 19:47:51 | Body: {"subnet_id": "091aa358-a678-40cf-9eda-4ed8cc3211b2"}
0.521 | 2095: 2017-09-14 19:47:51 | Response - Headers: {'status': '200', u'content-length': '309', 'content-location': 'http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea/remove_router_interface', u'date': 'Thu, 14 Sep 2017 19:47:44 GMT', u'content-type': 'application/json', u'connection': 'close', u'x-openstack-request-id': 'req-d6751190-5476-4975-8e0b-d91ae7512725'}
1.000 | 2096: 2017-09-14 19:47:51 | Body: {"network_id": "87338d55-159e-4fd4-9429-080e9bfff009", "tenant_id": "57da950fcf4e4a689e10172f79e6cfb2", "subnet_id": "091aa358-a678-40cf-9eda-4ed8cc3211b2", "subnet_ids": ["091aa358-a678-40cf-9eda-4ed8cc3211b2"], "port_id": "33d9dd04-d3c4-4720-b267-9f920dd38dfb", "id": "0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea"}
0.195 | 2097: 2017-09-14 19:47:51 | 2017-09-14 19:47:46,157 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:_run_cleanups): 204 DELETE http://192.168.24.9:9696/v2.0/subnets/091aa358-a678-40cf-9eda-4ed8cc3211b2 1.747s
0.343 | 2098: 2017-09-14 19:47:51 | 2017-09-14 19:47:46,158 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2099: 2017-09-14 19:47:51 | Body: None
0.521 | 2100: 2017-09-14 19:47:51 | Response - Headers: {'status': '204', u'content-length': '0', 'content-location': 'http://192.168.24.9:9696/v2.0/subnets/091aa358-a678-40cf-9eda-4ed8cc3211b2', u'date': 'Thu, 14 Sep 2017 19:47:46 GMT', u'connection': 'close', u'x-openstack-request-id': 'req-82bc9636-9065-4653-8b32-6586bd157e0e'}
0.000 | 2101: 2017-09-14 19:47:51 | Body:
0.195 | 2102: 2017-09-14 19:47:51 | 2017-09-14 19:47:47,702 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:_run_cleanups): 204 DELETE http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea 1.543s
0.343 | 2103: 2017-09-14 19:47:51 | 2017-09-14 19:47:47,703 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2104: 2017-09-14 19:47:51 | Body: None
0.521 | 2105: 2017-09-14 19:47:51 | Response - Headers: {'status': '204', u'content-length': '0', 'content-location': 'http://192.168.24.9:9696/v2.0/routers/0d9bf4d6-d78d-4873-9d3e-3cc391c2e2ea', u'date': 'Thu, 14 Sep 2017 19:47:47 GMT', u'connection': 'close', u'x-openstack-request-id': 'req-54239d01-1315-4274-934e-e4451682ea2c'}
0.000 | 2106: 2017-09-14 19:47:51 | Body:
0.138 | 2107: 2017-09-14 19:47:51 | 2017-09-14 19:47:48,591 11888 INFO [tempest.lib.common.rest_client] Request (TestNetworkBasicOps:_run_cleanups): 204 DELETE http://192.168.24.9:9696/v2.0/networks/87338d55-159e-4fd4-9429-080e9bfff009 0.887s
0.343 | 2108: 2017-09-14 19:47:51 | 2017-09-14 19:47:48,592 11888 DEBUG [tempest.lib.common.rest_client] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.293 | 2109: 2017-09-14 19:47:51 | Body: None
0.553 | 2110: 2017-09-14 19:47:51 | Response - Headers: {'status': '204', u'content-length': '0', 'content-location': 'http://192.168.24.9:9696/v2.0/networks/87338d55-159e-4fd4-9429-080e9bfff009', u'date': 'Thu, 14 Sep 2017 19:47:48 GMT', u'connection': 'close', u'x-openstack-request-id': 'req-61fa4af9-781a-4c3a-9d6d-bb653a3f1b7d'}
0.000 | 2111: 2017-09-14 19:47:51 | Body:

0.000 | 2128: 2017-09-14 19:47:51 | ==============
0.000 | 2129: 2017-09-14 19:47:51 | - Worker 0 (1 tests) => 0:00:17.706163
0.000 | 2130: 2017-09-14 19:47:51 |
0.293 | 2131: 2017-09-14 19:47:51 | No tests were successful during the run
0.000 | 2132: 2017-09-14 19:47:52 | Test id Runtime (s)
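
The leftmost column in these excerpts is the per-line anomaly score: a line that also occurs in the baseline logs scores 0.000, while a line with no close match in the baseline scores up to 1.000. As a rough illustration of the idea only (a minimal sketch using scikit-learn with made-up log lines, not Logreduce's actual implementation), each line of the log under review can be scored by its distance to the nearest line from the baseline logs:

    from sklearn.feature_extraction.text import HashingVectorizer
    from sklearn.neighbors import NearestNeighbors

    # Hypothetical lines: baseline comes from known-good runs, target from
    # the run under review.
    baseline = [
        "INFO [tempest.lib.common.rest_client] Request: 200 POST /v2.1/os-keypairs",
        "DEBUG [tempest.lib.common.rest_client] Request - Headers: {...}",
    ]
    target = [
        "DEBUG [tempest.lib.common.rest_client] Request - Headers: {...}",
        "computeFault: Unexpected API Error <class 'ConfigParser.ParsingError'>",
    ]

    # Hash each line into a bag-of-words vector; alternate_sign=False keeps
    # the vectors non-negative so cosine distances stay within [0, 1].
    vectorizer = HashingVectorizer(n_features=2 ** 12, alternate_sign=False)
    index = NearestNeighbors(n_neighbors=1, algorithm="brute", metric="cosine")
    index.fit(vectorizer.transform(baseline))

    # Score = cosine distance to the closest baseline line: ~0.000 for lines
    # the baseline already contains, approaching 1.000 for novel ones.
    distances, _ = index.kneighbors(vectorizer.transform(target))
    for score, line in zip(distances[:, 0], target):
        print("%.3f | %s" % (score, line))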

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/mistral/executor.log.txt.gz
0.000 | 3308: 2017-09-14 19:02:08.194 2342 DEBUG swiftclient [req-b09321cf-f105-401a-be3d-a7e01aa86a87 ad40ba3b56b8405a8843c9e9e0032aec 2bc0cdfdc3664800b9a07e4b9e4b0882 - default default] REQ: curl -i http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/plan-environment.yaml -X GET -H "X-Auth-Token: gAAAAABZutHS1aPw..." http_log /usr/lib/python2.7/site-packages/swiftclient/client.py:165
0.000 | 3309: 2017-09-14 19:02:08.195 2342 DEBUG swiftclient [req-b09321cf-f105-401a-be3d-a7e01aa86a87 ad40ba3b56b8405a8843c9e9e0032aec 2bc0cdfdc3664800b9a07e4b9e4b0882 - default default] RESP STATUS: 200 OK http_log /usr/lib/python2.7/site-packages/swiftclient/client.py:166
0.000 | 3310: 2017-09-14 19:02:08.195 2342 DEBUG swiftclient [req-b09321cf-f105-401a-be3d-a7e01aa86a87 ad40ba3b56b8405a8843c9e9e0032aec 2bc0cdfdc3664800b9a07e4b9e4b0882 - default default] RESP HEADERS: {u'Content-Length': u'12041', u'Accept-Ranges': u'bytes', u'Last-Modified': u'Thu, 14 Sep 2017 19:01:55 GMT', u'Etag': u'ba937aeb8b462438a02e4d1a136f07f5', u'X-Timestamp': u'1505415714.18250', u'X-Trans-Id': u'txe3cb693fe3a14ed1ab1de-0059bad230', u'Date': u'Thu, 14 Sep 2017 19:02:08 GMT', u'Content-Type': u'application/octet-stream', u'X-Openstack-Request-Id': u'txe3cb693fe3a14ed1ab1de-0059bad230'} http_log /usr/lib/python2.7/site-packages/swiftclient/client.py:167
0.355 | 3311: 2017-09-14 19:02:08.247 2342 DEBUG heatclient.common.http [req-b09321cf-f105-401a-be3d-a7e01aa86a87 ad40ba3b56b8405a8843c9e9e0032aec 2bc0cdfdc3664800b9a07e4b9e4b0882 - default default] curl -g -i -X POST -H 'X-Region-Name: regionOne' -H 'X-Auth-Token: {SHA1}5ff9a2fd5133a5fe1ad4de68f1c2c2ccd4f58108' -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'User-Agent: python-heatclient' -d '{"files": {"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/openvswitch.yaml": "{\"parameter_groups\": [{\"description\": \"Do not use deprecated params, they will be removed.\", \"parameters\": [\"HostCpusList\", \"NeutronDpdkCoreList\", \"NeutronDpdkMemoryChannels\", \"NeutronDpdkSocketMemory\", \"NeutronDpdkDriverType\"], \"label\": \"deprecated\"}], \"heat_template_version\": \"pike\", \"description\": \"Open vSwitch Configuration\
0.355 | 3311: \", \"parameters\": {\"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NeutronDpdkCoreList\": {\"default\": \"\", \"type\": \"string\", \"description\": \"List of cores to be used for DPDK Poll Mode Driver\", \"constraints\": [{\"allowed_pattern\": \"[0-9,-]*\"}]}, \"HostCpusList\": {\"default\": \"\", \"type\": \"string\", \"description\": \"List of cores to be used for host process\", \"constraints\": [{\"allowed_pattern\": \"[0-9,-]*\"}]}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronDpdkSocketMemory\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Memory allocated for each socket\"}, \"OvsDpdkSocketMemory\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Sets the amount of hugepage memory to assign per NUMA node. It is recommended to use the socket closest to the PCIe slot used for the desired DPDK NIC. The format should be in \\\"<socket 0 mem>, <socket 1 mem>, <socket n mem>\\\", where the value is specified in MB. For example: \\\"1024,0\\\".\
0.355 | 3311: \"}, \"OvsDpdkMemoryChannels\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Number of memory channels per socket to be used for DPDK\", \"constraints\": [{\"allowed_pattern\": \"[0-9]*\"}]}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"OvsDpdkCoreList\": {\"default\": \"\", \"type\": \"string\", \"description\": \"List of cores to be used for DPDK lcore threads. Note, these threads are used by the OVS control path for validator and handling functions.\
0.355 | 3311: \", \"constraints\": [{\"allowed_pattern\": \"[0-9,-]*\"}]}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NeutronDpdkMemoryChannels\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Number of memory channels to be used for DPDK\", \"constraints\": [{\"allowed_pattern\": \"[0-9]*\"}]}, \"NeutronDpdkDriverType\": {\"default\": \"vfio-pci\", \"type\": \"string\", \"description\": \"DPDK Driver type\"}, \"OvsPmdCoreList\": {\"default\": \"\", \"type\": \"string\", \"description\": \"A list or range of CPU cores for PMD threads to be pinned to. Note, NIC location to cores on socket, number of hyper-threaded logical cores, and desired number of PMD threads can all play a role in configuring this setting. These cores should be on the same socket where OvsDpdkSocketMemory is assigned. If using hyperthreading then specify both logical cores that would equal the physical core. Also, specifying more than one core will trigger multiple PMD threads to be spawned which may improve dataplane performance.\
0.355 | 3311: \", \"constraints\": [{\"allowed_pattern\": \"[0-9,-]*\"}]}, \"OvsDpdkDriverType\": {\"default\": \"vfio-pci\", \"type\": \"string\", \"description\": \"DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.\
0.355 | 3311: \"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Open vSwitch service.\", \"value\": {\"service_name\": \"openvswitch\", \"config_settings\": {\"map_replace\": [{\"map_replace\": [{\"vswitch::dpdk::host_core_list\": \"OvsDpdkCoreList\", \"vswitch::dpdk::driver_type\": \"OvsDpdkDriverType\", \"vswitch::dpdk::memory_channels\": \"OvsDpdkMemoryChannels\", \"vswitch::dpdk::socket_mem\": \"OvsDpdkSocketMemory\", \"vswitch::dpdk::pmd_core_list\": \"OvsPmdCoreList\"}, {\"values\": {\"get_param\": [\"RoleParameters\"]}}]}, {\"values\": {\"OvsDpdkMemoryChannels\": {\"if\": [\"mem_channels_empty\", {\"get_param\": \"NeutronDpdkMemoryChannels\"}, {\"get_param\": \"OvsDpdkMemoryChannels\"}]}, \"OvsDpdkDriverType\": {\"if\": [\"driver_not_set\", {\"get_param\": \"NeutronDpdkDriverType\"}, {\"get_param\": \"OvsDpdkDriverType\"}]}, \"OvsDpdkSocketMemory\": {\"if\": [\"socket_mem_empty\", {\"get_param\": \"NeutronDpdkSocketMemory\"}, {\"get_param\": \"OvsDpdkSocketMemory\"}]}, \"OvsPmdCoreList\": {\"if\": [\"pmd_cores_empty\", {\"get_param\": \"NeutronDpdkCoreList\"}, {\"get_param\": \"OvsPmdCoreList\"}]}, \"OvsDpdkCoreList\": {\"if\": [\"l_cores_empty\", {\"get_param\": \"HostCpusList\"}, {\"get_param\": \"OvsDpdkCoreList\"}]}}}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"shell\": \"rpm -qa | awk -F- '/^openvswitch-2/{print $2 \\\"-\\\" $3}'\", \"name\": \"Check openvswitch version.\", \"tags\": \"step2\", \"register\": \"ovs_version\"}, {\"ignore_errors\": true, \"shell\": \"rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q \\\"systemctl.*try-restart\\\"\", \"register\": \"ovs_packaging_issue\", \"name\": \"Check openvswitch packaging.\", \"tags\": \"step2\"}, {\"when\": \"'2.5.0-14' in ovs_version.stdout|default('') or ovs_packaging_issue|default(false)|succeeded\", \"block\": [{\"name\": \"Ensure empty directory: emptying.\", \"file\": {\"path\": \"/root/OVS_UPGRADE\", \"state\": \"absent\"}}, {\"name\": \"Ensure empty directory: creating.\", \"file\": {\"owner\": \"root\", \"path\": \"/root/OVS_UPGRADE\", \"state\": \"directory\", \"group\": \"root\", \"mode\": 488}}, {\"command\": \"yum makecache\", \"name\": \"Make yum cache.\"}, {\"command\": \"yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch\", \"name\": \"Download OVS packages.\"}, {\"shell\": \"ls -1 /root/OVS_UPGRADE/*.rpm\", \"name\": \"Get rpm list for manual upgrade of OVS.\", \"register\": \"ovs_list_of_rpms\"}, {\"shell\": \"rpm -U --test {{item}} 2>&1 | grep \\\"already installed\\\" || \\\\\
0.355 | 3311: rpm -U --replacepkgs --notriggerun --nopostun {{item}};\
0.355 | 3311: \", \"name\": \"Manual upgrade of OVS\", \"args\": {\"chdir\": \"/root/OVS_UPGRADE\"}, \"with_items\": [\"{{ovs_list_of_rpms.stdout_lines}}\"]}], \"tags\": \"step2\"}]}}}, \"conditions\": {\"pmd_cores_empty\": {\"equals\": [{\"get_param\": \"OvsPmdCoreList\"}, \"\"]}, \"l_cores_empty\": {\"equals\": [{\"get_param\": \"OvsDpdkCoreList\"}, \"\"]}, \"socket_mem_empty\": {\"equals\": [{\"get_param\": \"OvsDpdkSocketMemory\"}, \"\"]}, \"mem_channels_empty\": {\"equals\": [{\"get_param\": \"OvsDpdkMemoryChannels\"}, \"\"]}, \"driver_not_set\": {\"equals\": [{\"get_param\": \"OvsDpdkDriverType\"}, \"vfio-pci\"]}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/controller-role.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Controller node configured by Puppet\", \"parameters\": {\"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"ExtraConfig\": {\"default\": {}, \"type\": \"json\", \"description\": \"Additional hiera configuration to inject into the cluster. Note\
0.355 | 3311: that ControllerExtraConfig takes precedence over ExtraConfig.\
0.355 | 3311: \"}, \"ConfigCommand\": {\"default\": \"os-refresh-config --timeout 14400\", \"type\": \"string\", \"description\": \"Command which will be run whenever configuration data changes\"}, \"NetworkDeploymentActions\": {\"default\": [\"CREATE\"], \"type\": \"comma_delimited_list\", \"description\": \"Heat action when to apply network configuration changes\
0.355 | 3311: \"}, \"Hostname\": {\"default\": \"\", \"type\": \"string\"}, \"ControllerIPs\": {\"default\": {}, \"type\": \"json\"}, \"ControllerExtraConfig\": {\"default\": {}, \"type\": \"json\", \"description\": \"Role specific additional hiera configuration to inject into the cluster.\
0.355 | 3311: \"}, \"ControllerNetworkDeploymentActions\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"Heat action when to apply network configuration changes\
0.355 | 3311: \"}, \"CloudDomain\": {\"default\": \"localdomain\", \"type\": \"string\", \"description\": \"The DNS domain used for the hosts. This must match the overcloud_domain_name configured on the undercloud.\
0.355 | 3311: \"}, \"ConfigCollectSplay\": {\"default\": 30, \"type\": \"number\", \"description\": \"Maximum amount of time to possibly to delay configuation collection\
0.355 | 3311: polling. Defaults to 30 seconds. Set to 0 to disable it which will cause\
0.355 | 3311: the configuration collection to occur as soon as the collection process\
0.355 | 3311: starts. This setting is used to prevent the configuration collection\
0.355 | 3311: processes from polling all at the exact same time.\
0.355 | 3311: \"}, \"ServerMetadata\": {\"default\": {}, \"type\": \"json\", \"description\": \"Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API. This applies to all roles and is merged with a role-specific metadata parameter.\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry.\"}, \"ServiceMetadataSettings\": {\"default\": {}, \"type\": \"json\"}, \"DeploymentServerBlacklistDict\": {\"default\": {}, \"type\": \"json\", \"description\": \"Map of server hostnames to blacklist from any triggered deployments. If the value is 1, the server will be blacklisted. This parameter is generated from the parent template.\
0.355 | 3311: \"}, \"ControllerServerMetadata\": {\"default\": {}, \"type\": \"json\", \"description\": \"Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API. This option is role-specific and is merged with the values given to the ServerMetadata parameter.\
0.355 | 3311: \"}, \"KeyName\": {\"default\": \"default\", \"type\": \"string\", \"description\": \"Name of an existing Nova key pair to enable SSH access to the instances\", \"constraints\": [{\"custom_constraint\": \"nova.keypair\"}]}, \"LoggingSources\": {\"default\": [], \"type\": \"json\"}, \"ControllerImage\": {\"default\": \"overcloud-full\", \"type\": \"string\", \"description\": \"The disk image file to use for the role.\", \"constraints\": [{\"custom_constraint\": \"glance.image\"}]}, \"SoftwareConfigTransport\": {\"default\": \"POLL_SERVER_CFN\", \"type\": \"string\", \"description\": \"How the server should receive the metadata required for software configuration.\
0.355 | 3311: \", \"constraints\": [{\"allowed_values\": [\"POLL_SERVER_CFN\", \"POLL_SERVER_HEAT\", \"POLL_TEMP_URL\", \"ZAQAR_MESSAGE\"]}]}, \"NodeIndex\": {\"default\": 0, \"type\": \"number\"}, \"UpgradeInitCommonCommand\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Common commands required by the upgrades process. This should not\
0.355 | 3311: normally be modified by the operator and is set and unset in the\
0.355 | 3311: major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml\
0.355 | 3311: environment files.\
0.355 | 3311: \"}, \"ServiceConfigSettings\": {\"default\": {}, \"type\": \"json\"}, \"ImageUpdatePolicy\": {\"default\": \"REBUILD_PRESERVE_EPHEMERAL\", \"type\": \"string\", \"description\": \"What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.\"}, \"OvercloudControllerFlavor\": {\"default\": \"baremetal\", \"type\": \"string\", \"description\": \"Flavor for the Controller node.\", \"constraints\": [{\"custom_constraint\": \"nova.flavor\"}]}, \"NeutronPublicInterface\": {\"default\": \"nic1\", \"type\": \"string\", \"description\": \"Which interface to add to the NeutronPhysicalBridge.\"}, \"UpgradeInitCommand\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Command or script snippet to run on all overcloud nodes to\
0.355 | 3311: initialize the upgrade process. E.g. a repository switch.\
0.355 | 3311: \"}, \"NeutronPhysicalBridge\": {\"default\": \"br-ex\", \"type\": \"string\", \"description\": \"An OVS bridge to create for accessing external networks.\"}, \"MonitoringSubscriptions\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"LoggingGroups\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"ControllerSchedulerHints\": {\"default\": {}, \"type\": \"json\", \"description\": \"Optional scheduler hints to pass to nova\"}, \"ServiceNames\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"DeploymentSwiftDataMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Map of servers to Swift container and object for storing deployment data.\
0.355 | 3311: The keys are the Heat assigned hostnames, and the value is a map of the\
0.355 | 3311: container/object name in Swift. Example value:\
0.355 | 3311: overcloud-controller-0:\
0.355 | 3311: container: overcloud-controller\
0.355 | 3311: object: 0\
0.355 | 3311: overcloud-controller-1:\
0.355 | 3311: container: overcloud-controller\
0.355 | 3311: object: 1\
0.355 | 3311: overcloud-controller-2:\
0.355 | 3311: container: overcloud-controller\
0.355 | 3311: object: 2\
0.355 | 3311: overcloud-novacompute-0:\
0.355 | 3311: container: overcloud-compute\
0.355 | 3311: object: 0\
0.355 | 3311: \"}, \"HostnameMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Optional mapping to override hostnames\"}, \"UpdateIdentifier\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Setting to a previously unused value during stack-update will trigger package update on all nodes\
0.355 | 3311: \"}}, \"outputs\": {\"storage_mgmt_ip_address\": {\"description\": \"IP address of the server in the StorageMgmt network\", \"value\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_address\"]}}, \"tls_cert_modulus_md5\": {\"description\": \"MD5 checksum of the TLS Certificate Modulus\", \"value\": {\"get_attr\": [\"NodeTLSData\", \"cert_modulus_md5\"]}}, \"nova_server_resource\": {\"value\": {\"get_resource\": \"Controller\"}, \"description\": \"Heat resource handle for Controller server\", \"condition\": \"server_not_blacklisted\"}, \"tenant_ip_address\": {\"description\": \"IP address of the server in the Tenant network\", \"value\": {\"get_attr\": [\"TenantPort\", \"ip_address\"]}}, \"deployed_server_port_map\": {\"description\": \"Map of Heat created hostname of the server to ip address. This is the\
0.355 | 3311: hostname before it has been mapped with the HostnameMap parameter, and\
0.355 | 3311: the IP address from the ctlplane network. This map can be used to construct\
0.355 | 3311: the DeployedServerPortMap parameter when using split-stack.\
0.355 | 3311: \", \"value\": {\"map_replace\": [{\"hostname\": {\"fixed_ips\": [{\"ip_address\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}}]}}, {\"keys\": {\"hostname\": {\"list_join\": [\"-\", [{\"get_param\": \"Hostname\"}, \"ctlplane\"]]}}}]}}, \"internal_api_ip_address\": {\"description\": \"IP address of the server in the InternalApi network\", \"value\": {\"get_attr\": [\"InternalApiPort\", \"ip_address\"]}}, \"deployed_server_deployment_swift_data_map\": {\"description\": \"Map of Heat created hostname of the server to the Swift container and object used to created the temporary url for metadata polling with os-collect-config.\", \"value\": {\"map_replace\": [{\"hostname\": {\"object\": {\"str_split\": [\"?\", {\"str_split\": [\"/\", {\"get_attr\": [\"Controller\", \"os_collect_config\", \"request\", \"metadata_url\"]}, 6]}, 0]}, \"container\": {\"str_split\": [\"/\", {\"get_attr\": [\"Controller\", \"os_collect_config\", \"request\", \"metadata_url\"]}, 5]}}}, {\"keys\": {\"hostname\": {\"get_param\": \"Hostname\"}}}]}}, \"hostname_map\": {\"description\": \"Mapping of network names to hostnames\", \"value\": {\"ctlplane\": {\"get_attr\": [\"NetHostMap\", \"value\", \"ctlplane\", \"fqdn\"]}, \"management\": {\"get_attr\": [\"NetHostMap\", \"value\", \"management\", \"fqdn\"]}, \"external\": {\"get_attr\": [\"NetHostMap\", \"value\", \"external\", \"fqdn\"]}, \"internal_api\": {\"get_attr\": [\"NetHostMap\", \"value\", \"internal_api\", \"fqdn\"]}, \"storage_mgmt\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage_mgmt\", \"fqdn\"]}, \"storage\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage\", \"fqdn\"]}, \"tenant\": {\"get_attr\": [\"NetHostMap\", \"value\", \"tenant\", \"fqdn\"]}}}, \"ip_address\": {\"description\": \"IP address of the server in the ctlplane network\", \"value\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}}, \"management_ip_address\": {\"description\": \"IP address of the server in the Management network\", \"value\": {\"get_attr\": [\"ManagementPort\", \"ip_address\"]}}, \"tls_key_modulus_md5\": {\"description\": \"MD5 checksum of the TLS Key Modulus\", \"value\": {\"get_attr\": [\"NodeTLSData\", \"key_modulus_md5\"]}}, \"hostname\": {\"description\": \"Hostname of the server\", \"value\": {\"get_attr\": [\"Controller\", \"name\"]}}, \"external_ip_address\": {\"description\": \"IP address of the server in the External network\", \"value\": {\"get_attr\": [\"ExternalPort\", \"ip_address\"]}}, \"known_hosts_entry\": {\"description\": \"Entry for ssh known hosts\", \"value\": {\"str_replace\": {\"params\": {\"StorageMgmtHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage_mgmt\", \"short\"]}, \"DOMAIN\": {\"get_param\": \"CloudDomain\"}, \"TenantHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"tenant\", \"short\"]}, \"CTLPLANEHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"ctlplane\", \"short\"]}, \"ManagementHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"management\", \"short\"]}, \"InternalApiIP\": {\"get_attr\": [\"InternalApiPort\", \"ip_address\"]}, \"HOSTSSHPUBKEY\": {\"get_attr\": [\"SshHostPubKey\", \"ecdsa\"]}, \"CTLPLANEIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"ExternalHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"external\", \"short\"]}, \"ExternalIP\": {\"get_attr\": [\"ExternalPort\", \"ip_address\"]}, \"StorageIP\": {\"get_attr\": [\"StoragePort\", \"ip_address\"]}, \"PRIMARYHOST\": {\"get_attr\": [\"Controller\", \"name\"]}, 
\"InternalApiHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"internal_api\", \"short\"]}, \"ManagementIP\": {\"get_attr\": [\"ManagementPort\", \"ip_address\"]}, \"StorageHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage\", \"short\"]}, \"TenantIP\": {\"get_attr\": [\"TenantPort\", \"ip_address\"]}, \"PRIMARYIP\": {\"get_attr\": [\"NetIpMap\", \"net_ip_map\", {\"get_param\": [\"ServiceNetMap\", \"ControllerHostnameResolveNetwork\"]}]}, \"StorageMgmtIP\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_address\"]}}, \"template\": \"PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,ExternalIP,ExternalHOST.DOMAIN,ExternalHOST,InternalApiIP,InternalApiHOST.DOMAIN,InternalApiHOST,StorageIP,StorageHOST.DOMAIN,StorageHOST,StorageMgmtIP,StorageMgmtHOST.DOMAIN,StorageMgmtHOST,TenantIP,TenantHOST.DOMAIN,TenantHOST,ManagementIP,ManagementHOST.DOMAIN,ManagementHOST,CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY\"}}}, \"hosts_entry\": {\"value\": {\"str_replace\": {\"params\": {\"StorageMgmtHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage_mgmt\", \"short\"]}, \"DOMAIN\": {\"get_param\": \"CloudDomain\"}, \"TenantHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"tenant\", \"short\"]}, \"CTLPLANEHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"ctlplane\", \"short\"]}, \"ManagementHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"management\", \"short\"]}, \"InternalApiIP\": {\"get_attr\": [\"InternalApiPort\", \"ip_address\"]}, \"CTLPLANEIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"ExternalHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"external\", \"short\"]}, \"ExternalIP\": {\"get_attr\": [\"ExternalPort\", \"ip_address\"]}, \"StorageIP\": {\"get_attr\": [\"StoragePort\", \"ip_address\"]}, \"PRIMARYHOST\": {\"get_attr\": [\"Controller\", \"name\"]}, \"InternalApiHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"internal_api\", \"short\"]}, \"ManagementIP\": {\"get_attr\": [\"ManagementPort\", \"ip_address\"]}, \"StorageHOST\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage\", \"short\"]}, \"TenantIP\": {\"get_attr\": [\"TenantPort\", \"ip_address\"]}, \"PRIMARYIP\": {\"get_attr\": [\"NetIpMap\", \"net_ip_map\", {\"get_param\": [\"ServiceNetMap\", \"ControllerHostnameResolveNetwork\"]}]}, \"StorageMgmtIP\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_address\"]}}, \"template\": \"PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST\
0.355 | 3311: ExternalIP ExternalHOST.DOMAIN ExternalHOST\
0.355 | 3311: InternalApiIP InternalApiHOST.DOMAIN InternalApiHOST\
0.355 | 3311: StorageIP StorageHOST.DOMAIN StorageHOST\
0.355 | 3311: StorageMgmtIP StorageMgmtHOST.DOMAIN StorageMgmtHOST\
0.355 | 3311: TenantIP TenantHOST.DOMAIN TenantHOST\
0.355 | 3311: ManagementIP ManagementHOST.DOMAIN ManagementHOST\
0.355 | 3311: CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST\
0.355 | 3311: \"}}}, \"storage_ip_address\": {\"description\": \"IP address of the server in the Storage network\", \"value\": {\"get_attr\": [\"StoragePort\", \"ip_address\"]}}, \"os_collect_config\": {\"description\": \"The os-collect-config configuration associated with this server resource\", \"value\": {\"get_attr\": [\"Controller\", \"os_collect_config\"]}}}, \"conditions\": {\"server_not_blacklisted\": {\"not\": {\"equals\": [{\"get_param\": [\"DeploymentServerBlacklistDict\", {\"get_param\": \"Hostname\"}]}, 1]}}, \"role_network_deployment_actions_exists\": {\"not\": {\"equals\": [{\"get_param\": \"ControllerNetworkDeploymentActions\"}, []]}}, \"deployment_swift_data_map_unset\": {\"equals\": [{\"get_param\": [\"DeploymentSwiftDataMap\", {\"get_param\": \"Hostname\"}]}, \"\"]}}, \"resources\": {\"ControllerUpgradeInitDeployment\": {\"depends_on\": \"NetworkDeployment\", \"type\": \"OS::Heat::SoftwareDeployment\", \"properties\": {\"config\": {\"get_resource\": \"ControllerUpgradeInitConfig\"}, \"name\": \"ControllerUpgradeInitDeployment\", \"actions\": {\"if\": [\"server_not_blacklisted\", [\"CREATE\", \"UPDATE\"], []]}, \"server\": {\"get_resource\": \"Controller\"}}}, \"NodeUserData\": {\"type\": \"OS::TripleO::NodeUserData\"}, \"ControllerExtraConfigPre\": {\"depends_on\": \"ControllerDeployment\", \"type\": \"OS::TripleO::ControllerExtraConfigPre\", \"properties\": {\"server\": {\"get_resource\": \"Controller\"}}, \"condition\": \"server_not_blacklisted\"}, \"NodeTLSCAData\": {\"depends_on\": \"NetworkDeployment\", \"type\": \"OS::TripleO::NodeTLSCAData\", \"properties\": {\"server\": {\"get_resource\": \"Controller\"}}}, \"ManagementPort\": {\"type\": \"OS::TripleO::Controller::Ports::ManagementPort\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"ControlPlaneIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"IPPool\": {\"map_merge\": [{\"get_param\": \"ControllerIPs\"}]}}}, \"TenantPort\": {\"type\": \"OS::TripleO::Controller::Ports::TenantPort\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"ControlPlaneIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"IPPool\": {\"map_merge\": [{\"get_param\": \"ControllerIPs\"}]}}}, \"ExternalPort\": {\"type\": \"OS::TripleO::Controller::Ports::ExternalPort\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"ControlPlaneIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"IPPool\": {\"map_merge\": [{\"get_param\": \"ControllerIPs\"}]}}}, \"NetworkConfig\": {\"type\": \"OS::TripleO::Controller::Net::SoftwareConfig\", \"properties\": {\"StorageIpSubnet\": {\"get_attr\": [\"StoragePort\", \"ip_subnet\"]}, \"StorageMgmtIpSubnet\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_subnet\"]}, \"TenantIpSubnet\": {\"get_attr\": [\"TenantPort\", \"ip_subnet\"]}, \"ManagementIpSubnet\": {\"get_attr\": [\"ManagementPort\", \"ip_subnet\"]}, \"ControlPlaneIp\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"InternalApiIpSubnet\": {\"get_attr\": [\"InternalApiPort\", \"ip_subnet\"]}, \"ExternalIpSubnet\": {\"get_attr\": [\"ExternalPort\", \"ip_subnet\"]}}}, \"StorageMgmtPort\": {\"type\": \"OS::TripleO::Controller::Ports::StorageMgmtPort\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"ControlPlaneIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"IPPool\": {\"map_merge\": [{\"get_param\": \"ControllerIPs\"}]}}}, \"DeploymentActions\": {\"type\": \"OS::Heat::Value\", 
\"properties\": {\"value\": {\"if\": [\"server_not_blacklisted\", [\"CREATE\", \"UPDATE\"], []]}}}, \"ControllerDeployment\": {\"depends_on\": \"ControllerUpgradeInitDeployment\", \"type\": \"OS::Heat::StructuredDeployment\", \"properties\": {\"input_values\": {\"enable_package_upgrade\": {\"get_attr\": [\"UpdateDeployment\", \"update_managed_packages\"]}}, \"config\": {\"get_resource\": \"ControllerConfig\"}, \"name\": \"ControllerDeployment\", \"actions\": {\"if\": [\"server_not_blacklisted\", [\"CREATE\", \"UPDATE\"], []]}, \"server\": {\"get_resource\": \"Controller\"}}}, \"UpdateConfig\": {\"type\": \"OS::TripleO::Tasks::PackageUpdate\"}, \"InternalApiPort\": {\"type\": \"OS::TripleO::Controller::Ports::InternalApiPort\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"ControlPlaneIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"IPPool\": {\"map_merge\": [{\"get_param\": \"ControllerIPs\"}]}}}, \"NodeTLSData\": {\"depends_on\": \"NodeTLSCAData\", \"type\": \"OS::TripleO::NodeTLSData\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"server\": {\"get_resource\": \"Controller\"}}}, \"NodeExtraConfig\": {\"depends_on\": [\"ControllerExtraConfigPre\", \"NodeTLSData\"], \"type\": \"OS::TripleO::NodeExtraConfig\", \"properties\": {\"server\": {\"get_resource\": \"Controller\"}}, \"condition\": \"server_not_blacklisted\"}, \"UpdateDeployment\": {\"depends_on\": \"NetworkDeployment\", \"type\": \"OS::Heat::SoftwareDeployment\", \"properties\": {\"input_values\": {\"update_identifier\": {\"get_param\": \"UpdateIdentifier\"}}, \"config\": {\"get_resource\": \"UpdateConfig\"}, \"name\": \"UpdateDeployment\", \"actions\": {\"if\": [\"server_not_blacklisted\", [\"CREATE\", \"UPDATE\"], []]}, \"server\": {\"get_resource\": \"Controller\"}}}, \"UserData\": {\"type\": \"OS::Heat::MultipartMime\", \"properties\": {\"parts\": [{\"config\": {\"get_resource\": \"NodeAdminUserData\"}, \"type\": \"multipart\"}, {\"config\": {\"get_resource\": \"NodeUserData\"}, \"type\": \"multipart\"}, {\"config\": {\"get_resource\": \"RoleUserData\"}, \"type\": \"multipart\"}]}}, \"ControllerConfig\": {\"type\": \"OS::Heat::StructuredConfig\", \"properties\": {\"group\": \"hiera\", \"config\": {\"hierarchy\": [\"\\\"%{::uuid}\\\"\", \"heat_config_%{::deploy_config_name}\", \"config_step\", \"controller_extraconfig\", \"extraconfig\", \"service_names\", \"service_configs\", \"controller\", \"bootstrap_node\", \"all_nodes\", \"vip_data\", \"\\\"%{::osfamily}\\\"\", \"neutron_bigswitch_data\", \"neutron_cisco_data\", \"cisco_n1kv_data\", \"midonet_data\", \"cisco_aci_data\"], \"datafiles\": {\"service_names\": {\"sensu::subscriptions\": {\"get_param\": \"MonitoringSubscriptions\"}, \"service_names\": {\"get_param\": \"ServiceNames\"}}, \"controller\": {\"tripleo::packages::enable_upgrade\": {\"get_input\": \"enable_package_upgrade\"}, \"tripleo::profile::base::logging::fluentd::fluentd_sources\": {\"get_param\": \"LoggingSources\"}, \"tripleo::profile::base::logging::fluentd::fluentd_groups\": {\"get_param\": \"LoggingGroups\"}, \"fqdn_internal_api\": {\"get_attr\": [\"NetHostMap\", \"value\", \"internal_api\", \"fqdn\"]}, \"fqdn_storage_mgmt\": {\"get_attr\": [\"NetHostMap\", \"value\", \"storage_mgmt\", \"fqdn\"]}, \"fqdn_management\": {\"get_attr\": [\"NetHostMap\", \"value\", \"management\", \"fqdn\"]}, \"fqdn_external\": {\"get_attr\": [\"NetHostMap\", \"value\", \"external\", \"fqdn\"]}, \"fqdn_storage\": {\"get_attr\": [\"NetHostMap\", \"value\", 
\"storage\", \"fqdn\"]}, \"fqdn_ctlplane\": {\"get_attr\": [\"NetHostMap\", \"value\", \"ctlplane\", \"fqdn\"]}, \"fqdn_tenant\": {\"get_attr\": [\"NetHostMap\", \"value\", \"tenant\", \"fqdn\"]}}, \"extraconfig\": {\"get_param\": \"ExtraConfig\"}, \"service_configs\": {\"map_replace\": [{\"get_param\": \"ServiceConfigSettings\"}, {\"values\": {\"get_attr\": [\"NetIpMap\", \"net_ip_map\"]}}]}, \"controller_extraconfig\": {\"map_merge\": [{\"get_param\": \"ControllerExtraConfig\"}]}}, \"merge_behavior\": \"deeper\"}}}, \"PreNetworkConfig\": {\"type\": \"OS::TripleO::Controller::PreNetworkConfig\", \"properties\": {\"RoleParameters\": {\"get_param\": \"RoleParameters\"}, \"deployment_actions\": {\"get_attr\": [\"DeploymentActions\", \"value\"]}, \"server\": {\"get_resource\": \"Controller\"}, \"ServiceNames\": {\"get_param\": \"ServiceNames\"}}}, \"StoragePort\": {\"type\": \"OS::TripleO::Controller::Ports::StoragePort\", \"properties\": {\"NodeIndex\": {\"get_param\": \"NodeIndex\"}, \"ControlPlaneIP\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}, \"IPPool\": {\"map_merge\": [{\"get_param\": \"ControllerIPs\"}]}}}, \"NetworkDeployment\": {\"depends_on\": \"PreNetworkConfig\", \"type\": \"OS::TripleO::SoftwareDeployment\", \"properties\": {\"input_values\": {\"interface_name\": {\"get_param\": \"NeutronPublicInterface\"}, \"bridge_name\": {\"get_param\": \"NeutronPhysicalBridge\"}}, \"config\": {\"get_resource\": \"NetworkConfig\"}, \"name\": \"NetworkDeployment\", \"actions\": {\"if\": [\"server_not_blacklisted\", {\"if\": [\"role_network_deployment_actions_exists\", {\"get_param\": \"ControllerNetworkDeploymentActions\"}, {\"get_param\": \"NetworkDeploymentActions\"}]}, []]}, \"server\": {\"get_resource\": \"Controller\"}}}, \"NetHostMap\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"ctlplane\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"ctlplane\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"ctlplane\", {\"get_param\": \"CloudDomain\"}]]}}, \"management\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"management\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"management\", {\"get_param\": \"CloudDomain\"}]]}}, \"external\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"external\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"external\", {\"get_param\": \"CloudDomain\"}]]}}, \"internal_api\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"internalapi\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"internalapi\", {\"get_param\": \"CloudDomain\"}]]}}, \"storage_mgmt\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"storagemgmt\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"storagemgmt\", {\"get_param\": \"CloudDomain\"}]]}}, \"storage\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"storage\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"storage\", {\"get_param\": \"CloudDomain\"}]]}}, \"tenant\": {\"short\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"tenant\"]]}, \"fqdn\": {\"list_join\": [\".\", [{\"get_attr\": [\"Controller\", \"name\"]}, \"tenant\", 
{\"get_param\": \"CloudDomain\"}]]}}}}}, \"Controller\": {\"type\": \"OS::TripleO::ControllerServer\", \"properties\": {\"deployment_swift_data\": {\"if\": [\"deployment_swift_data_map_unset\", {}, {\"get_param\": [\"DeploymentSwiftDataMap\", {\"get_param\": \"Hostname\"}]}]}, \"user_data_format\": \"SOFTWARE_CONFIG\", \"name\": {\"str_replace\": {\"params\": {\"get_param\": \"HostnameMap\"}, \"template\": {\"get_param\": \"Hostname\"}}}, \"key_name\": {\"get_param\": \"KeyName\"}, \"image\": {\"get_param\": \"ControllerImage\"}, \"image_update_policy\": {\"get_param\": \"ImageUpdatePolicy\"}, \"user_data\": {\"get_resource\": \"UserData\"}, \"metadata\": {\"map_merge\": [{\"get_param\": \"ServerMetadata\"}, {\"get_param\": \"ControllerServerMetadata\"}, {\"get_param\": \"ServiceMetadataSettings\"}]}, \"flavor\": {\"get_param\": \"OvercloudControllerFlavor\"}, \"software_config_transport\": {\"get_param\": \"SoftwareConfigTransport\"}, \"networks\": [{\"network\": \"ctlplane\"}], \"scheduler_hints\": {\"get_param\": \"ControllerSchedulerHints\"}}, \"metadata\": {\"os-collect-config\": {\"splay\": {\"get_param\": \"ConfigCollectSplay\"}, \"command\": {\"get_param\": \"ConfigCommand\"}}}}, \"SshHostPubKey\": {\"depends_on\": \"ControllerDeployment\", \"type\": \"OS::TripleO::Ssh::HostPubKey\", \"properties\": {\"deployment_actions\": {\"get_attr\": [\"DeploymentActions\", \"value\"]}, \"server\": {\"get_resource\": \"Controller\"}}}, \"RoleUserData\": {\"type\": \"OS::TripleO::Controller::NodeUserData\"}, \"NodeAdminUserData\": {\"type\": \"OS::TripleO::NodeAdminUserData\"}, \"ControllerUpgradeInitConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"list_join\": [\"\", [\"#!/bin/bash\
0.355 | 3311: \
0.355 | 3311: \", \"if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\
0.355 | 3311: \
0.355 | 3311: \", {\"get_param\": \"UpgradeInitCommand\"}, {\"get_param\": \"UpgradeInitCommonCommand\"}]]}}}, \"NetIpMap\": {\"type\": \"OS::TripleO::Network::Ports::NetIpMap\", \"properties\": {\"ExternalIp\": {\"get_attr\": [\"ExternalPort\", \"ip_address\"]}, \"StorageIp\": {\"get_attr\": [\"StoragePort\", \"ip_address\"]}, \"ManagementIpSubnet\": {\"get_attr\": [\"ManagementPort\", \"ip_subnet\"]}, \"TenantIpSubnet\": {\"get_attr\": [\"TenantPort\", \"ip_subnet\"]}, \"StorageIpSubnet\": {\"get_attr\": [\"StoragePort\", \"ip_subnet\"]}, \"ManagementIpUri\": {\"get_attr\": [\"ManagementPort\", \"ip_address_uri\"]}, \"StorageMgmtIpSubnet\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_subnet\"]}, \"StorageIpUri\": {\"get_attr\": [\"StoragePort\", \"ip_address_uri\"]}, \"TenantIp\": {\"get_attr\": [\"TenantPort\", \"ip_address\"]}, \"ExternalIpUri\": {\"get_attr\": [\"ExternalPort\", \"ip_address_uri\"]}, \"StorageMgmtIp\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_address\"]}, \"StorageMgmtIpUri\": {\"get_attr\": [\"StorageMgmtPort\", \"ip_address_uri\"]}, \"InternalApiIp\": {\"get_attr\": [\"InternalApiPort\", \"ip_address\"]}, \"InternalApiIpUri\": {\"get_attr\": [\"InternalApiPort\", \"ip_address_uri\"]}, \"ExternalIpSubnet\": {\"get_attr\": [\"ExternalPort\", \"ip_subnet\"]}, \"InternalApiIpSubnet\": {\"get_attr\": [\"InternalApiPort\", \"ip_subnet\"]}, \"TenantIpUri\": {\"get_attr\": [\"TenantPort\", \"ip_address_uri\"]}, \"ManagementIp\": {\"get_attr\": [\"ManagementPort\", \"ip_address\"]}, \"ControlPlaneIp\": {\"get_attr\": [\"Controller\", \"networks\", \"ctlplane\", 0]}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-central.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Ceilometer Central Agent role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"config_settings\"]}, {\"ceilometer_redis_password\": {\"get_param\": \"RedisPassword\"}, \"central_namespace\": true}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"ceilometer_agent_central_enabled\", \"command\": \"systemctl is-enabled openstack-ceilometer-central\", \"name\": \"Check if ceilometer_agent_central is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-ceilometer-central' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"ceilometer_agent_central_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-ceilometer-central is running\", \"tags\": \"step0,validation\"}, {\"when\": \"ceilometer_agent_central_enabled.rc == 0\", \"name\": \"Stop ceilometer_agent_central service\", \"service\": \"name=openstack-ceilometer-central state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"ceilometer\"], \"service_name\": \"ceilometer_agent_central\", \"step_config\": \"include ::tripleo::profile::base::ceilometer::agent::polling\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCeilometerCentral\"}, \"service_config_settings\": {\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"service_config_settings\"]}, \"logging_source\": {\"get_param\": \"CeilometerAgentCentralLoggingSource\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer Central Agent service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"CeilometerAgentCentralLoggingSource\": {\"default\": {\"path\": \"/var/log/ceilometer/central.log\", \"tag\": \"openstack.ceilometer.agent.central\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"MonitoringSubscriptionCeilometerCentral\": {\"default\": \"overcloud-ceilometer-agent-central\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"CeilometerServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_maintenance_mode.sh": "#!/bin/bash
0.355 | 3311: set -x
0.355 | 3311:
0.355 | 3311: # On initial deployment, the pacemaker service is disabled and is-active exits
0.355 | 3311: # 3 in that case, so allow this to fail gracefully.
0.355 | 3311: pacemaker_status=$(systemctl is-active pacemaker || :)
0.355 | 3311:
0.355 | 3311: if [ \"$pacemaker_status\" = \"active\" ]; then
0.355 | 3311: pcs property set maintenance-mode=true
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # We need to reload haproxy in case the certificate changed because
0.355 | 3311: # puppet doesn't know the contents of the cert file. We shouldn't
0.355 | 3311: # reload it if it wasn't already active (such as if using external
0.355 | 3311: # loadbalancer or on initial deployment).
0.355 | 3311: haproxy_status=$(systemctl is-active haproxy || :)
0.355 | 3311: if [ \"$haproxy_status\" = \"active\" ]; then
0.355 | 3311: systemctl reload haproxy
0.355 | 3311: fi
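# A note on the probe pattern above: `systemctl is-active` exits non-zero
# for inactive, disabled, or unknown units (exit 3 for the disabled case,
# as the script's own comment says), and the `|| :` no-op swallows that
# status so the command substitution never propagates a failure; the
# script then branches on the captured string. A minimal sketch of the
# same guard (hypothetical unit name, not from this log):
#
#   status=$(systemctl is-active my-unit || :)
#   if [ "$status" = "active" ]; then
#       systemctl reload my-unit
#   fi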
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-notification.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Ceilometer Notification Agent role.\", \"value\": {\"config_settings\": {\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"ceilometer_agent_notification_enabled\", \"command\": \"systemctl is-enabled openstack-ceilometer-notification\", \"name\": \"Check if ceilometer_agent_notification is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-ceilometer-notification' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"ceilometer_agent_notification_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-ceilometer-notification is running\", \"tags\": \"step0,validation\"}, {\"when\": \"ceilometer_agent_notification_enabled.rc == 0\", \"name\": \"Stop ceilometer_agent_notification service\", \"service\": \"name=openstack-ceilometer-notification state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"ceilometer\"], \"service_name\": \"ceilometer_agent_notification\", \"step_config\": \"include ::tripleo::profile::base::ceilometer::agent::notification\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCeilometerNotification\"}, \"service_config_settings\": {\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"service_config_settings\"]}, \"logging_source\": {\"get_param\": \"CeilometerAgentNotificationLoggingSource\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer Notification Agent service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"CeilometerAgentNotificationLoggingSource\": {\"default\": {\"path\": \"/var/log/ceilometer/agent-notification.log\", \"tag\": \"openstack.ceilometer.agent.notification\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MonitoringSubscriptionCeilometerNotification\": {\"default\": \"overcloud-ceilometer-agent-notification\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"CeilometerServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ca-certs.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for injecting CA certificates.\", \"value\": {\"service_name\": \"ca_certs\", \"step_config\": \"include ::tripleo::trusted_cas\", \"config_settings\": {\"tripleo::trusted_cas::ca_map\": {\"get_param\": \"CAMap\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"HAproxy service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"CAMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Map containing the CA certs and information needed for deploying them.\
0.355 | 3311: \"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-scheduler.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Cinder Scheduler role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCinderScheduler\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"CinderBase\", \"role_data\", \"config_settings\"]}, {\"cinder::scheduler::scheduler_driver\": \"cinder.scheduler.filter_scheduler.FilterScheduler\"}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"cinder_scheduler_enabled\", \"command\": \"systemctl is-enabled openstack-cinder-scheduler\", \"name\": \"Check if cinder_scheduler is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-cinder-scheduler' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"cinder_scheduler_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-cinder-scheduler is running\", \"tags\": \"step0,validation\"}, {\"when\": \"cinder_scheduler_enabled.rc == 0\", \"name\": \"Stop cinder_scheduler service\", \"service\": \"name=openstack-cinder-scheduler state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"cinder\"], \"service_name\": \"cinder_scheduler\", \"logging_source\": {\"get_param\": \"CinderSchedulerLoggingSource\"}, \"step_config\": \"include ::tripleo::profile::base::cinder::scheduler\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Cinder Scheduler service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"MonitoringSubscriptionCinderScheduler\": {\"default\": \"overcloud-cinder-scheduler\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"CinderSchedulerLoggingSource\": {\"default\": {\"path\": \"/var/log/cinder/cinder-scheduler.log\", \"tag\": \"openstack.cinder.scheduler\"}, \"type\": \"json\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"CinderBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-proxy.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Swift Proxy service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"SwiftProxyNodeTimeout\": {\"default\": 60, \"type\": \"number\", \"description\": \"Timeout for requests going from swift-proxy to swift a/c/o services.\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"SwiftCeilometerPipelineEnabled\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Set to False to disable the swift proxy ceilometer pipeline.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"SwiftCeilometerIgnoreProjects\": {\"default\": [\"service\"], \"type\": \"comma_delimited_list\", \"description\": \"Comma-seperated list of project names to ignore.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"SwiftWorkers\": {\"default\": \"auto\", \"type\": \"string\", \"description\": \"Number of workers for Swift service.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"SwiftPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the swift service account\"}, \"MonitoringSubscriptionSwiftProxy\": {\"default\": \"overcloud-swift-proxy\", \"type\": \"string\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Swift proxy service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionSwiftProxy\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"SwiftBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"config_settings\"]}, {\"swift::proxy::node_timeout\": {\"get_param\": \"SwiftProxyNodeTimeout\"}, \"swift::proxy::authtoken::password\": {\"get_param\": \"SwiftPassword\"}, \"swift::proxy::workers\": {\"get_param\": \"SwiftWorkers\"}, \"swift::proxy::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri\"]}, \"swift::proxy::authtoken::project_name\": \"service\", \"swift::proxy::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}}, {\"if\": [\"ceilometer_pipeline_enabled\", {\"swift::proxy::ceilometer::nonblocking_notify\": true, \"swift::proxy::ceilometer::ignore_projects\": {\"get_param\": \"SwiftCeilometerIgnoreProjects\"}, \"swift::proxy::ceilometer::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"swift::proxy::ceilometer::rabbit_user\": {\"get_param\": \"RabbitUserName\"}, \"swift::proxy::ceilometer::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"swift::proxy::ceilometer::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri\"]}, \"swift::proxy::ceilometer::password\": 
{\"get_param\": \"SwiftPassword\"}, \"swift::proxy::ceilometer::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}}, {}]}, {\"tripleo::profile::base::swift::proxy::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"swift::proxy::account_autocreate\": true, \"swift::proxy::pipeline\": {\"yaql\": {\"expression\": \"$.data.pipeline.where($ != '')\", \"data\": {\"pipeline\": [\"catch_errors\", \"healthcheck\", \"proxy-logging\", \"cache\", \"ratelimit\", \"bulk\", \"tempurl\", \"formpost\", \"authtoken\", \"keystone\", \"staticweb\", \"copy\", \"container_quotas\", \"account_quotas\", \"slo\", \"dlo\", \"versioned_writes\", {\"if\": [\"ceilometer_pipeline_enabled\", \"ceilometer\", \"\"]}, \"proxy-logging\", \"proxy-server\"]}}}, \"tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"swift::proxy::versioned_writes::allow_versioned_writes\": true, \"swift::proxy::keystone::operator_roles\": [\"admin\", \"swiftoperator\", \"ResellerAdmin\"], \"swift::proxy::port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}, \"tripleo::profile::base::swift::proxy::tls_proxy_bind_ip\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}, \"tripleo.swift_proxy.firewall_rules\": {\"122 swift proxy\": {\"dport\": [8080, 13808]}}, \"swift::proxy::staticweb::url_base\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"uri_no_suffix\"]}, \"tripleo::profile::base::swift::proxy::tls_proxy_fqdn\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"swift::proxy::proxy_local_net_ip\": {\"if\": [\"use_tls_proxy\", \"localhost\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}, \"tripleo::profile::base::swift::proxy::ceilometer_enabled\": {\"get_param\": \"SwiftCeilometerPipelineEnabled\"}, \"tripleo::profile::base::swift::proxy::tls_proxy_port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}}]}, \"upgrade_tasks\": [{\"name\": \"Stop swift_proxy service\", \"service\": \"name=openstack-swift-proxy state=stopped\", \"tags\": \"step1\"}], \"service_config_settings\": {\"keystone\": {\"swift::keystone::auth::internal_url_s3\": {\"get_param\": [\"EndpointMap\", \"SwiftS3Internal\", \"uri\"]}, \"swift::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"swift::keystone::auth::configure_s3_endpoint\": false, \"swift::keystone::auth::admin_url_s3\": {\"get_param\": [\"EndpointMap\", \"SwiftS3Admin\", \"uri\"]}, \"swift::keystone::auth::password\": {\"get_param\": \"SwiftPassword\"}, \"swift::keystone::auth::tenant\": \"service\", \"swift::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"uri\"]}, \"swift::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"uri\"]}, \"swift::keystone::auth::public_url_s3\": {\"get_param\": [\"EndpointMap\", \"SwiftS3Public\", \"uri\"]}, \"swift::keystone::auth::operator_roles\": [\"admin\", \"swiftoperator\", \"ResellerAdmin\"], \"swift::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"uri\"]}}}, \"metadata_settings\": {\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"swift_proxy\", \"step_config\": \"include ::tripleo::profile::base::swift::proxy\
0.355 | 3311: \"}}}, \"conditions\": {\"use_tls_proxy\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}, \"ceilometer_pipeline_enabled\": {\"equals\": [{\"get_param\": \"SwiftCeilometerPipelineEnabled\"}, true]}}, \"resources\": {\"TLSProxyBase\": {\"type\": \"OS::TripleO::Services::TLSProxyBase\", \"properties\": {\"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}}}, \"SwiftBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/service_net_map.yaml": "{\"parameter_groups\": [{\"description\": \"Do not use deprecated params, they will be removed.\", \"parameters\": [\"ServiceNetMapDeprecatedMapping\"], \"label\": \"deprecated\"}], \"heat_template_version\": \"pike\", \"description\": \"Mapping of service_name_network -> network name\
0.355 | 3311: \", \"parameters\": {\"ExternalNetName\": {\"default\": \"external\", \"type\": \"string\", \"description\": \"The name of the external network.\"}, \"TenantNetName\": {\"default\": \"tenant\", \"type\": \"string\", \"description\": \"The name of the tenant network.\"}, \"ServiceNetMapDefaults\": {\"default\": {\"IronicApiNetwork\": \"ctlplane\", \"OpendaylightApiNetwork\": \"internal_api\", \"PacemakerRemoteNetwork\": \"internal_api\", \"HeatApiCloudwatchNetwork\": \"internal_api\", \"SwiftStorageNetwork\": \"storage_mgmt\", \"CephRgwNetwork\": \"storage\", \"NeutronTenantNetwork\": \"tenant\", \"ManilaApiNetwork\": \"internal_api\", \"ContrailWebuiNetwork\": \"internal_api\", \"MongodbNetwork\": \"internal_api\", \"CeilometerApiNetwork\": \"internal_api\", \"CephStorageHostnameResolveNetwork\": \"storage\", \"Ec2ApiMetadataNetwork\": \"internal_api\", \"MemcachedNetwork\": \"internal_api\", \"Ec2ApiNetwork\": \"internal_api\", \"GnocchiApiNetwork\": \"internal_api\", \"KeystoneAdminApiNetwork\": \"ctlplane\", \"IronicNetwork\": \"ctlplane\", \"NovaLibvirtNetwork\": \"internal_api\", \"ContrailConfigNetwork\": \"internal_api\", \"CephMonNetwork\": \"storage\", \"MistralApiNetwork\": \"internal_api\", \"QdrNetwork\": \"internal_api\", \"NovaColdMigrationNetwork\": \"ctlplane\", \"ContrailAnalyticsDatabaseNetwork\": \"internal_api\", \"NovaMetadataNetwork\": \"internal_api\", \"AodhApiNetwork\": \"internal_api\", \"NovaPlacementNetwork\": \"internal_api\", \"GlanceApiNetwork\": \"internal_api\", \"ContrailControlNetwork\": \"internal_api\", \"ControllerHostnameResolveNetwork\": \"internal_api\", \"CongressApiNetwork\": \"internal_api\", \"NeutronApiNetwork\": \"internal_api\", \"HeatApiCfnNetwork\": \"internal_api\", \"SwiftProxyNetwork\": \"storage\", \"OvnDbsNetwork\": \"internal_api\", \"ContrailAnalyticsNetwork\": \"internal_api\", \"NovaApiNetwork\": \"internal_api\", \"ContrailDatabaseNetwork\": \"internal_api\", \"TackerApiNetwork\": \"internal_api\", \"EtcdNetwork\": \"internal_api\", \"KeystonePublicApiNetwork\": \"internal_api\", \"PankoApiNetwork\": \"internal_api\", \"CephClusterNetwork\": \"storage_mgmt\", \"ZaqarApiNetwork\": \"internal_api\", \"HeatApiNetwork\": \"internal_api\", \"ApacheNetwork\": \"internal_api\", \"ContrailTsnNetwork\": \"internal_api\", \"CinderApiNetwork\": \"internal_api\", \"NovaVncProxyNetwork\": \"internal_api\", \"RedisNetwork\": \"internal_api\", \"SaharaApiNetwork\": \"internal_api\", \"BarbicanApiNetwork\": \"internal_api\", \"OctaviaApiNetwork\": \"internal_api\", \"HorizonNetwork\": \"internal_api\", \"MysqlNetwork\": \"internal_api\", \"IronicInspectorNetwork\": \"ctlplane\", \"PublicNetwork\": \"external\", \"CinderIscsiNetwork\": \"storage\", \"RabbitmqNetwork\": \"internal_api\"}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults. 
Note that the key in this map must match the service_name in the service template, e.g if the service_name is heat_api the key must be either heat_api_network, or optionally HeatApiNetwork (which will be internally converted to transform captalization to underscores).\"}, \"InternalApiNetName\": {\"default\": \"internal_api\", \"type\": \"string\", \"description\": \"The name of the internal_api network.\"}, \"StorageMgmtNetName\": {\"default\": \"storage_mgmt\", \"type\": \"string\", \"description\": \"The name of the storage_mgmt network.\"}, \"StorageNetName\": {\"default\": \"storage\", \"type\": \"string\", \"description\": \"The name of the storage network.\"}, \"ServiceNetMapDeprecatedMapping\": {\"default\": {\"SwiftMgmtNetwork\": \"SwiftStorageNetwork\", \"MongoDbNetwork\": \"MongodbNetwork\", \"CephPublicNetwork\": \"CephMonNetwork\", \"RabbitMqNetwork\": \"RabbitmqNetwork\"}, \"type\": \"json\", \"description\": \"Mapping older deprecated service names, intended for internal use only, this will be removed in future.\"}, \"ManagementNetName\": {\"default\": \"management\", \"type\": \"string\", \"description\": \"The name of the management network.\"}}, \"outputs\": {\"service_net_map\": {\"value\": {\"get_attr\": [\"ServiceNetMapValue\", \"value\"]}}, \"service_net_map_lower\": {\"value\": {\"yaql\": {\"expression\": \"dict($.data.map.items().select([ regex(`([a-z0-9])([A-Z])`).replace($[0], '\\\\\\\\1_\\\\\\\\2').toLower(), $[1]]))\", \"data\": {\"map\": {\"get_attr\": [\"ServiceNetMapValue\", \"value\"]}}}}}}, \"resources\": {\"ServiceNetMapValue\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"map_merge\": [{\"map_replace\": [{\"get_param\": \"ServiceNetMapDefaults\"}, {\"values\": {\"management\": {\"get_param\": \"ManagementNetName\"}, \"external\": {\"get_param\": \"ExternalNetName\"}, \"internal_api\": {\"get_param\": \"InternalApiNetName\"}, \"storage_mgmt\": {\"get_param\": \"StorageMgmtNetName\"}, \"storage\": {\"get_param\": \"StorageNetName\"}, \"tenant\": {\"get_param\": \"TenantNetName\"}}}]}, {\"map_replace\": [{\"get_param\": \"ServiceNetMap\"}, {\"keys\": {\"get_param\": \"ServiceNetMapDeprecatedMapping\"}}]}]}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/ci/common/all-nodes-validation-disabled.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The ID of the AllNodesValidationsImpl resource.\", \"value\": {\"get_resource\": \"AllNodesValidationsImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software Config to drive validations that occur on all nodes. Note, you need the heat-config-script element built into your images, due to the script group below.\
0.355 | 3311: This implementation of the validations is a noop that always reports success.\
0.355 | 3311: \", \"parameters\": {\"ValidateFqdn\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.\"}, \"PingTestIps\": {\"default\": \"\", \"type\": \"string\", \"description\": \"A string containing a space separated list of IP addresses used to ping test each available network interface.\"}, \"ValidateNtp\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Validation to ensure at least one time source is accessible.\"}}, \"resources\": {\"AllNodesValidationsImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"default\": {\"get_param\": \"PingTestIps\"}, \"name\": \"ping_test_ips\"}, {\"default\": {\"get_param\": \"ValidateFqdn\"}, \"name\": \"validate_fqdn\"}, {\"default\": {\"get_param\": \"ValidateNtp\"}, \"name\": \"validate_ntp\"}], \"config\": \"#!/bin/bash\
0.355 | 3311: exit 0\
0.355 | 3311: \", \"group\": \"script\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/default_passwords.yaml": "{\"outputs\": {\"passwords\": {\"description\": \"Password data\", \"value\": {\"mysql_root_password\": {\"get_param\": \"DefaultMysqlRootPassword\"}, \"rabbit_cookie\": {\"get_param\": \"DefaultRabbitCookie\"}, \"pcsd_password\": {\"get_param\": \"DefaultPcsdPassword\"}, \"heat_auth_encryption_key\": {\"get_param\": \"DefaultHeatAuthEncryptionKey\"}, \"horizon_secret\": {\"get_param\": \"DefaultHorizonSecret\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"Passwords we manage at the top level\", \"parameters\": {\"DefaultMysqlRootPassword\": {\"type\": \"string\"}, \"DefaultHorizonSecret\": {\"type\": \"string\"}, \"DefaultHeatAuthEncryptionKey\": {\"type\": \"string\"}, \"DefaultRabbitCookie\": {\"type\": \"string\"}, \"DefaultPcsdPassword\": {\"type\": \"string\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/deploy-steps-tasks.yaml": " # Note the indentation here is required as it's joined
0.355 | 3311: # to create a playbook in deploy-steps.j2
0.355 | 3311:
0.355 | 3311: #####################################################
0.355 | 3311: # Per step puppet configuration of the baremetal host
0.355 | 3311: #####################################################
0.355 | 3311: - name: Write the config_step hieradata
0.355 | 3311: copy: content=\"{{dict(step=step|int)|to_json}}\" dest=/etc/puppet/hieradata/config_step.json force=true mode=0600
0.355 | 3311: - name: Run puppet host configuration for step {{step}}
0.355 | 3311: command: >-
0.355 | 3311: puppet apply
0.355 | 3311: --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
0.355 | 3311: --logdest syslog --logdest console --color=false
0.355 | 3311: /var/lib/tripleo-config/puppet_step_config.pp
0.355 | 3311: changed_when: false
0.355 | 3311: check_mode: no
0.355 | 3311: register: outputs
0.355 | 3311: failed_when: false
0.355 | 3311: no_log: true
0.355 | 3311: - debug: var=(outputs.stderr|default('')).split('\
0.355 | 3311: ')|union(outputs.stdout_lines|default([]))
0.355 | 3311: when: outputs is defined
0.355 | 3311: failed_when: outputs|failed
0.355 | 3311: ######################################
0.355 | 3311: # Generate config via docker-puppet.py
0.355 | 3311: ######################################
0.355 | 3311: - name: Run docker-puppet tasks (generate config)
0.355 | 3311: shell: python /var/lib/docker-puppet/docker-puppet.py
0.355 | 3311: environment:
0.355 | 3311: NET_HOST: 'true'
0.355 | 3311: DEBUG: '{{docker_puppet_debug|default(false)}}'
0.355 | 3311: PROCESS_COUNT: '{{docker_puppet_process_count|default(3)}}'
0.355 | 3311: when: step == \"1\"
0.355 | 3311: changed_when: false
0.355 | 3311: check_mode: no
0.355 | 3311: register: outputs
0.355 | 3311: failed_when: false
0.355 | 3311: no_log: true
0.355 | 3311: - debug: var=(outputs.stderr|default('')).split('\
0.355 | 3311: ')|union(outputs.stdout_lines|default([]))
0.355 | 3311: when: outputs is defined
0.355 | 3311: failed_when: outputs|failed
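# The docker-puppet task above exposes its whole contract through the
# script path and the NET_HOST / DEBUG / PROCESS_COUNT environment knobs.
# Re-running the config-generation step by hand should therefore reduce to
# something like the following (a sketch assuming the same paths; the
# values shown are the playbook's own defaults, and the manual invocation
# itself is hypothetical):
#
#   NET_HOST=true DEBUG=false PROCESS_COUNT=3 \
#     python /var/lib/docker-puppet/docker-puppet.py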
0.355 | 3311: ##################################################
0.355 | 3311: # Per step starting of the containers using paunch
0.355 | 3311: ##################################################
0.355 | 3311: - name: Check if /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json exists
0.355 | 3311: stat:
0.355 | 3311: path: /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
0.355 | 3311: register: docker_config_json
0.355 | 3311: # Note docker-puppet.py generates the hashed-*.json file, which is a copy of
0.355 | 3311: # the *step_n.json with a hash of the generated external config added
0.355 | 3311: # This acts as a salt to enable restarting the container if config changes
0.355 | 3311: - name: Start containers for step {{step}}
0.355 | 3311: command: >-
0.355 | 3311: paunch --debug apply
0.355 | 3311: --file /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
0.355 | 3311: --config-id tripleo_step{{step}} --managed-by tripleo-{{role_name}}
0.355 | 3311: when: docker_config_json.stat.exists
0.355 | 3311: changed_when: false
0.355 | 3311: check_mode: no
0.355 | 3311: register: outputs
0.355 | 3311: failed_when: false
0.355 | 3311: no_log: true
0.355 | 3311: - debug: var=(outputs.stderr|default('')).split('\
0.355 | 3311: ')|union(outputs.stdout_lines|default([]))
0.355 | 3311: when: outputs is defined
0.355 | 3311: failed_when: outputs|failed
0.355 | 3311: ########################################################
0.355 | 3311: # Bootstrap tasks, only performed on bootstrap_server_id
0.355 | 3311: ########################################################
0.355 | 3311: - name: Check if /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json exists
0.355 | 3311: stat:
0.355 | 3311: path: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
0.355 | 3311: register: docker_puppet_tasks_json
0.355 | 3311: - name: Run docker-puppet tasks (bootstrap tasks)
0.355 | 3311: shell: python /var/lib/docker-puppet/docker-puppet.py
0.355 | 3311: environment:
0.355 | 3311: CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
0.355 | 3311: NET_HOST: \"true\"
0.355 | 3311: NO_ARCHIVE: \"true\"
0.355 | 3311: STEP: \"{{step}}\"
0.355 | 3311: when: deploy_server_id == bootstrap_server_id and docker_puppet_tasks_json.stat.exists
0.355 | 3311: changed_when: false
0.355 | 3311: check_mode: no
0.355 | 3311: register: outputs
0.355 | 3311: failed_when: false
0.355 | 3311: no_log: true
0.355 | 3311: - debug: var=(outputs.stderr|default('')).split('\
0.355 | 3311: ')|union(outputs.stdout_lines|default([]))
0.355 | 3311: when: outputs is defined
0.355 | 3311: failed_when: outputs|failed
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-listener.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Aodh Listener service.\", \"value\": {\"service_name\": \"aodh_listener\", \"step_config\": \"include tripleo::profile::base::aodh::listener\
0.355 | 3311: \", \"config_settings\": {\"get_attr\": [\"AodhBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"aodh_listener_enabled\", \"command\": \"systemctl is-enabled openstack-aodh-listener\", \"name\": \"Check if aodh_listener is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-aodh-listener' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"aodh_listener_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-aodh-listener is running\", \"tags\": \"step0,validation\"}, {\"when\": \"aodh_listener_enabled.rc == 0\", \"name\": \"Stop aodh_listener service\", \"service\": \"name=openstack-aodh-listener state=stopped\", \"tags\": \"step1\"}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionAodhListener\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Aodh Listener service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"MonitoringSubscriptionAodhListener\": {\"default\": \"overcloud-ceilometer-aodh-listener\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"AodhBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/all-nodes-config.yaml": "{\"outputs\": {\"config_id\": {\"description\": \"The ID of the allNodesConfigImpl resource.\", \"value\": {\"get_resource\": \"allNodesConfigImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"All Nodes Config for Puppet\", \"parameters\": {\"ExternalNetName\": {\"default\": \"external\", \"type\": \"string\", \"description\": \"The name of the external network.\"}, \"TenantNetName\": {\"default\": \"tenant\", \"type\": \"string\", \"description\": \"The name of the tenant network.\"}, \"CertmongerCA\": {\"default\": \"IPA\", \"type\": \"string\"}, \"short_service_bootstrap_node\": {\"type\": \"json\"}, \"service_ips\": {\"type\": \"json\"}, \"cloud_name_external\": {\"type\": \"string\"}, \"StackAction\": {\"type\": \"string\", \"description\": \"Heat action on performed top-level stack. Note StackUpdateType is set to UPGRADE when a major-version upgrade is in progress.\
0.355 | 3311: \", \"constraints\": [{\"allowed_values\": [\"CREATE\", \"UPDATE\"]}]}, \"cloud_name_storage_mgmt\": {\"type\": \"string\"}, \"cloud_name_storage\": {\"type\": \"string\"}, \"DeployIdentifier\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Setting this to a unique value will re-run any deployment tasks which perform configuration on a Heat stack-update.\
0.355 | 3311: \"}, \"StackUpdateType\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Type of update, to differentiate between UPGRADE and UPDATE cases when StackAction is UPDATE (both are the same stack action).\
0.355 | 3311: \", \"constraints\": [{\"allowed_values\": [\"\", \"UPGRADE\"]}]}, \"StorageMgmtNetName\": {\"default\": \"storage_mgmt\", \"type\": \"string\", \"description\": \"The name of the storage_mgmt network.\"}, \"RedisVirtualIP\": {\"default\": \"\", \"type\": \"string\"}, \"InternalApiNetName\": {\"default\": \"internal_api\", \"type\": \"string\", \"description\": \"The name of the internal_api network.\"}, \"cloud_name_internal_api\": {\"type\": \"string\"}, \"controller_ips\": {\"type\": \"comma_delimited_list\"}, \"controller_names\": {\"type\": \"comma_delimited_list\"}, \"short_service_node_names\": {\"type\": \"json\"}, \"cloud_name_ctlplane\": {\"type\": \"string\"}, \"ServiceNetMap\": {\"type\": \"json\"}, \"service_node_names\": {\"type\": \"json\"}, \"NetVipMap\": {\"type\": \"json\"}, \"enabled_services\": {\"type\": \"comma_delimited_list\"}, \"UpdateIdentifier\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Setting to a previously unused value during stack-update will trigger package update on all nodes\
0.355 | 3311: \"}, \"StorageNetName\": {\"default\": \"storage\", \"type\": \"string\", \"description\": \"The name of the storage network.\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"ManagementNetName\": {\"default\": \"management\", \"type\": \"string\", \"description\": \"The name of the management network.\"}}, \"resources\": {\"allNodesConfigImpl\": {\"type\": \"OS::Heat::StructuredConfig\", \"properties\": {\"group\": \"hiera\", \"config\": {\"datafiles\": {\"all_nodes\": {\"map_merge\": [{\"enabled_services\": {\"yaql\": {\"expression\": \"$.data.distinct()\", \"data\": {\"get_param\": \"enabled_services\"}}}}, {\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_param\": \"enabled_services\"}}, \"template\": {\"SERVICE_enabled\": \"true\"}}}}, {\"yaql\": {\"expression\": \"dict($.data.map.items().where(isString($[1]) and not $[1].endsWith(\\\"_network\\\")))\", \"data\": {\"map\": {\"map_replace\": [{\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_param\": \"enabled_services\"}}, \"template\": {\"SERVICE_network\": \"SERVICE_network\"}}}}, {\"values\": {\"get_param\": \"ServiceNetMap\"}}]}}}}, {\"keystone_admin_api_network\": {\"get_param\": [\"ServiceNetMap\", \"keystone_admin_api_network\"]}, \"keystone_public_api_network\": {\"get_param\": [\"ServiceNetMap\", \"keystone_public_api_network\"]}}, {\"get_param\": \"service_ips\"}, {\"get_param\": \"service_node_names\"}, {\"get_param\": \"short_service_node_names\"}, {\"get_param\": \"short_service_bootstrap_node\"}, {\"update_identifier\": {\"get_param\": \"UpdateIdentifier\"}, \"controller_node_names\": {\"list_join\": [\",\", {\"get_param\": \"controller_names\"}]}, \"controller_node_ips\": {\"list_join\": [\",\", {\"get_param\": \"controller_ips\"}]}, \"stack_action\": {\"get_param\": \"StackAction\"}, \"deploy_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"stack_update_type\": {\"get_param\": \"StackUpdateType\"}}]}, \"vip_data\": {\"map_merge\": [{\"yaql\": {\"expression\": \"dict($.data.map.items().where(isString($[1]) and not $[1].endsWith(\\\"_network\\\")))\", \"data\": {\"map\": {\"map_replace\": [{\"map_replace\": [{\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_param\": \"enabled_services\"}}, \"template\": {\"SERVICE_vip\": \"SERVICE_network\"}}}}, {\"values\": {\"get_param\": \"ServiceNetMap\"}}]}, {\"values\": {\"get_param\": \"NetVipMap\"}}]}}}}, {\"internal_api_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"InternalApiNetName\"}]}, \"tripleo::keepalived::public_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"ExternalNetName\"}]}, \"redis_vip\": {\"get_param\": \"RedisVirtualIP\"}, \"tripleo::keepalived::storage_mgmt_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"StorageMgmtNetName\"}]}, \"keystone_admin_api_vip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": [\"ServiceNetMap\", \"keystone_admin_api_network\"]}]}, \"tripleo::haproxy::public_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"ExternalNetName\"}]}, \"tripleo::redis_notification::haproxy_monitor_ip\": {\"get_param\": [\"NetVipMap\", \"ctlplane\"]}, \"cloud_name_storage_mgmt\": {\"get_param\": \"cloud_name_storage_mgmt\"}, \"cloud_name_storage\": {\"get_param\": \"cloud_name_storage\"}, \"public_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"ExternalNetName\"}]}, \"keystone_public_api_vip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": [\"ServiceNetMap\", 
\"keystone_public_api_network\"]}]}, \"certmonger_ca\": {\"get_param\": \"CertmongerCA\"}, \"tripleo::keepalived::internal_api_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"InternalApiNetName\"}]}, \"cloud_name_internal_api\": {\"get_param\": \"cloud_name_internal_api\"}, \"controller_virtual_ip\": {\"get_param\": [\"NetVipMap\", \"ctlplane\"]}, \"tripleo::keepalived::redis_virtual_ip\": {\"get_param\": \"RedisVirtualIP\"}, \"storage_mgmt_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"StorageMgmtNetName\"}]}, \"cloud_name_ctlplane\": {\"get_param\": \"cloud_name_ctlplane\"}, \"storage_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"StorageNetName\"}]}, \"tripleo::haproxy::controller_virtual_ip\": {\"get_param\": [\"NetVipMap\", \"ctlplane\"]}, \"cloud_name_external\": {\"get_param\": \"cloud_name_external\"}, \"enable_internal_tls\": {\"get_param\": \"EnableInternalTLS\"}, \"tripleo::keepalived::controller_virtual_ip\": {\"get_param\": [\"NetVipMap\", \"ctlplane\"]}, \"tripleo::keepalived::storage_virtual_ip\": {\"get_param\": [\"NetVipMap\", {\"get_param\": \"StorageNetName\"}]}}]}, \"bootstrap_node\": {\"bootstrap_nodeid\": {\"get_input\": \"bootstrap_nodeid\"}, \"bootstrap_nodeid_ip\": {\"get_input\": \"bootstrap_nodeid_ip\"}}}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/deployed-server/deployed-server.yaml": "{\"outputs\": {\"name\": {\"value\": {\"get_attr\": [\"HostsEntryDeployment\", \"hostname\"]}}, \"networks\": {\"value\": {\"ctlplane\": [{\"get_attr\": [\"ControlPlanePort\", \"fixed_ips\", 0, \"ip_address\"]}]}}, \"OS::stack_id\": {\"value\": {\"get_resource\": \"deployed-server\"}}, \"os_collect_config\": {\"value\": {\"get_attr\": [\"deployed-server\", \"os_collect_config\"]}}}, \"heat_template_version\": \"pike\", \"parameters\": {\"deployment_swift_data\": {\"default\": {}, \"type\": \"json\"}, \"user_data_format\": {\"type\": \"string\"}, \"name\": {\"default\": \"deployed-server\", \"type\": \"string\"}, \"UpgradeInitCommand\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Command or script snippet to run on all overcloud nodes to\
0.355 | 3311: initialize the upgrade process. E.g. a repository switch.\
0.355 | 3311: \"}, \"key_name\": {\"default\": \"unused\", \"type\": \"string\", \"description\": \"Name of keypair to assign to servers\"}, \"image\": {\"default\": \"unused\", \"type\": \"string\"}, \"user_data\": {\"default\": \"\", \"type\": \"string\"}, \"image_update_policy\": {\"default\": \"\", \"type\": \"string\"}, \"metadata\": {\"default\": {}, \"type\": \"json\"}, \"flavor\": {\"default\": \"unused\", \"type\": \"string\"}, \"software_config_transport\": {\"default\": \"POLL_SERVER_CFN\", \"type\": \"string\"}, \"networks\": {\"default\": \"\", \"type\": \"comma_delimited_list\"}, \"security_groups\": {\"default\": [], \"type\": \"json\"}, \"scheduler_hints\": {\"default\": {}, \"type\": \"json\", \"description\": \"Optional scheduler hints to pass to nova\"}}, \"resources\": {\"deployed-server\": {\"type\": \"OS::Heat::DeployedServer\", \"properties\": {\"deployment_swift_data\": {\"get_param\": \"deployment_swift_data\"}, \"name\": {\"get_param\": \"name\"}, \"software_config_transport\": {\"get_param\": \"software_config_transport\"}}}, \"UpgradeInitDeployment\": {\"type\": \"OS::Heat::SoftwareDeployment\", \"properties\": {\"config\": {\"get_resource\": \"UpgradeInitConfig\"}, \"name\": \"UpgradeInitDeployment\", \"server\": {\"get_resource\": \"deployed-server\"}}}, \"HostsEntryDeployment\": {\"type\": \"OS::Heat::SoftwareDeployment\", \"properties\": {\"config\": {\"get_resource\": \"HostsEntryConfig\"}, \"name\": \"HostsEntryDeployment\", \"server\": {\"get_resource\": \"deployed-server\"}}}, \"UpgradeInitConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"list_join\": [\"\", [\"#!/bin/bash\
0.355 | 3311: \
0.355 | 3311: \", \"if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\
0.355 | 3311: \
0.355 | 3311: \", {\"get_param\": \"UpgradeInitCommand\"}]]}}}, \"InstanceIdDeployment\": {\"depends_on\": \"UpgradeInitDeployment\", \"type\": \"OS::Heat::StructuredDeployment\", \"properties\": {\"config\": {\"get_resource\": \"InstanceIdConfig\"}, \"name\": \"InstanceIdDeployment\", \"server\": {\"get_resource\": \"deployed-server\"}}}, \"InstanceIdConfig\": {\"type\": \"OS::Heat::StructuredConfig\", \"properties\": {\"group\": \"apply-config\", \"config\": {\"instance-id\": {\"get_resource\": \"deployed-server\"}}}}, \"ControlPlanePort\": {\"type\": \"OS::TripleO::DeployedServer::ControlPlanePort\", \"properties\": {\"replacement_policy\": \"AUTO\", \"network\": \"ctlplane\", \"name\": {\"list_join\": [\"-\", [{\"get_attr\": [\"HostsEntryDeployment\", \"hostname\"]}, \"ctlplane\"]]}}}, \"DeployedServerBootstrapConfig\": {\"type\": \"OS::TripleO::DeployedServer::Bootstrap\", \"properties\": {\"server\": {\"get_resource\": \"deployed-server\"}}}, \"HostsEntryConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": \"#!/bin/bash\
0.355 | 3311: set -eux\
0.355 | 3311: mkdir -p $heat_outputs_path\
0.355 | 3311: host=$(hostname -s)\
0.355 | 3311: echo -n $host > $heat_outputs_path.hostname\
0.355 | 3311: cat $heat_outputs_path.hostname\
0.355 | 3311: \", \"outputs\": [{\"name\": \"hostname\", \"description\": \"hostname\"}]}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/extraconfig/pre_deploy/default.yaml": "{\"outputs\": {\"deploy_stdout\": {\"value\": \"None\"}}, \"heat_template_version\": \"pike\", \"description\": \"Noop Extra Pre-Deployment Config\", \"parameters\": {\"server\": {\"type\": \"string\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-nsx.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron NSX plugin\", \"value\": {\"service_name\": \"neutron_plugin_nsx\", \"step_config\": \"include tripleo::profile::base::neutron::plugins::nsx\", \"config_settings\": {\"neutron::plugins::nsx::nsx_api_user\": {\"get_param\": \"NsxApiUser\"}, \"neutron::plugins::nsx::metadata_proxy_uuid\": {\"get_param\": \"MetadataProxyUuid\"}, \"neutron::plugins::nsx::native_dhcp_metadata\": {\"get_param\": \"NativeDhcpMetadata\"}, \"neutron::plugins::nsx::nsx_api_password\": {\"get_param\": \"NsxApiPassword\"}, \"neutron::plugins::nsx::default_overlay_tz\": {\"get_param\": \"DefaultOverlayTz\"}, \"neutron::plugins::nsx::default_tier0_router\": {\"get_param\": \"DefaultTier0Router\"}, \"neutron::plugins::nsx::nsx_api_managers\": {\"get_param\": \"NsxApiManagers\"}, \"neutron::plugins::nsx::dhcp_profile_uuid\": {\"get_param\": \"DhcpProfileUuid\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron NSX\
0.355 | 3311: \", \"parameters\": {\"DefaultTier0Router\": {\"type\": \"string\", \"description\": \"UUID of the default tier0 router that will be used for connecting to tier1 logical routers and configuring external networks.\"}, \"NativeDhcpMetadata\": {\"default\": true, \"type\": \"boolean\", \"description\": \"This is the flag to indicate if using native DHCP/Metadata or not.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"DhcpProfileUuid\": {\"type\": \"string\", \"description\": \"This is the UUID of the NSX DHCP Profile that will be used to enable native DHCP service.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"DefaultOverlayTz\": {\"type\": \"string\", \"description\": \"UUID of the default NSX overlay transport zone.\"}, \"NsxApiPassword\": {\"type\": \"string\", \"description\": \"Password of NSX Manager.\"}, \"MetadataProxyUuid\": {\"type\": \"string\", \"description\": \"This is the UUID of the NSX Metadata Proxy that will be used to enable native metadata service.\"}, \"NsxApiUser\": {\"type\": \"string\", \"description\": \"User name of NSX Manager.\"}, \"NsxApiManagers\": {\"type\": \"string\", \"description\": \"IP address of one or more NSX managers separated by commas.\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/scripts/run-os-net-config.sh": "#!/bin/bash
0.355 | 3311: # The following environment variables may be set to substitute in a
0.355 | 3311: # custom bridge or interface name. Normally these are provided by the calling
0.355 | 3311: # SoftwareConfig resource, but they may also be set manually for testing.
0.355 | 3311: # $bridge_name : The bridge device name to apply
0.355 | 3311: # $interface_name : The interface name to apply
0.355 | 3311: #
0.355 | 3311: # This token is also replaced via a str_replace in the SoftwareConfig running
0.355 | 3311: # the script - in the future we may extend this to also work with a variable, e.g.
0.355 | 3311: # a deployment input via input_values
0.355 | 3311: # $network_config : the json serialized os-net-config config to apply
0.355 | 3311: #
0.355 | 3311: set -eux
0.355 | 3311:
0.355 | 3311: function get_metadata_ip() {
0.355 | 3311:
0.355 | 3311: local METADATA_IP
0.355 | 3311:
0.355 | 3311: # Look for a variety of Heat transports
0.355 | 3311: # FIXME: Heat should provide a way to obtain this in a single place
0.355 | 3311: for URL in os-collect-config.cfn.metadata_url os-collect-config.heat.auth_url os-collect-config.request.metadata_url os-collect-config.zaqar.auth_url; do
0.355 | 3311: METADATA_IP=$(os-apply-config --key $URL --key-default '' --type raw 2>/dev/null | sed -e 's|http.*://\\([^:]*\\).*|\\1|')
0.355 | 3311: [ -n \"$METADATA_IP\" ] && break
0.355 | 3311: done
0.355 | 3311:
0.355 | 3311: echo $METADATA_IP
0.355 | 3311:
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function is_local_ip() {
0.355 | 3311: local IP_TO_CHECK=$1
0.355 | 3311: if ip -o a | grep \"inet6\\? $IP_TO_CHECK/\" &>/dev/null; then
0.355 | 3311: return 0
0.355 | 3311: else
0.355 | 3311: return 1
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function ping_metadata_ip() {
0.355 | 3311: local METADATA_IP=$(get_metadata_ip)
0.355 | 3311:
0.355 | 3311: if [ -n \"$METADATA_IP\" ] && ! is_local_ip $METADATA_IP; then
0.355 | 3311:
0.355 | 3311: echo -n \"Trying to ping metadata IP ${METADATA_IP}...\"
0.355 | 3311:
0.355 | 3311: local COUNT=0
0.355 | 3311: until ping -c 1 $METADATA_IP &> /dev/null; do
0.355 | 3311: COUNT=$(( $COUNT + 1 ))
0.355 | 3311: if [ $COUNT -eq 10 ]; then
0.355 | 3311: echo \"FAILURE\"
0.355 | 3311: echo \"$METADATA_IP is not pingable.\" >&2
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: echo \"SUCCESS\"
0.355 | 3311:
0.355 | 3311: else
0.355 | 3311: echo \"No metadata IP found. Skipping.\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function configure_safe_defaults() {
0.355 | 3311:
0.355 | 3311: [[ $? == 0 ]] && return 0
0.355 | 3311:
0.355 | 3311: cat > /etc/os-net-config/dhcp_all_interfaces.yaml <<EOF_CAT
0.355 | 3311: # This file is an autogenerated safe defaults file for os-net-config
0.355 | 3311: # which runs DHCP on all discovered interfaces to ensure connectivity
0.355 | 3311: # back to the undercloud for updates
0.355 | 3311: network_config:
0.355 | 3311: EOF_CAT
0.355 | 3311:
0.355 | 3311: for iface in $(ls /sys/class/net | grep -v ^lo$); do
0.355 | 3311: local mac_addr_type=\"$(cat /sys/class/net/${iface}/addr_assign_type)\"
0.355 | 3311: if [ \"$mac_addr_type\" != \"0\" ]; then
0.355 | 3311:             echo \"Device $iface has a generated MAC, skipping.\"
0.355 | 3311: else
0.355 | 3311: HAS_LINK=\"$(cat /sys/class/net/${iface}/carrier || echo 0)\"
0.355 | 3311:
0.355 | 3311: TRIES=10
0.355 | 3311: while [ \"$HAS_LINK\" == \"0\" -a $TRIES -gt 0 ]; do
0.355 | 3311: # Need to set the link up on each iteration
0.355 | 3311: ip link set dev $iface up &>/dev/null
0.355 | 3311: HAS_LINK=\"$(cat /sys/class/net/${iface}/carrier || echo 0)\"
0.355 | 3311: if [ \"$HAS_LINK\" == \"1\" ]; then
0.355 | 3311: break
0.355 | 3311: else
0.355 | 3311: sleep 1
0.355 | 3311: fi
0.355 | 3311: TRIES=$(( TRIES - 1 ))
0.355 | 3311: done
0.355 | 3311: if [ \"$HAS_LINK\" == \"1\" ] ; then
0.355 | 3311: cat >> /etc/os-net-config/dhcp_all_interfaces.yaml <<EOF_CAT
0.355 | 3311: -
0.355 | 3311: type: interface
0.355 | 3311: name: $iface
0.355 | 3311: use_dhcp: true
0.355 | 3311: EOF_CAT
0.355 | 3311: fi
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: set +e
0.355 | 3311: os-net-config -c /etc/os-net-config/dhcp_all_interfaces.yaml -v --detailed-exit-codes --cleanup
0.355 | 3311: RETVAL=$?
0.355 | 3311: set -e
0.355 | 3311: if [[ $RETVAL == 2 ]]; then
0.355 | 3311: ping_metadata_ip
0.355 | 3311: elif [[ $RETVAL != 0 ]]; then
0.355 | 3311: echo \"ERROR: configuration of safe defaults failed.\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: if [ -n '$network_config' ]; then
0.355 | 3311: if [ -z \"${disable_configure_safe_defaults:-}\" ]; then
0.355 | 3311: trap configure_safe_defaults EXIT
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: mkdir -p /etc/os-net-config
0.355 | 3311: # Note these variables come from the calling heat SoftwareConfig
0.355 | 3311: echo '$network_config' > /etc/os-net-config/config.json
0.355 | 3311:
0.355 | 3311: if [ \"$(type -t network_config_hook)\" = \"function\" ]; then
0.355 | 3311: network_config_hook
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: sed -i \"s/bridge_name/${bridge_name:-''}/\" /etc/os-net-config/config.json
0.355 | 3311: sed -i \"s/interface_name/${interface_name:-''}/\" /etc/os-net-config/config.json
0.355 | 3311:
0.355 | 3311: set +e
0.355 | 3311: os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
0.355 | 3311: RETVAL=$?
0.355 | 3311: set -e
0.355 | 3311:
0.355 | 3311: if [[ $RETVAL == 2 ]]; then
0.355 | 3311: ping_metadata_ip
0.355 | 3311:
0.355 | 3311:         # NOTE(dprince): this udev rule can apparently leak DHCP processes?
0.355 | 3311: # https://bugs.launchpad.net/tripleo/+bug/1538259
0.355 | 3311: # until we discover the root cause we can simply disable the
0.355 | 3311: # rule because networking has already been configured at this point
0.355 | 3311: if [ -f /etc/udev/rules.d/99-dhcp-all-interfaces.rules ]; then
0.355 | 3311: rm /etc/udev/rules.d/99-dhcp-all-interfaces.rules
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: elif [[ $RETVAL != 0 ]]; then
0.355 | 3311: echo \"ERROR: os-net-config configuration failed.\" >&2
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: fi
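
An aside on the run-os-net-config.sh script that ends above: it leans on two patterns worth calling out, an EXIT trap that applies safe DHCP-everywhere defaults only when the script is dying, and os-net-config's --detailed-exit-codes, where status 2 reports "changes were applied" rather than an error. Below is a minimal stand-alone sketch of that combination; apply_config is a hypothetical stand-in for the real os-net-config invocation.

    #!/bin/bash
    # Sketch of the trap + tri-state exit code pattern from run-os-net-config.sh.
    # apply_config is hypothetical: 0 = nothing to do, 2 = changes applied, else = failure.
    set -eu

    function fallback_config {
        # When run from the EXIT trap, $? is the script's pending exit status;
        # a clean exit means the main configuration worked and no fallback is needed.
        [[ $? == 0 ]] && return 0
        echo "main configuration failed, applying safe defaults" >&2
        # ... write a known-good DHCP-on-all-interfaces config and re-run the tool here ...
    }
    trap fallback_config EXIT

    set +e          # the tool's non-zero "changed" status must not kill the script
    apply_config    # hypothetical stand-in for: os-net-config -c ... --detailed-exit-codes
    RETVAL=$?
    set -e

    if [[ $RETVAL == 2 ]]; then
        echo "configuration changed, verifying connectivity"
    elif [[ $RETVAL != 0 ]]; then
        echo "ERROR: configuration failed" >&2
        exit 1
    fi

The same set +e / capture / set -e structure appears twice in the original: once around the real config run and once inside the fallback itself.
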
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-api-cfn.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Openstack Heat CloudFormation API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"MonitoringSubscriptionHeatApiCnf\": {\"default\": \"overcloud-heat-api-cfn\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HeatWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Heat service.\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"HeatApiCfnLoggingSource\": {\"default\": {\"path\": \"/var/log/heat/heat-api-cfn.log\", \"tag\": \"openstack.heat.api.cfn\"}, \"type\": \"json\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"HeatPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the Heat service and db account, used by the Heat services.\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Heat CloudFormation API role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"HeatBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"tripleo.heat_api_cfn.firewall_rules\": {\"125 heat_cfn\": {\"dport\": [8000, 13800]}}, \"heat::wsgi::apache_api_cfn::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiCfnNetwork\"]}, \"heat::api_cfn::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiCfnNetwork\"]}, \"heat::wsgi::apache_api_cfn::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiCfnNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"heat::api_cfn::service_name\": \"httpd\", \"heat::wsgi::apache_api_cfn::ssl\": {\"get_param\": \"EnableInternalTLS\"}}, {\"if\": [\"heat_workers_zero\", {}, {\"heat::wsgi::apache_api_cfn::workers\": {\"get_param\": \"HeatWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"heat_api_cfn_enabled\", \"command\": \"systemctl is-enabled openstack-heat-api-cfn\", \"name\": \"Check if heat_api_cfn is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"heat_api_cfn_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-heat-api-cfn is running\", \"tags\": \"step0,validation\"}, {\"ignore_errors\": true, \"shell\": \"httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi\", \"register\": \"heat_api_cfn_apache\", \"name\": \"check for heat_api_cfn running under apache (post upgrade)\", \"tags\": \"step1\"}, {\"when\": \"heat_api_cfn_apache.rc == 0\", \"name\": \"Stop heat_api_cfn service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"when\": \"heat_api_cfn_enabled.rc == 0\", 
\"name\": \"Stop and disable heat_api_cfn service (pre-upgrade not under httpd)\", \"service\": \"name=openstack-heat-api-cfn state=stopped enabled=no\", \"tags\": \"step1\"}], \"logging_groups\": [\"heat\"], \"service_name\": \"heat_api_cfn\", \"step_config\": \"include ::tripleo::profile::base::heat::api_cfn\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionHeatApiCnf\"}, \"service_config_settings\": {\"keystone\": {\"map_merge\": [{\"get_attr\": [\"HeatBase\", \"role_data\", \"service_config_settings\", \"keystone\"]}, {\"heat::keystone::auth_cfn::internal_url\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"uri\"]}, \"heat::keystone::auth_cfn::tenant\": \"service\", \"heat::keystone::auth_cfn::region\": {\"get_param\": \"KeystoneRegion\"}, \"heat::keystone::auth_cfn::admin_url\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"uri\"]}, \"heat::keystone::auth_cfn::password\": {\"get_param\": \"HeatPassword\"}, \"heat::keystone::auth_cfn::public_url\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"uri\"]}}]}}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"HeatApiCfnLoggingSource\"}}}}, \"conditions\": {\"heat_workers_zero\": {\"equals\": [{\"get_param\": \"HeatWorkers\"}, 0]}}, \"resources\": {\"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"HeatBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/ssh/known_hosts_config.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The SSHKnownHostsConfig resource.\", \"value\": {\"get_resource\": \"SSHKnownHostsConfig\"}}}, \"heat_template_version\": \"pike\", \"description\": \"SSH Known Hosts Config\", \"parameters\": {\"known_hosts\": {\"type\": \"string\"}}, \"resources\": {\"SSHKnownHostsConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"default\": {\"get_param\": \"known_hosts\"}, \"name\": \"known_hosts\"}], \"config\": \"#!/bin/bash\
0.355 | 3311: set -eux\
0.355 | 3311: set -o pipefail\
0.355 | 3311: \
0.355 | 3311: echo \\\"Creating ssh known hosts file\\\"\
0.355 | 3311: \
0.355 | 3311: if [ ! -z \\\"${known_hosts}\\\" ]; then\
0.355 | 3311: echo \\\"${known_hosts}\\\"\
0.355 | 3311: echo -ne \\\"${known_hosts}\\\" > /etc/ssh/ssh_known_hosts\
0.355 | 3311: chmod 0644 /etc/ssh/ssh_known_hosts\
0.355 | 3311: else\
0.355 | 3311: rm -f /etc/ssh/ssh_known_hosts\
0.355 | 3311: echo \\\"No ssh known hosts\\\"\
0.355 | 3311: fi\
0.355 | 3311: \", \"group\": \"script\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_common_functions.sh": "#!/bin/bash
0.355 | 3311:
0.355 | 3311: set -eu
0.355 | 3311:
0.355 | 3311: DEBUG=\"true\" # set false if the verbosity is a problem
0.355 | 3311: SCRIPT_NAME=$(basename $0)
0.355 | 3311: function log_debug {
0.355 | 3311: if [[ $DEBUG = \"true\" ]]; then
0.355 | 3311: echo \"`date` $SCRIPT_NAME tripleo-upgrade $(facter hostname) $1\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function is_bootstrap_node {
0.355 | 3311: if [ \"$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid | tr '[:upper:]' '[:lower:]')\" = \"$(facter hostname | tr '[:upper:]' '[:lower:]')\" ]; then
0.355 | 3311: log_debug \"Node is bootstrap\"
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function check_resource_pacemaker {
0.355 | 3311: if [ \"$#\" -ne 3 ]; then
0.355 | 3311: echo_error \"ERROR: check_resource function expects 3 parameters, $# given\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: local service=$1
0.355 | 3311: local state=$2
0.355 | 3311: local timeout=$3
0.355 | 3311:
0.355 | 3311: if [[ -z $(is_bootstrap_node) ]] ; then
0.355 | 3311: log_debug \"Node isn't bootstrap, skipping check for $service to be $state here \"
0.355 | 3311: return
0.355 | 3311: else
0.355 | 3311: log_debug \"Node is bootstrap checking $service to be $state here\"
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if [ \"$state\" = \"stopped\" ]; then
0.355 | 3311: match_for_incomplete='Started'
0.355 | 3311: else # started
0.355 | 3311: match_for_incomplete='Stopped'
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: nodes_local=$(pcs status | grep ^Online | sed 's/.*\\[ \\(.*\\) \\]/\\1/g' | sed 's/ /\\|/g')
0.355 | 3311: if timeout -k 10 $timeout crm_resource --wait; then
0.355 | 3311: node_states=$(pcs status --full | grep \"$service\" | grep -v Clone | { egrep \"$nodes_local\" || true; } )
0.355 | 3311: if echo \"$node_states\" | grep -q \"$match_for_incomplete\"; then
0.355 | 3311: echo_error \"ERROR: cluster finished transition but $service was not in $state state, exiting.\"
0.355 | 3311: exit 1
0.355 | 3311: else
0.355 | 3311: echo \"$service has $state\"
0.355 | 3311: fi
0.355 | 3311: else
0.355 | 3311: echo_error \"ERROR: cluster remained unstable for more than $timeout seconds, exiting.\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: }
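
The check_resource_pacemaker function above uses a nice inversion: after waiting for the cluster to settle under a hard deadline (timeout -k 10 $timeout crm_resource --wait), it greps the status output for the opposite of the requested state, so any leftover "Started" is proof that a stop did not complete. A rough stand-alone sketch of that inverted-match check, with get_states as a hypothetical stand-in for pcs status --full:

    #!/bin/bash
    set -eu

    # Hypothetical stand-in for: pcs status --full | grep "$service"
    function get_states { printf 'node-1 Started\nnode-2 Stopped\n'; }

    service=myservice
    state=stopped

    # Grep for the *incomplete* state: when "stopped" was requested,
    # any line still saying "Started" means the transition failed.
    if [ "$state" = "stopped" ]; then
        match_for_incomplete='Started'
    else
        match_for_incomplete='Stopped'
    fi

    if get_states | grep -q "$match_for_incomplete"; then
        echo "ERROR: $service was not fully $state" >&2
    else
        echo "$service has $state"
    fi
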
0.355 | 3311:
0.355 | 3311: function pcmk_running {
0.355 | 3311: if [[ $(systemctl is-active pacemaker) = \"active\" ]] ; then
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function is_systemd_unknown {
0.355 | 3311: local service=$1
0.355 | 3311: if [[ $(systemctl is-active \"$service\") = \"unknown\" ]]; then
0.355 | 3311:     log_debug \"$service found to be unknown to systemd\"
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function grep_is_cluster_controlled {
0.355 | 3311: local service=$1
0.355 | 3311: if [[ -n $(systemctl status $service -l | grep Drop-In -A 5 | grep pacemaker) ||
0.355 | 3311: -n $(systemctl status $service -l | grep \"Cluster Controlled $service\") ]] ; then
0.355 | 3311: log_debug \"$service is pcmk managed from systemctl grep\"
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: function is_systemd_managed {
0.355 | 3311: local service=$1
0.355 | 3311: #if we have pcmk check to see if it is managed there
0.355 | 3311: if [[ -n $(pcmk_running) ]]; then
0.355 | 3311: if [[ -z $(pcs status --full | grep $service) && -z $(is_systemd_unknown $service) ]] ; then
0.355 | 3311: log_debug \"$service found to be systemd managed from pcs status\"
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: else
0.355 | 3311: # if it is \"unknown\" to systemd, then it is pacemaker managed
0.355 | 3311: if [[ -n $(is_systemd_unknown $service) ]] ; then
0.355 | 3311: return
0.355 | 3311: elif [[ -z $(grep_is_cluster_controlled $service) ]] ; then
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function is_pacemaker_managed {
0.355 | 3311: local service=$1
0.355 | 3311: #if we have pcmk check to see if it is managed there
0.355 | 3311: if [[ -n $(pcmk_running) ]]; then
0.355 | 3311: if [[ -n $(pcs status --full | grep $service) ]]; then
0.355 | 3311: log_debug \"$service found to be pcmk managed from pcs status\"
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: else
0.355 | 3311: # if it is unknown to systemd, then it is pcmk managed
0.355 | 3311: if [[ -n $(is_systemd_unknown $service) ]]; then
0.355 | 3311: echo \"true\"
0.355 | 3311: elif [[ -n $(grep_is_cluster_controlled $service) ]] ; then
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function is_managed {
0.355 | 3311: local service=$1
0.355 | 3311: if [[ -n $(is_pacemaker_managed $service) || -n $(is_systemd_managed $service) ]]; then
0.355 | 3311: echo \"true\"
0.355 | 3311: fi
0.355 | 3311: }
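
All of these helpers share the same bash idiom: a predicate "returns" by echoing true (or nothing at all), and callers test the captured output with [[ -n ... ]] rather than the exit status, which composes cleanly under set -eu. A tiny illustration of the idiom, detached from pacemaker (is_even and is_positive are toy predicates, not part of the scripts above):

    #!/bin/bash
    set -eu

    # Predicates echo "true" on success and stay silent otherwise,
    # so a failing check never trips set -e in the caller.
    function is_even { (( $1 % 2 == 0 )) && echo "true" || true; }
    function is_positive { (( $1 > 0 )) && echo "true" || true; }

    n=4
    # Combine predicates the same way is_managed combines
    # is_pacemaker_managed and is_systemd_managed:
    if [[ -n $(is_even $n) && -n $(is_positive $n) ]]; then
        echo "$n is even and positive"
    fi
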
0.355 | 3311:
0.355 | 3311: function check_resource_systemd {
0.355 | 3311:
0.355 | 3311: if [ \"$#\" -ne 3 ]; then
0.355 | 3311: echo_error \"ERROR: check_resource function expects 3 parameters, $# given\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: local service=$1
0.355 | 3311: local state=$2
0.355 | 3311: local timeout=$3
0.355 | 3311: local check_interval=3
0.355 | 3311:
0.355 | 3311: if [ \"$state\" = \"stopped\" ]; then
0.355 | 3311: match_for_incomplete='active'
0.355 | 3311: else # started
0.355 | 3311: match_for_incomplete='inactive'
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: log_debug \"Going to check_resource_systemd for $service to be $state\"
0.355 | 3311:
0.355 | 3311:   # sanity check: is the service systemd managed?
0.355 | 3311: if [[ -z $(is_systemd_managed $service) ]]; then
0.355 | 3311: echo \"ERROR - $service not found to be systemd managed.\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: tstart=$(date +%s)
0.355 | 3311: tend=$(( $tstart + $timeout ))
0.355 | 3311: while (( $(date +%s) < $tend )); do
0.355 | 3311: if [[ \"$(systemctl is-active $service)\" = $match_for_incomplete ]]; then
0.355 | 3311: echo \"$service not yet $state, sleeping $check_interval seconds.\"
0.355 | 3311: sleep $check_interval
0.355 | 3311: else
0.355 | 3311: echo \"$service is $state\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311:
0.355 | 3311: echo \"Timed out waiting for $service to go to $state after $timeout seconds\"
0.355 | 3311: exit 1
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: function check_resource {
0.355 | 3311: local service=$1
0.355 | 3311: local pcmk_managed=$(is_pacemaker_managed $service)
0.355 | 3311: local systemd_managed=$(is_systemd_managed $service)
0.355 | 3311:
0.355 | 3311: if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
0.355 | 3311: log_debug \"ERROR $service managed by both systemd and pcmk - SKIPPING\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if [[ -n $pcmk_managed ]]; then
0.355 | 3311: check_resource_pacemaker $@
0.355 | 3311: return
0.355 | 3311: elif [[ -n $systemd_managed ]]; then
0.355 | 3311: check_resource_systemd $@
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311: log_debug \"ERROR cannot check_resource for $service, not managed here?\"
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function manage_systemd_service {
0.355 | 3311: local action=$1
0.355 | 3311: local service=$2
0.355 | 3311: log_debug \"Going to systemctl $action $service\"
0.355 | 3311: systemctl $action $service
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function manage_pacemaker_service {
0.355 | 3311: local action=$1
0.355 | 3311: local service=$2
0.355 | 3311: # not if pacemaker isn't running!
0.355 | 3311: if [[ -z $(pcmk_running) ]]; then
0.355 | 3311: echo \"$(facter hostname) pacemaker not active, skipping $action $service here\"
0.355 | 3311: elif [[ -n $(is_bootstrap_node) ]]; then
0.355 | 3311: log_debug \"Going to pcs resource $action $service\"
0.355 | 3311: pcs resource $action $service
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function stop_or_disable_service {
0.355 | 3311: local service=$1
0.355 | 3311: local pcmk_managed=$(is_pacemaker_managed $service)
0.355 | 3311: local systemd_managed=$(is_systemd_managed $service)
0.355 | 3311:
0.355 | 3311: if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
0.355 | 3311: log_debug \"Skipping stop_or_disable $service due to management conflict\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: log_debug \"Stopping or disabling $service\"
0.355 | 3311: if [[ -n $pcmk_managed ]]; then
0.355 | 3311: manage_pacemaker_service disable $service
0.355 | 3311: return
0.355 | 3311: elif [[ -n $systemd_managed ]]; then
0.355 | 3311: manage_systemd_service stop $service
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311: log_debug \"ERROR: $service not managed here?\"
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function start_or_enable_service {
0.355 | 3311: local service=$1
0.355 | 3311: local pcmk_managed=$(is_pacemaker_managed $service)
0.355 | 3311: local systemd_managed=$(is_systemd_managed $service)
0.355 | 3311:
0.355 | 3311: if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
0.355 | 3311: log_debug \"Skipping start_or_enable $service due to management conflict\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: log_debug \"Starting or enabling $service\"
0.355 | 3311: if [[ -n $pcmk_managed ]]; then
0.355 | 3311: manage_pacemaker_service enable $service
0.355 | 3311: return
0.355 | 3311: elif [[ -n $systemd_managed ]]; then
0.355 | 3311: manage_systemd_service start $service
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311: log_debug \"ERROR $service not managed here?\"
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function restart_service {
0.355 | 3311: local service=$1
0.355 | 3311: local pcmk_managed=$(is_pacemaker_managed $service)
0.355 | 3311: local systemd_managed=$(is_systemd_managed $service)
0.355 | 3311:
0.355 | 3311: if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
0.355 | 3311: log_debug \"ERROR $service managed by both systemd and pcmk - SKIPPING\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: log_debug \"Restarting $service\"
0.355 | 3311: if [[ -n $pcmk_managed ]]; then
0.355 | 3311: manage_pacemaker_service restart $service
0.355 | 3311: return
0.355 | 3311: elif [[ -n $systemd_managed ]]; then
0.355 | 3311: manage_systemd_service restart $service
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311: log_debug \"ERROR $service not managed here?\"
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: function echo_error {
0.355 | 3311:     echo \"$@\" | tee /dev/fd/2
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: # swift is a special case because it is/was never handled by pacemaker
0.355 | 3311: # when stand-alone swift is used, only swift-proxy is running on controllers
0.355 | 3311: function systemctl_swift {
0.355 | 3311: services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \\
0.355 | 3311: openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \\
0.355 | 3311: openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy )
0.355 | 3311: local action=$1
0.355 | 3311: case $action in
0.355 | 3311: stop)
0.355 | 3311: services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}')
0.355 | 3311: ;;
0.355 | 3311: start)
0.355 | 3311: enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml tripleo::profile::base::swift::storage::enable_swift_storage)
0.355 | 3311: if [[ $enable_swift_storage != \"true\" ]]; then
0.355 | 3311: services=( openstack-swift-proxy )
0.355 | 3311: fi
0.355 | 3311: ;;
0.355 | 3311: *) echo \"Unknown action $action passed to systemctl_swift\"
0.355 | 3311: exit 1
0.355 | 3311: ;; # shouldn't ever happen...
0.355 | 3311: esac
0.355 | 3311: for service in ${services[@]}; do
0.355 | 3311: manage_systemd_service $action $service
0.355 | 3311: done
0.355 | 3311: }
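
Note how systemctl_swift starts from a default service list and rewrites it per action: stop operates on whatever openstack-swift- units are actually running, while start falls back to just the proxy when swift storage is disabled. The skeleton of that pattern, with dummy unit names in place of the real ones:

    #!/bin/bash
    set -eu

    services=( svc-a svc-b svc-c )   # dummy defaults, not real units
    action=${1:-start}

    case $action in
        stop)  services=( svc-a ) ;;   # pretend only svc-a is currently running
        start) ;;                      # keep the full default list
        *) echo "Unknown action $action" >&2; exit 1 ;;
    esac

    for service in "${services[@]}"; do
        echo "would run: systemctl $action $service"
    done
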
0.355 | 3311:
0.355 | 3311: # Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
0.355 | 3311: # Update condition and add --notriggerun for +bug/1669714
0.355 | 3311: function special_case_ovs_upgrade_if_needed {
0.355 | 3311: if rpm -qa | grep \"^openvswitch-2.5.0-14\" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep \"systemctl.*try-restart\" ; then
0.355 | 3311: echo \"Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected\"
0.355 | 3311: rm -rf OVS_UPGRADE
0.355 | 3311: mkdir OVS_UPGRADE && pushd OVS_UPGRADE
0.355 | 3311:         echo \"Attempting to download the latest openvswitch with yumdownloader\"
0.355 | 3311: yumdownloader --resolve openvswitch
0.355 | 3311: for pkg in $(ls -1 *.rpm); do
0.355 | 3311: if rpm -U --test $pkg 2>&1 | grep \"already installed\" ; then
0.355 | 3311: echo \"Looks like newer version of $pkg is already installed, skipping\"
0.355 | 3311: else
0.355 | 3311: echo \"Updating $pkg with --nopostun --notriggerun\"
0.355 | 3311: rpm -U --replacepkgs --nopostun --notriggerun $pkg
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: popd
0.355 | 3311: else
0.355 | 3311: echo \"Skipping manual upgrade of openvswitch - no restart in postun detected\"
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: # This code is meant to fix https://bugs.launchpad.net/tripleo/+bug/1686357 on
0.355 | 3311: # existing setups via a minor update workflow and be idempotent. We need to
0.355 | 3311: # run this before the yum update because we fix this up even when there are no
0.355 | 3311: # packages to update on the system (in which case the script exits).
0.355 | 3311: # This code must be called with set +eu (due to the ocf scripts being sourced)
0.355 | 3311: function fixup_wrong_ipv6_vip {
0.355 | 3311:     # This XPath query identifies all the VIPs in pacemaker with netmask /64. Those are IPv6-only resources that have the wrong netmask
0.355 | 3311: # This gives the address of the resource in the CIB, one address per line. For example:
0.355 | 3311: # /cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes[@id='ip-2001.db8.ca2.4..10-instance_attributes']\\
0.355 | 3311: # /nvpair[@id='ip-2001.db8.ca2.4..10-instance_attributes-cidr_netmask']
0.355 | 3311: vip_xpath_query=\"//resources/primitive[@type='IPaddr2']/instance_attributes/nvpair[@name='cidr_netmask' and @value='64']\"
0.355 | 3311: vip_xpath_xml_addresses=$(cibadmin --query --xpath \"$vip_xpath_query\" -e 2>/dev/null)
0.355 | 3311: # The following extracts the @id value of the resource
0.355 | 3311: vip_resources_to_fix=$(echo -e \"$vip_xpath_xml_addresses\" | sed -n \"s/.*primitive\\[@id='\\([^']*\\)'.*/\\1/p\")
0.355 | 3311:     # Running this in a subshell so that sourcing files cannot possibly affect the running script
0.355 | 3311: (
0.355 | 3311: OCF_PATH=\"/usr/lib/ocf/lib/heartbeat\"
0.355 | 3311: if [ -n \"$vip_resources_to_fix\" -a -f $OCF_PATH/ocf-shellfuncs -a -f $OCF_PATH/findif.sh ]; then
0.355 | 3311: source $OCF_PATH/ocf-shellfuncs
0.355 | 3311: source $OCF_PATH/findif.sh
0.355 | 3311: for resource in $vip_resources_to_fix; do
0.355 | 3311: echo \"Updating IPv6 VIP $resource with a /128 and a correct addrlabel\"
0.355 | 3311: # The following will give us something like:
0.355 | 3311: # <nvpair id=\"ip-2001.db8.ca2.4..10-instance_attributes-ip\" name=\"ip\" value=\"2001:db8:ca2:4::10\"/>
0.355 | 3311: ip_cib_nvpair=$(cibadmin --query --xpath \"//resources/primitive[@type='IPaddr2' and @id='$resource']/instance_attributes/nvpair[@name='ip']\")
0.355 | 3311: # Let's filter out the value of the nvpair to get the ip address
0.355 | 3311: ip_address=$(echo $ip_cib_nvpair | xmllint --xpath 'string(//nvpair/@value)' -)
0.355 | 3311: OCF_RESKEY_cidr_netmask=\"64\"
0.355 | 3311: OCF_RESKEY_ip=\"$ip_address\"
0.355 | 3311: # Unfortunately due to https://bugzilla.redhat.com/show_bug.cgi?id=1445628
0.355 | 3311:       # we need to find out the appropriate nic given the ip address.
0.355 | 3311: nic=$(findif $ip_address | awk '{ print $1 }')
0.355 | 3311: ret=$?
0.355 | 3311: if [ -z \"$nic\" -o $ret -ne 0 ]; then
0.355 | 3311: echo \"NIC autodetection failed for VIP $ip_address, not updating VIPs\"
0.355 | 3311: # Only exits the subshell
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: ocf_run -info pcs resource update --wait \"$resource\" ip=\"$ip_address\" cidr_netmask=128 nic=\"$nic\" lvs_ipv6_addrlabel=true lvs_ipv6_addrlabel_value=99
0.355 | 3311: ret=$?
0.355 | 3311: if [ $ret -ne 0 ]; then
0.355 | 3311: echo \"pcs resource update for VIP $resource failed, not updating VIPs\"
0.355 | 3311: # Only exits the subshell
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: fi
0.355 | 3311: )
0.355 | 3311: }
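
The comments in fixup_wrong_ipv6_vip describe a two-step extraction: an XPath query pulls the offending nvpair elements out of the CIB, then sed and xmllint recover the resource id and the ip attribute. The pipeline can be tried without a cluster by inlining the XML that cibadmin would normally return (this assumes xmllint from libxml2 is installed):

    #!/bin/bash
    set -eu

    # Canned stand-in for the nvpair XML that cibadmin --query --xpath returns.
    ip_cib_nvpair='<nvpair id="ip-2001.db8.ca2.4..10-instance_attributes-ip" name="ip" value="2001:db8:ca2:4::10"/>'

    # Same extraction as the script: pull the @value attribute out of the nvpair.
    ip_address=$(echo "$ip_cib_nvpair" | xmllint --xpath 'string(//nvpair/@value)' -)
    echo "extracted VIP address: $ip_address"

    # And the sed that recovers the primitive id from a CIB path:
    cib_path="/cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes"
    echo "$cib_path" | sed -n "s/.*primitive\[@id='\([^']*\)'.*/\1/p"
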
0.355 | 3311:
0.355 | 3311: # https://bugs.launchpad.net/tripleo/+bug/1704131 guard against yum update
0.355 | 3311: # waiting for an existing process until the heat stack time out
0.355 | 3311: function check_for_yum_lock {
0.355 | 3311: if [[ -f /var/run/yum.pid ]] ; then
0.355 | 3311: ERR=\"ERROR existing yum.pid detected - can't continue! Please ensure
0.355 | 3311: there is no other package update process for the duration of the minor update
0.355 | 3311: workflow. Exiting.\"
0.355 | 3311:     echo \"$ERR\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: # This function tries to resolve an RPM dependency issue that can arise when
0.355 | 3311: # updating ceph packages on nodes that do not run the ceph-osd service. These
0.355 | 3311: # nodes do not require the ceph-osd package, and updates will fail if the
0.355 | 3311: # ceph-osd package cannot be updated because it's not available in any enabled
0.355 | 3311: # repo. The dependency issue is resolved by removing the ceph-osd package from
0.355 | 3311: # nodes that don't require it.
0.355 | 3311: #
0.355 | 3311: # No change is made to nodes that use the ceph-osd service (e.g. ceph storage
0.355 | 3311: # nodes, and hyperconverged nodes running ceph-osd and compute services). The
0.355 | 3311: # ceph-osd package is left in place, and the currently enabled repos will be
0.355 | 3311: # used to update all ceph packages.
0.355 | 3311: function yum_pre_update {
0.355 | 3311: echo \"Checking for ceph-osd dependency issues\"
0.355 | 3311:
0.355 | 3311: # No need to proceed if the ceph-osd package isn't installed
0.355 | 3311: if ! rpm -q ceph-osd >/dev/null 2>&1; then
0.355 | 3311: echo \"ceph-osd package is not installed\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # Do not proceed if there's any sign that the ceph-osd package is in use:
0.355 | 3311: # - Are there OSD entries in /var/lib/ceph/osd?
0.355 | 3311: # - Are any ceph-osd processes running?
0.355 | 3311:     # - Are there any ceph data disks (as identified by 'ceph-disk')?
0.355 | 3311: if [ -n \"$(ls -A /var/lib/ceph/osd 2>/dev/null)\" ]; then
0.355 | 3311: echo \"ceph-osd package is required (there are OSD entries in /var/lib/ceph/osd)\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if [ \"$(pgrep -xc ceph-osd)\" != \"0\" ]; then
0.355 | 3311: echo \"ceph-osd package is required (there are ceph-osd processes running)\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if ceph-disk list |& grep -q \"ceph data\"; then
0.355 | 3311: echo \"ceph-osd package is required (ceph data disks detected)\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # Get a list of all ceph packages available from the currently enabled
0.355 | 3311: # repos. Use \"--showduplicates\" to ensure the list includes installed
0.355 | 3311: # packages that happen to be up to date.
0.355 | 3311: local ceph_pkgs=\"$(yum list available --showduplicates 'ceph-*' |& awk '/^ceph/ {print $1}' | sort -u)\"
0.355 | 3311:
0.355 | 3311: # No need to proceed if no ceph packages are available from the currently
0.355 | 3311: # enabled repos.
0.355 | 3311: if [ -z \"$ceph_pkgs\" ]; then
0.355 | 3311: echo \"ceph packages are not available from any enabled repo\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # No need to proceed if the ceph-osd package *is* available
0.355 | 3311: if [[ $ceph_pkgs =~ ceph-osd ]]; then
0.355 | 3311: echo \"ceph-osd package is available from an enabled repo\"
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: echo \"ceph-osd package is not required, but is preventing updates to other ceph packages\"
0.355 | 3311: echo \"Removing ceph-osd package to allow updates to other ceph packages\"
0.355 | 3311: yum -y remove ceph-osd
0.355 | 3311: }
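
One detail in yum_pre_update that is easy to misread: [[ $ceph_pkgs =~ ceph-osd ]] is an unanchored regex match against the whole multi-line package list, i.e. a substring test, not an exact package-name comparison. A quick demonstration on canned data standing in for the yum list available output:

    #!/bin/bash
    set -eu

    # Canned stand-in for: yum list available --showduplicates 'ceph-*' |& awk '/^ceph/ {print $1}' | sort -u
    ceph_pkgs="ceph-base.x86_64
    ceph-common.x86_64
    ceph-mon.x86_64"

    # =~ matches anywhere in the string, so this is true as soon as
    # "ceph-osd" appears on any line of the list.
    if [[ $ceph_pkgs =~ ceph-osd ]]; then
        echo "ceph-osd is available from an enabled repo"
    else
        echo "ceph-osd is not available from any enabled repo"
    fi
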
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Swift common swift settings.\", \"value\": {\"service_name\": \"swift_base\", \"config_settings\": {\"swift::swift_hash_path_suffix\": {\"get_param\": \"SwiftHashSuffix\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Swift Proxy service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"SwiftHashSuffix\": {\"hidden\": true, \"type\": \"string\", \"description\": \"A random string to be used as a salt when hashing to determine mappings in the ring.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/post-upgrade.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Post-deploy configuration steps via puppet for all roles, as defined in ../roles_data.yaml\
0.355 | 3311: \", \"parameters\": {\"DockerPuppetDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debug logging with docker-puppet.py\"}, \"role_data\": {\"type\": \"json\", \"description\": \"Mapping of Role name e.g Controller to the per-role data\"}, \"stack_name\": {\"type\": \"string\", \"description\": \"Name of the topmost stack\"}, \"servers\": {\"type\": \"json\", \"description\": \"Mapping of Role name e.g Controller to a list of servers\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"DeployIdentifier\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Setting this to a unique value will re-run any deployment tasks which perform configuration on a Heat stack-update.\
0.355 | 3311: \"}, \"DockerPuppetProcessCount\": {\"default\": 3, \"type\": \"number\", \"description\": \"Number of concurrent processes to use when running docker-puppet to generate config files.\"}, \"ctlplane_service_ips\": {\"type\": \"json\"}}, \"outputs\": {\"RoleConfig\": {\"description\": \"Mapping of config data for all roles\", \"value\": {\"upgrade_steps_tasks\": \"- include: Controller/upgrade_tasks.yaml\
0.355 | 3311: when: role_name == 'Controller'\
0.355 | 3311: \", \"update_steps_tasks\": \"- include: Controller/update_tasks.yaml\
0.355 | 3311: when: role_name == 'Controller'\
0.355 | 3311: \", \"update_steps_playbook\": \"- hosts: overcloud\
0.355 | 3311: serial: 1\
0.355 | 3311: tasks:\
0.355 | 3311: - include: update_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: - include: deploy_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: \", \"deploy_steps_playbook\": \"- hosts: overcloud\
0.355 | 3311: tasks:\
0.355 | 3311: - include: Controller/host_prep_tasks.yaml\
0.355 | 3311: when: role_name == 'Controller'\
0.355 | 3311: - include: deploy_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: \", \"upgrade_steps_playbook\": \"- hosts: overcloud\
0.355 | 3311: tasks:\
0.355 | 3311: - include: upgrade_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: - include: deploy_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\", \"deploy_steps_tasks\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/deploy-steps-tasks.yaml\"}}}}, \"conditions\": {\"WorkflowTasks_Step1_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step1\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step4_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step4\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step3_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step3\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step5_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step5\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step2_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step2\"]}, \"\"]}}, false]}}, \"resources\": {\"ControllerPostConfig\": {\"depends_on\": [\"ControllerExtraConfigPost\"], \"type\": \"OS::TripleO::Tasks::ControllerPostConfig\", \"properties\": {\"input_values\": {\"update_identifier\": {\"get_param\": \"DeployIdentifier\"}}, \"servers\": {\"get_param\": \"servers\"}}}, \"WorkflowTasks_Step5_Execution\": {\"depends_on\": \"WorkflowTasks_Step5\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step5\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step5\"}}}}, \"condition\": \"WorkflowTasks_Step5_Enabled\"}, \"WorkflowTasks_Step3_Execution\": {\"depends_on\": \"WorkflowTasks_Step3\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step3\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step3\"}}}}, \"condition\": \"WorkflowTasks_Step3_Enabled\"}, \"ControllerHostPrepDeployment\": {\"type\": \"OS::Heat::SoftwareDeploymentGroup\", \"properties\": {\"config\": {\"get_resource\": \"ControllerHostPrepConfig\"}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerPreConfig\": {\"depends_on\": \"ControllerHostPrepDeployment\", \"type\": \"OS::TripleO::Tasks::ControllerPreConfig\", \"properties\": {\"input_values\": {\"update_identifier\": {\"get_param\": \"DeployIdentifier\"}}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step5\": 
{\"depends_on\": [\"WorkflowTasks_Step5_Execution\", \"ControllerDeployment_Step4\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 5}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step5\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step1\": {\"depends_on\": [\"WorkflowTasks_Step1_Execution\", \"ControllerPreConfig\", \"ControllerArtifactsDeploy\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 1}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step1\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step2\": {\"depends_on\": [\"WorkflowTasks_Step2_Execution\", \"ControllerDeployment_Step1\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 2}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step2\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step3\": {\"depends_on\": [\"WorkflowTasks_Step3_Execution\", \"ControllerDeployment_Step2\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 3}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step3\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step4\": {\"depends_on\": [\"WorkflowTasks_Step4_Execution\", \"ControllerDeployment_Step3\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 4}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step4\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"WorkflowTasks_Step5\": {\"depends_on\": [\"ControllerDeployment_Step4\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": 
{\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step5')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step5\"]]}}, \"condition\": \"WorkflowTasks_Step5_Enabled\"}, \"WorkflowTasks_Step1\": {\"depends_on\": [\"ControllerPreConfig\", \"ControllerArtifactsDeploy\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step1')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step1\"]]}}, \"condition\": \"WorkflowTasks_Step1_Enabled\"}, \"RoleConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"name\": \"step\"}, {\"name\": \"role_name\"}, {\"name\": \"update_identifier\"}, {\"name\": \"bootstrap_server_id\"}, {\"name\": \"docker_puppet_debug\"}, {\"name\": \"docker_puppet_process_count\"}], \"config\": {\"str_replace\": {\"params\": {\"_TASKS\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/deploy-steps-tasks.yaml\"}}, \"template\": \"- hosts: localhost\
0.355 | 3311: connection: local\
0.355 | 3311: tasks:\
0.355 | 3311: _TASKS\
0.355 | 3311: \"}}, \"options\": {\"modulepath\": \"/usr/share/ansible-modules\"}, \"group\": \"ansible\"}}, \"WorkflowTasks_Step2_Execution\": {\"depends_on\": \"WorkflowTasks_Step2\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step2\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step2\"}}}}, \"condition\": \"WorkflowTasks_Step2_Enabled\"}, \"WorkflowTasks_Step4\": {\"depends_on\": [\"ControllerDeployment_Step3\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step4')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step4\"]]}}, \"condition\": \"WorkflowTasks_Step4_Enabled\"}, \"WorkflowTasks_Step1_Execution\": {\"depends_on\": \"WorkflowTasks_Step1\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step1\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step1\"}}}}, \"condition\": \"WorkflowTasks_Step1_Enabled\"}, \"WorkflowTasks_Step3\": {\"depends_on\": [\"ControllerDeployment_Step2\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step3')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step3\"]]}}, \"condition\": \"WorkflowTasks_Step3_Enabled\"}, \"ControllerArtifactsConfig\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/deploy-artifacts.yaml\"}, \"ControllerExtraConfigPost\": {\"depends_on\": [\"ControllerDeployment_Step5\"], \"type\": \"OS::TripleO::NodeExtraConfigPost\", \"properties\": {\"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"WorkflowTasks_Step2\": {\"depends_on\": [\"ControllerDeployment_Step1\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step2')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": 
{\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step2\"]]}}, \"condition\": \"WorkflowTasks_Step2_Enabled\"}, \"ControllerHostPrepConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"ansible\", \"config\": {\"str_replace\": {\"params\": {\"_PLAYBOOK\": [{\"connection\": \"local\", \"tasks\": {\"list_concat\": [{\"get_param\": [\"role_data\", \"Controller\", \"host_prep_tasks\"]}, [{\"name\": \"Create /var/lib/tripleo-config directory\", \"file\": \"path=/var/lib/tripleo-config state=directory\"}, {\"copy\": \"content=\\\"{{puppet_step_config}}\\\" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes mode=0600\", \"name\": \"Write the puppet step_config manifest\"}, {\"name\": \"Create /var/lib/docker-puppet\", \"file\": \"path=/var/lib/docker-puppet state=directory\"}, {\"copy\": \"content=\\\"{{puppet_config | to_json}}\\\" dest=/var/lib/docker-puppet/docker-puppet.json force=yes mode=0600\", \"name\": \"Write docker-puppet-tasks json files\"}, {\"copy\": \"content=\\\"{{docker_puppet_script}}\\\" dest=/var/lib/docker-puppet/docker-puppet.py force=yes mode=0600\", \"name\": \"Write docker-puppet.py\"}, {\"copy\": \"content=\\\"{{docker_startup_configs | to_json}}\\\" dest=/var/lib/docker-container-startup-configs.json force=yes mode=0600\", \"name\": \"Write docker-container-startup-configs\"}, {\"with_dict\": \"{{docker_startup_configs}}\", \"copy\": \"content=\\\"{{item.value|to_json}}\\\" dest=\\\"/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json\\\" force=yes mode=0600\", \"name\": \"Write per-step docker-container-startup-configs\"}, {\"name\": \"Create /var/lib/kolla/config_files directory\", \"file\": \"path=/var/lib/kolla/config_files state=directory\"}, {\"with_dict\": \"{{kolla_config}}\", \"copy\": \"content=\\\"{{item.value|to_json}}\\\" dest=\\\"{{item.key}}\\\" force=yes mode=0600\", \"name\": \"Write kolla config json files\"}, {\"with_fileglob\": [\"/var/lib/docker-puppet/docker-puppet-tasks*.json\"], \"when\": \"deploy_server_id == bootstrap_server_id\", \"name\": \"Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files\", \"file\": {\"path\": \"{{item}}\", \"state\": \"absent\"}}, {\"with_dict\": \"{{docker_puppet_tasks}}\", \"copy\": \"content=\\\"{{item.value|to_json}}\\\" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace(\\\"step_\\\", \\\"\\\")}}.json force=yes mode=0600\", \"when\": \"deploy_server_id == bootstrap_server_id\", \"name\": \"Write docker-puppet-tasks json files\"}]]}, \"hosts\": \"localhost\", \"vars\": {\"kolla_config\": {\"get_param\": [\"role_data\", \"Controller\", \"kolla_config\"]}, \"docker_puppet_tasks\": {\"get_param\": [\"role_data\", \"Controller\", \"docker_puppet_tasks\"]}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"puppet_step_config\": {\"get_param\": [\"role_data\", \"Controller\", \"step_config\"]}, \"docker_puppet_script\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/docker/docker-puppet.py\"}, \"docker_startup_configs\": {\"get_param\": [\"role_data\", \"Controller\", \"docker_config\"]}, \"puppet_config\": {\"get_param\": [\"role_data\", \"Controller\", \"puppet_config\"]}}}]}, \"template\": \"_PLAYBOOK\"}}, \"options\": {\"modulepath\": \"/usr/share/ansible-modules\"}}}, \"ControllerArtifactsDeploy\": {\"type\": \"OS::Heat::StructuredDeploymentGroup\", \"properties\": {\"config\": {\"get_resource\": 
\"ControllerArtifactsConfig\"}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"WorkflowTasks_Step4_Execution\": {\"depends_on\": \"WorkflowTasks_Step4\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step4\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step4\"}}}}, \"condition\": \"WorkflowTasks_Step4_Enabled\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-statsd.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Gnocchi role.\", \"value\": {\"service_name\": \"gnocchi_statsd\", \"step_config\": \"include ::tripleo::profile::base::gnocchi::statsd\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"GnocchiServiceBase\", \"role_data\", \"config_settings\"]}, {\"tripleo.gnocchi_statsd.firewall_rules\": {\"140 gnocchi-statsd\": {\"dport\": 8125, \"proto\": \"udp\"}}}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"gnocchi_statsd_enabled\", \"command\": \"systemctl is-enabled openstack-gnocchi-statsd\", \"name\": \"Check if gnocchi_statsd is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-gnocchi-statsd' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"gnocchi_statsd_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is running\", \"tags\": \"step0,validation\"}, {\"when\": \"gnocchi_statsd_enabled.rc == 0\", \"name\": \"Stop gnocchi_statsd service\", \"service\": \"name=openstack-gnocchi-statsd state=stopped\", \"tags\": \"step1\"}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionGnocchiStatsd\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"Gnocchi service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"MonitoringSubscriptionGnocchiStatsd\": {\"default\": \"overcloud-gnocchi-statsd\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"GnocchiServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml": "{\"outputs\": {\"ip_subnet\": {\"description\": \"IP/Subnet CIDR for the pass thru network IP\", \"value\": {\"list_join\": [\"\", [{\"get_param\": \"ControlPlaneIP\"}, \"/\", {\"get_param\": \"ControlPlaneSubnetCidr\"}]]}}, \"ip_address_uri\": {\"description\": \"pass thru network IP (for compatibility with vip_v6.yaml)\", \"value\": {\"get_param\": \"ControlPlaneIP\"}}, \"ip_address\": {\"description\": \"pass thru network IP\", \"value\": {\"get_param\": \"ControlPlaneIP\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Returns the control plane port (provisioning network) as the ip_address.\
0.355 | 3311: \", \"parameters\": {\"FixedIPs\": {\"default\": [], \"type\": \"json\", \"description\": \"Control the IP allocation for the VIP port. E.g. [{'ip_address':'1.2.3.4'}]\
0.355 | 3311: \"}, \"NodeIndex\": {\"default\": 0, \"type\": \"number\", \"description\": \"Index of the IP to get from Pool\"}, \"ControlPlaneNetwork\": {\"default\": \"ctlplane\", \"type\": \"string\", \"description\": \"The name of the undercloud Neutron control plane\"}, \"ControlPlaneIP\": {\"type\": \"string\", \"description\": \"IP address on the control plane\"}, \"ControlPlaneSubnetCidr\": {\"default\": \"24\", \"type\": \"string\", \"description\": \"The subnet CIDR of the control plane network.\"}, \"ServiceName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Name of the service to lookup\"}, \"PortName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Name of the port\"}, \"IPPool\": {\"default\": {}, \"type\": \"json\", \"description\": \"A network mapped list of IPs\"}, \"NetworkName\": {\"default\": \"ctlplane\", \"type\": \"string\", \"description\": \"Name of the network where the VIP will be created\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/panko-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Panko role.\", \"value\": {\"service_name\": \"panko_base\", \"config_settings\": {\"panko::auth::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"panko::auth::auth_password\": {\"get_param\": \"PankoPassword\"}, \"panko::keystone::authtoken::project_domain_name\": \"Default\", \"panko::keystone::authtoken::user_domain_name\": \"Default\", \"panko::keystone::authtoken::project_name\": \"service\", \"panko::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"panko::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"panko::auth::auth_region\": \"regionOne\", \"panko::db::database_connection\": {\"make_url\": {\"username\": \"panko\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"PankoPassword\"}, \"path\": \"/panko\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"panko::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"PankoDebug\"}]}, \"panko::keystone::authtoken::password\": {\"get_param\": \"PankoPassword\"}, \"panko::auth::auth_tenant_name\": \"service\"}, \"service_config_settings\": {\"keystone\": {\"panko::keystone::auth::tenant\": \"service\", \"panko::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"uri\"]}, \"panko::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"uri\"]}, \"panko::keystone::auth::password\": {\"get_param\": \"PankoPassword\"}, \"panko::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"panko::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"uri\"]}}, \"mysql\": {\"panko::db::mysql::password\": {\"get_param\": \"PankoPassword\"}, \"panko::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"panko::db::mysql::user\": \"panko\", \"panko::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"panko::db::mysql::dbname\": \"panko\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"PankoDebug\"}, \"\"]}}, 
\"description\": \"OpenStack Panko service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"PankoPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the panko services.\"}, \"PankoDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Panko services.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-compute-plugin-nuage.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron Compute Nuage plugin\", \"value\": {\"service_name\": \"neutron_compute_plugin_nuage\", \"step_config\": \"include ::tripleo::profile::base::neutron::agents::nuage\", \"config_settings\": {\"nuage::metadataagent::metadata_secret\": {\"get_param\": \"NuageMetadataProxySharedSecret\"}, \"nuage::metadataagent::nova_os_username\": {\"get_param\": \"NuageNovaOsUsername\"}, \"tripleo::profile::base::neutron::agents::nuage::nova_os_password\": {\"get_param\": \"NovaPassword\"}, \"nuage::metadataagent::metadata_port\": {\"get_param\": \"NuageMetadataPort\"}, \"nuage::metadataagent::nova_client_version\": {\"get_param\": \"NuageNovaClientVersion\"}, \"nuage::metadataagent::nova_metadata_port\": {\"get_param\": \"NuageNovaMetadataPort\"}, \"tripleo::profile::base::neutron::agents::nuage::nova_auth_ip\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}, \"nuage::metadataagent::metadata_agent_start_with_ovs\": {\"get_param\": \"NuageMetadataAgentStartWithOvs\"}, \"nuage::metadataagent::nova_region_name\": {\"get_param\": \"NuageNovaRegionName\"}, \"nuage::vrs::standby_controller\": {\"get_param\": \"NuageStandbyController\"}, \"nuage::vrs::active_controller\": {\"get_param\": \"NuageActiveController\"}, \"tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name\": \"service\", \"tripleo.neutron_compute_plugin_nuage.firewall_rules\": {\"118 neutron vxlan networks\": {\"dport\": 4789, \"proto\": \"udp\"}, \"100 metadata agent\": {\"dport\": {\"get_param\": \"NuageMetadataPort\"}}}, \"nuage::metadataagent::nova_api_endpoint_type\": {\"get_param\": \"NuageNovaApiEndpoint\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron Compute Nuage plugin\
0.355 | 3311: \", \"parameters\": {\"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NuageNovaApiEndpoint\": {\"default\": \"publicURL\", \"type\": \"string\", \"description\": \"One of publicURL, internalURL, adminURL in \\\"keystone endpoint-list\\\"\"}, \"NuageNovaOsUsername\": {\"default\": \"nova\", \"type\": \"string\", \"description\": \"Nova username in keystone_authtoken\"}, \"NuageStandbyController\": {\"type\": \"string\", \"description\": \"IP address of the Standby Virtualized Services Controller (VSC)\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NovaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the nova service and db account\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NuageNovaRegionName\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Region name in \\\"keystone endpoint-list\\\"\"}, \"NuageActiveController\": {\"type\": \"string\", \"description\": \"IP address of the Active Virtualized Services Controller (VSC)\"}, \"NuageNovaClientVersion\": {\"default\": \"2\", \"type\": \"string\", \"description\": \"Client Version Nova\"}, \"NuageMetadataProxySharedSecret\": {\"type\": \"string\", \"description\": \"Shared secret to sign the instance-id request\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NuageMetadataAgentStartWithOvs\": {\"default\": \"True\", \"type\": \"string\", \"description\": \"Set to True if nuage-metadata-agent needs to be started with nuage-openvswitch-switch\"}, \"NuageMetadataPort\": {\"default\": \"9697\", \"type\": \"string\", \"description\": \"TCP Port to listen for metadata server requests\"}, \"NuageNovaMetadataPort\": {\"default\": \"8775\", \"type\": \"string\", \"description\": \"TCP Port used by Nova metadata server\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/firstboot/userdata_heat_admin.yaml": "{\"outputs\": {\"OS::stack_id\": {\"value\": {\"get_resource\": \"userdata\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Uses cloud-init to create an additional user with a known name, in addition to the distro-default user created by the cloud-init default.\
0.355 | 3311: \", \"parameters\": {\"node_admin_username\": {\"default\": \"heat-admin\", \"type\": \"string\"}, \"node_admin_extra_ssh_keys\": {\"default\": [], \"type\": \"comma_delimited_list\"}}, \"resources\": {\"userdata\": {\"type\": \"OS::Heat::MultipartMime\", \"properties\": {\"parts\": [{\"config\": {\"get_resource\": \"user_config\"}}]}}, \"user_config\": {\"type\": \"OS::Heat::CloudConfig\", \"properties\": {\"cloud_config\": {\"user\": {\"get_param\": \"node_admin_username\"}, \"ssh_authorized_keys\": {\"get_param\": \"node_admin_extra_ssh_keys\"}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/mongodb-disabled.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the disabled MongoDB role.\", \"value\": {\"service_name\": \"mongodb_disabled\", \"upgrade_tasks\": [{\"stat\": \"path=/usr/lib/systemd/system/mongod.service\", \"register\": \"mongod_service\", \"name\": \"Check for mongodb service\", \"tags\": \"common\"}, {\"when\": \"mongod_service.stat.exists\", \"name\": \"Stop and disable mongodb service on upgrade\", \"service\": \"name=mongod state=stopped enabled=no\", \"tags\": \"step1\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"Mongodb service, disabled by default since pike\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Sahara base service.\", \"value\": {\"service_name\": \"sahara_base\", \"config_settings\": {\"sahara::db::database_max_retries\": -1, \"sahara::database_connection\": {\"make_url\": {\"username\": \"sahara\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"SaharaPassword\"}, \"path\": \"/sahara\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"sahara::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"sahara::use_neutron\": true, \"sahara::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"sahara::keystone::authtoken::project_name\": \"service\", \"sahara::rpc_backend\": \"rabbit\", \"sahara::keystone::authtoken::password\": {\"get_param\": \"SaharaPassword\"}, \"sahara::notify::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"sahara::plugins\": {\"get_param\": \"SaharaPlugins\"}, \"sahara::rabbit_user\": {\"get_param\": \"RabbitUserName\"}, \"sahara::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"sahara::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"SaharaDebug\"}]}, \"sahara::admin_password\": {\"get_param\": \"SaharaPassword\"}, \"sahara::db::database_db_max_retries\": -1, \"sahara::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"sahara::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"SaharaDebug\"}, \"\"]}}, \"description\": \"OpenStack Sahara base service. Shared for all Sahara services.\
0.355 | 3311: \", \"parameters\": {\"SaharaDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Sahara services.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"SaharaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the sahara service account, used by sahara-api.\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"SaharaPlugins\": {\"default\": [\"ambari\", \"cdh\", \"mapr\", \"vanilla\", \"spark\", \"storm\"], \"type\": \"comma_delimited_list\", \"description\": \"Sahara enabled plugin list\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_resource_restart.sh": "#!/bin/bash
0.355 | 3311:
0.355 | 3311: set -eux
0.355 | 3311:
0.355 | 3311: # Run if pacemaker is running, we're the bootstrap node,
0.355 | 3311: # and we're updating the deployment (not creating).
0.355 | 3311:
0.355 | 3311: RESTART_FOLDER=\"/var/lib/tripleo/pacemaker-restarts\"
0.355 | 3311:
0.355 | 3311: if [[ -d \"$RESTART_FOLDER\" && -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
0.355 | 3311:
0.355 | 3311: TIMEOUT=600
0.355 | 3311: PCS_STATUS_OUTPUT=\"$(pcs status)\"
0.355 | 3311: SERVICES_TO_RESTART=\"$(ls $RESTART_FOLDER)\"
0.355 | 3311:
0.355 | 3311: for service in $SERVICES_TO_RESTART; do
0.355 | 3311: if ! echo \"$PCS_STATUS_OUTPUT\" | grep $service; then
0.355 | 3311: echo \"Service $service not found as a pacemaker resource, cannot restart it.\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311:
0.355 | 3311: for service in $SERVICES_TO_RESTART; do
0.355 | 3311: echo \"Restarting $service...\"
0.355 | 3311: pcs resource restart --wait=$TIMEOUT $service
0.355 | 3311: rm -f \"$RESTART_FOLDER\"/$service
0.355 | 3311: done
0.355 | 3311:
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if [ $(systemctl is-active haproxy) = \"active\" ]; then
0.355 | 3311: systemctl reload haproxy
0.355 | 3311: fi
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron base service.\", \"value\": {\"service_name\": \"neutron_base\", \"config_settings\": {\"map_merge\": [{\"neutron::purge_config\": {\"get_param\": \"EnableConfigPurge\"}, \"neutron::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"NeutronDebug\"}]}, \"neutron::host\": \"%{::fqdn}\", \"neutron::global_physnet_mtu\": {\"get_param\": \"NeutronGlobalPhysnetMtu\"}, \"neutron::service_plugins\": {\"get_param\": \"NeutronServicePlugins\"}, \"neutron::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"neutron::db::database_db_max_retries\": -1, \"neutron::db::sync::db_sync_timeout\": {\"get_param\": \"DatabaseSyncTimeout\"}, \"neutron::core_plugin\": {\"get_param\": \"NeutronCorePlugin\"}, \"neutron::rabbit_heartbeat_timeout_threshold\": 60, \"neutron::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"neutron::dns_domain\": {\"get_param\": \"NeutronDnsDomain\"}, \"neutron::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"neutron::dhcp_agent_notification\": {\"get_param\": \"DhcpAgentNotification\"}, \"neutron::rabbit_user\": {\"get_param\": \"RabbitUserName\"}, \"neutron::db::sync::extra_params\": {\"get_param\": \"NeutronDBSyncExtraParams\"}, \"neutron::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"neutron::allow_overlapping_ips\": true, \"neutron::db::database_max_retries\": -1}, {\"if\": [\"dhcp_agents_zero\", {}, {\"tripleo::profile::base::neutron::dhcp_agents_per_network\": {\"get_param\": \"NeutronDhcpAgentsPerNetwork\"}}]}]}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"NeutronDebug\"}, \"\"]}, \"dhcp_agents_zero\": {\"equals\": [{\"get_param\": \"NeutronDhcpAgentsPerNetwork\"}, 0]}}, \"description\": \"OpenStack Neutron base service. Shared for all Neutron agents.\
0.355 | 3311: \", \"parameters\": {\"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"NeutronDBSyncExtraParams\": {\"default\": \"\", \"type\": \"string\", \"description\": \"String of extra command line parameters to append to the neutron-db-manage\
0.355 | 3311: upgrade head command.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"NeutronDhcpAgentsPerNetwork\": {\"default\": 0, \"type\": \"number\", \"description\": \"The number of neutron dhcp agents to schedule per network\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DatabaseSyncTimeout\": {\"default\": 300, \"type\": \"number\", \"description\": \"DB Sync Timeout default\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"DhcpAgentNotification\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether or not to enable DHCP agent notifications.\"}, \"NeutronGlobalPhysnetMtu\": {\"default\": 1500, \"type\": \"number\", \"description\": \"MTU of the underlying physical network. Neutron uses this value to\
0.355 | 3311: calculate MTU for all virtual network components. For flat and VLAN\
0.355 | 3311: networks, neutron uses this value without modification. For overlay\
0.355 | 3311: networks such as VXLAN, neutron automatically subtracts the overlay\
0.355 | 3311: protocol overhead from this value.\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NeutronServicePlugins\": {\"default\": \"router,qos,trunk\", \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of service plugin entrypoints to be loaded from the\
0.355 | 3311: neutron.service_plugins namespace.\
0.355 | 3311: \"}, \"NeutronCorePlugin\": {\"default\": \"ml2\", \"type\": \"string\", \"description\": \"The core plugin for Neutron. The value should be the entrypoint to be loaded\
0.355 | 3311: from neutron.core_plugins namespace.\
0.355 | 3311: \"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EnableConfigPurge\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Remove configuration that is not generated by TripleO. Used to avoid configuration remnants after upgrades.\
0.355 | 3311: \"}, \"NeutronDnsDomain\": {\"default\": \"openstacklocal\", \"type\": \"string\", \"description\": \"Domain to use for building the hostnames.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Neutron services.\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-dispersion.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Swift dispersion tool.\", \"value\": {\"service_name\": \"swift_dispersion\", \"step_config\": \"include ::tripleo::profile::base::swift::dispersion\", \"config_settings\": {\"swift::dispersion::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneV3Internal\", \"uri\"]}, \"swift::dispersion::auth_version\": 3, \"swift::dispersion::auth_user\": \"swift\", \"swift::dispersion::auth_tenant\": \"service\", \"swift::dispersion::auth_pass\": {\"get_param\": \"SwiftPassword\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Swift dispersion tool configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"SwiftPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the swift service account\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-storage.yaml": "{\"parameter_groups\": [{\"description\": \"Do not use deprecated params, they will be removed.\", \"parameters\": [\"ControllerEnableSwiftStorage\"], \"label\": \"deprecated\"}], \"heat_template_version\": \"pike\", \"description\": \"OpenStack Swift Storage service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"SwiftAccountWorkers\": {\"default\": \"auto\", \"type\": \"string\", \"description\": \"Number of workers for Swift account service.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ControllerEnableSwiftStorage\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to enable Swift Storage on the Controller\"}, \"SwiftRawDisks\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"SwiftObjectWorkers\": {\"default\": \"auto\", \"type\": \"string\", \"description\": \"Number of workers for Swift account service.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"SwiftMountCheck\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Value of mount_check in Swift account/container/object -server.conf\"}, \"SwiftContainerWorkers\": {\"default\": \"auto\", \"type\": \"string\", \"description\": \"Number of workers for Swift account service.\"}, \"MonitoringSubscriptionSwiftStorage\": {\"default\": \"overcloud-swift-storage\", \"type\": \"string\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Swift Proxy role.\", \"value\": {\"service_name\": \"swift_storage\", \"step_config\": \"include ::tripleo::profile::base::swift::storage\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"SwiftBase\", \"role_data\", \"config_settings\"]}, {\"swift::storage::all::incoming_chmod\": \"Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r\", \"swift::storage::all::storage_local_net_ip\": {\"get_param\": [\"ServiceNetMap\", \"SwiftStorageNetwork\"]}, \"swift::storage::all::account_server_workers\": {\"get_param\": \"SwiftAccountWorkers\"}, \"swift::storage::all::object_server_workers\": {\"get_param\": \"SwiftObjectWorkers\"}, \"swift::storage::all::object_pipeline\": [\"healthcheck\", \"recon\", \"object-server\"], \"swift::storage::all::outgoing_chmod\": \"Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r\", \"tripleo::profile::base::swift::storage::enable_swift_storage\": {\"get_param\": \"ControllerEnableSwiftStorage\"}, \"swift::storage::all::container_pipeline\": [\"healthcheck\", \"container-server\"], \"swift::storage::all::account_pipeline\": [\"healthcheck\", \"account-server\"], \"tripleo.swift_storage.firewall_rules\": {\"123 swift storage\": {\"dport\": [873, 6000, 6001, 6002]}}, \"swift::storage::disks::args\": {\"get_param\": \"SwiftRawDisks\"}, \"swift::storage::all::mount_check\": {\"if\": [\"swift_mount_check\", true, false]}, \"swift::storage::all::container_server_workers\": {\"get_param\": \"SwiftContainerWorkers\"}}]}, \"upgrade_tasks\": [{\"tags\": \"step1\", \"name\": \"Stop swift storage services\", \"service\": \"name={{ item }} state=stopped\", \"with_items\": [\"openstack-swift-account-auditor\", \"openstack-swift-account-reaper\", \"openstack-swift-account-replicator\", \"openstack-swift-account\", \"openstack-swift-container-auditor\", \"openstack-swift-container-replicator\", \"openstack-swift-container-updater\", \"openstack-swift-container\", \"openstack-swift-object-auditor\", \"openstack-swift-object-expirer\", \"openstack-swift-object-replicator\", \"openstack-swift-object-updater\", \"openstack-swift-object\"]}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionSwiftStorage\"}}}}, \"conditions\": {\"swift_mount_check\": {\"or\": [{\"equals\": [{\"get_param\": \"SwiftMountCheck\"}, true]}, {\"not\": {\"equals\": [{\"get_param\": \"SwiftRawDisks\"}, {}]}}]}}, \"resources\": {\"SwiftBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron ML2 plugin.\", \"value\": {\"service_name\": \"neutron_plugin_ml2\", \"step_config\": \"include ::tripleo::profile::base::neutron::plugins::ml2\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"neutron::plugins::ml2::tenant_network_types\": {\"get_param\": \"NeutronNetworkType\"}, \"neutron::plugins::ml2::type_drivers\": {\"get_param\": \"NeutronTypeDrivers\"}, \"neutron::plugins::ml2::extension_drivers\": {\"get_param\": \"NeutronPluginExtensions\"}, \"neutron::plugins::ml2::firewall_driver\": {\"get_param\": \"NeutronFirewallDriver\"}, \"neutron::plugins::ml2::network_vlan_ranges\": {\"get_param\": \"NeutronNetworkVLANRanges\"}, \"neutron::plugins::ml2::flat_networks\": {\"get_param\": \"NeutronFlatNetworks\"}, \"neutron::plugins::ml2::vni_ranges\": {\"get_param\": \"NeutronVniRanges\"}, \"neutron::plugins::ml2::tunnel_id_ranges\": {\"get_param\": \"NeutronTunnelIdRanges\"}, \"neutron::plugins::ml2::mechanism_drivers\": {\"get_param\": \"NeutronMechanismDrivers\"}, \"neutron::plugins::ml2::overlay_ip_version\": {\"get_param\": \"NeutronOverlayIPVersion\"}}]}, \"service_config_settings\": {\"horizon\": {\"neutron::plugins::ml2::mechanism_drivers\": {\"get_param\": \"NeutronMechanismDrivers\"}}}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron ML2 Plugin configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NeutronTypeDrivers\": {\"default\": \"vxlan,vlan,flat,gre\", \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of network type driver entrypoints to be loaded.\
0.355 | 3311: \"}, \"NeutronNetworkVLANRanges\": {\"default\": \"datacentre:1:1000\", \"type\": \"comma_delimited_list\", \"description\": \"The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the Neutron documentation for permitted values. Defaults to permitting VLANs 1 to 1000 on the 'datacentre' physical network (See NeutronBridgeMappings).\
0.355 | 3311: \"}, \"NeutronMechanismDrivers\": {\"default\": \"openvswitch\", \"type\": \"comma_delimited_list\", \"description\": \"The mechanism drivers for the Neutron tenant network.\
0.355 | 3311: \"}, \"NeutronFlatNetworks\": {\"default\": \"datacentre\", \"type\": \"comma_delimited_list\", \"description\": \"If set, flat networks to configure in neutron plugins.\"}, \"NeutronOverlayIPVersion\": {\"default\": 4, \"type\": \"number\", \"description\": \"IP version used for all overlay network endpoints.\", \"constraints\": [{\"allowed_values\": [4, 6]}]}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NeutronPluginExtensions\": {\"default\": \"qos,port_security\", \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of extensions enabled for the Neutron plugin.\
0.355 | 3311: \"}, \"NeutronTunnelIdRanges\": {\"default\": [\"1:4094\"], \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges\
0.355 | 3311: of GRE tunnel IDs that are available for tenant network allocation\
0.355 | 3311: \"}, \"NeutronVniRanges\": {\"default\": [\"1:4094\"], \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges\
0.355 | 3311: of VXLAN VNI IDs that are available for tenant network allocation\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NeutronNetworkType\": {\"default\": \"vxlan\", \"type\": \"comma_delimited_list\", \"description\": \"The tenant network type for Neutron.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"NeutronFirewallDriver\": {\"default\": \"openvswitch\", \"type\": \"string\", \"description\": \"Firewall driver for realizing neutron security group function\"}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-api.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"NovaEnableDBPurge\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to create cron job for purging soft deleted rows in Nova database.\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NovaWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Nova services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NovaDefaultFloatingPool\": {\"default\": \"public\", \"type\": \"string\", \"description\": \"Default pool for floating IP addresses\"}, \"InstanceNameTemplate\": {\"default\": \"instance-%08x\", \"type\": \"string\", \"description\": \"Template string to be used to generate instance names\"}, \"MonitoringSubscriptionNovaApi\": {\"default\": \"overcloud-nova-api\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NovaApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Nova API.\
0.355 | 3311: e.g. { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NovaApiLoggingSource\": {\"default\": {\"path\": \"/var/log/nova/nova-api.log\", \"tag\": \"openstack.nova.api\"}, \"type\": \"json\"}, \"NovaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the nova service and db account\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NeutronMetadataProxySharedSecret\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Shared secret to prevent spoofing\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"NovaDbSyncTimeout\": {\"default\": 300, \"type\": \"number\", \"description\": \"Timeout for Nova db sync\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova API service.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"nova::api::default_floating_pool\": {\"get_param\": \"NovaDefaultFloatingPool\"}, \"nova::api::enable_proxy_headers_parsing\": true, \"nova::wsgi::apache_api::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"nova::api::sync_db_api\": true, \"nova::cron::archive_deleted_rows::hour\": \"*/12\", \"nova::keystone::authtoken::project_name\": \"service\", \"nova_enable_db_purge\": {\"get_param\": \"NovaEnableDBPurge\"}, \"nova::wsgi::apache_api::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"nova::keystone::authtoken::password\": {\"get_param\": \"NovaPassword\"}, \"nova::api::service_name\": \"httpd\", \"tripleo.nova_api.firewall_rules\": {\"113 nova_api\": {\"dport\": [8773, 3773, 8774, 13774, 8775]}}, \"nova::api::neutron_metadata_proxy_shared_secret\": {\"get_param\": \"NeutronMetadataProxySharedSecret\"}, \"nova::wsgi::apache_api::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}, \"nova::keystone::authtoken::user_domain_name\": \"Default\", \"nova::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"nova::api::instance_name_template\": {\"get_param\": \"InstanceNameTemplate\"}, \"nova::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"nova_wsgi_enabled\": true, \"nova::api::api_bind_address\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"nova::keystone::authtoken::project_domain_name\": \"Default\", \"nova::policy::policies\": {\"get_param\": \"NovaApiPolicies\"}, \"nova::cron::archive_deleted_rows::destination\": \"/dev/null\", \"nova::api::enabled\": true}, {\"if\": [\"nova_workers_zero\", {}, {\"nova::api::osapi_compute_workers\": {\"get_param\": \"NovaWorkers\"}, \"nova::wsgi::apache_api::workers\": {\"get_param\": \"NovaWorkers\"}}]}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"upgrade_tasks\"]}, [{\"register\": \"bootstrap_node\", \"command\": \"hiera bootstrap_nodeid\", \"name\": \"get bootstrap nodeid\", \"tags\": \"common\"}, {\"tags\": 
\"common\", \"set_fact\": \"is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}\", \"name\": \"set is_bootstrap_node fact\"}, {\"when\": \"is_bootstrap_node\", \"command\": \"nova-manage db online_data_migrations\", \"name\": \"Extra migration for nova tripleo/+bug/1656791\", \"tags\": \"step0,pre-upgrade\"}, {\"name\": \"Stop and disable nova_api service (pre-upgrade not under httpd)\", \"service\": \"name=openstack-nova-api state=stopped enabled=no\", \"tags\": \"step2\"}, {\"copy\": {\"dest\": \"/root/nova-api_upgrade_manifest.pp\", \"content\": \"$transport_url = os_transport_url({\
0.355 | 3311: 'transport' => hiera('messaging_service_name', 'rabbit'),\
0.355 | 3311: 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),\
0.355 | 3311: 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),\
0.355 | 3311: 'username' => hiera('nova::rabbit_userid', 'guest'),\
0.355 | 3311: 'password' => hiera('nova::rabbit_password'),\
0.355 | 3311: 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))\
0.355 | 3311: }) oslo::messaging::default { 'nova_config':\
0.355 | 3311: transport_url => $transport_url\
0.355 | 3311: }\
0.355 | 3311: \", \"mode\": 384}, \"when\": \"is_bootstrap_node\", \"name\": \"Create puppet manifest to set transport_url in nova.conf\", \"tags\": \"step5\"}, {\"failed_when\": \"puppet_apply_nova_api_upgrade.rc not in [0,2]\", \"changed_when\": \"puppet_apply_nova_api_upgrade.rc == 2\", \"command\": \"puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp\", \"name\": \"Run puppet apply to set tranport_url in nova.conf\", \"tags\": \"step5\", \"register\": \"puppet_apply_nova_api_upgrade\", \"when\": \"is_bootstrap_node\"}, {\"shell\": \"nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)\", \"when\": \"is_bootstrap_node\", \"name\": \"Setup cell_v2 (map cell0)\", \"tags\": \"step5\"}, {\"failed_when\": \"nova_api_create_cell.rc not in [0,2]\", \"shell\": \"nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)\", \"name\": \"Setup cell_v2 (create default cell)\", \"tags\": \"step5\", \"register\": \"nova_api_create_cell\", \"when\": \"is_bootstrap_node\", \"changed_when\": \"nova_api_create_cell.rc == 0\"}, {\"command\": \"nova-manage db sync\", \"name\": \"Setup cell_v2 (sync nova/cell DB)\", \"tags\": \"step5\", \"async\": {\"get_param\": \"NovaDbSyncTimeout\"}, \"poll\": 10, \"when\": \"is_bootstrap_node\"}, {\"shell\": \"nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == \\\"default\\\" {print $4}'\", \"register\": \"nova_api_cell_uuid\", \"when\": \"is_bootstrap_node\", \"name\": \"Setup cell_v2 (get cell uuid)\", \"tags\": \"step5\"}, {\"when\": \"is_bootstrap_node\", \"command\": \"nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose\", \"name\": \"Setup cell_v2 (migrate hosts)\", \"tags\": \"step5\"}, {\"when\": \"is_bootstrap_node\", \"command\": \"nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}\", \"name\": \"Setup cell_v2 (migrate instances)\", \"tags\": \"step5\"}, {\"command\": \"nova-manage api_db sync\", \"when\": \"is_bootstrap_node\", \"name\": \"Sync nova_api DB\", \"tags\": \"step5\"}, {\"when\": \"is_bootstrap_node\", \"command\": \"nova-manage db online_data_migrations\", \"name\": \"Online data migration for nova\", \"tags\": \"step5\"}]]}, \"logging_groups\": [\"nova\"], \"service_name\": \"nova_api\", \"step_config\": \"include tripleo::profile::base::nova::api\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaApi\"}, \"service_config_settings\": {\"keystone\": {\"nova::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"uri\"]}, \"nova::keystone::auth::password\": {\"get_param\": \"NovaPassword\"}, \"nova::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"uri\"]}, \"nova::keystone::auth::tenant\": \"service\", \"nova::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"nova::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"uri\"]}}, \"mysql\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"service_config_settings\", \"mysql\"]}, {\"nova::db::mysql_api::dbname\": \"nova_api\", \"nova::db::mysql_api::password\": {\"get_param\": \"NovaPassword\"}, \"nova::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"nova::db::mysql_api::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"nova::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"nova::db::mysql::user\": \"nova\", \"nova::db::mysql_api::user\": \"nova_api\", \"nova::db::mysql::dbname\": \"nova\", \"nova::db::mysql_api::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"nova::db::mysql::password\": {\"get_param\": \"NovaPassword\"}}]}}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"NovaApiLoggingSource\"}}}}, \"conditions\": {\"nova_workers_zero\": {\"equals\": [{\"get_param\": \"NovaWorkers\"}, 0]}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"RoleParameters\": {\"get_param\": \"RoleParameters\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-ringbuilder.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for Swift Ringbuilder configuration.\", \"value\": {\"service_name\": \"swift_ringbuilder\", \"step_config\": \"include ::tripleo::profile::base::swift::ringbuilder\", \"config_settings\": {\"tripleo::profile::base::swift::ringbuilder::min_part_hours\": {\"get_param\": \"SwiftMinPartHours\"}, \"tripleo::profile::base::swift::ringbuilder::replicas\": {\"get_param\": \"SwiftReplicas\"}, \"tripleo::profile::base::swift::ringbuilder::raw_disks\": {\"yaql\": {\"expression\": \"$.data.raw_disk_lists.flatten()\", \"data\": {\"raw_disk_lists\": [{\"if\": [\"swift_use_local_dir\", [\":%PORT%/d1\"], []]}, {\"repeat\": {\"for_each\": {\"DEVICE\": {\"get_param\": \"SwiftRawDisks\"}}, \"template\": 
\":%PORT%/DEVICE\"}}]}}}, \"tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl\": {\"get_param\": \"SwiftRingGetTempurl\"}, \"tripleo::profile::base::swift::ringbuilder::raw_disk_prefix\": \"r1z1-\", \"tripleo::profile::base::swift::ringbuilder::part_power\": {\"get_param\": \"SwiftPartPower\"}, \"tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl\": {\"get_param\": \"SwiftRingPutTempurl\"}, \"tripleo::profile::base::swift::ringbuilder::build_ring\": {\"get_param\": \"SwiftRingBuild\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"swift_use_local_dir\": {\"and\": [{\"equals\": [{\"get_param\": \"SwiftUseLocalDir\"}, true]}, {\"equals\": [{\"get_param\": \"SwiftRawDisks\"}, {}]}]}}, \"description\": \"OpenStack Swift Ringbuilder\
0.355 | 3311: \", \"parameters\": {\"SwiftMinPartHours\": {\"default\": 1, \"type\": \"number\", \"description\": \"The minimum time (in hours) before a partition in a ring can be moved following a rebalance.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"SwiftRawDisks\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"SwiftUseLocalDir\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Use a local directory for Swift storage services when building rings\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"SwiftPartPower\": {\"default\": 10, \"type\": \"number\", \"description\": \"Partition Power to use when building Swift rings\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"SwiftRingBuild\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to manage Swift rings or not\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"SwiftRingPutTempurl\": {\"default\": \"\", \"type\": \"string\", \"description\": \"A temporary Swift URL to upload rings to.\"}, \"SwiftRingGetTempurl\": {\"default\": \"\", \"type\": \"string\", \"description\": \"A temporary Swift URL to download rings from.\"}, \"SwiftReplicas\": {\"default\": 3, \"type\": \"number\", \"description\": \"How many replicas to use in the swift rings.\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/all-nodes-validation.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The ID of the AllNodesValidationsImpl resource.\", \"value\": {\"get_resource\": \"AllNodesValidationsImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software Config to drive validations that occur on all nodes. Note, you need the heat-config-script element built into your images, due to the script group below.\
0.355 | 3311: \", \"parameters\": {\"ValidateFqdn\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.\"}, \"PingTestIps\": {\"default\": \"\", \"type\": \"string\", \"description\": \"A string containing a space separated list of IP addresses used to ping test each available network interface.\"}, \"ValidateNtp\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Validation to ensure at least one time source is accessible.\"}}, \"resources\": {\"AllNodesValidationsImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"default\": {\"get_param\": \"PingTestIps\"}, \"name\": \"ping_test_ips\"}, {\"default\": {\"get_param\": \"ValidateFqdn\"}, \"name\": \"validate_fqdn\"}, {\"default\": {\"get_param\": \"ValidateNtp\"}, \"name\": \"validate_ntp\"}], \"config\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/validation-scripts/all-nodes.sh\"}, \"group\": \"script\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/upgrade_config.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The software config which runs ansible with tags\", \"value\": {\"get_resource\": \"AnsibleUpgradeConfigImpl\"}}, \"upgrade_config\": {\"description\": \"The configuration file used for upgrade\", \"value\": {\"get_attr\": [\"AnsibleConfig\", \"value\"]}}}, \"heat_template_version\": \"pike\", \"description\": \"Upgrade for via ansible by applying a step related tag\", \"parameters\": {\"UpgradeStepConfig\": {\"default\": \"\", \"type\": \"json\", \"description\": \"Config (ansible yaml) that will be used to step through the deployment.\"}, \"step\": {\"type\": \"string\", \"description\": \"Step number of the upgrade\"}, \"SkipUpgradeConfigTags\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"Ansible tags to skip during upgrade, e.g validation skips pre-upgrade validations\"}}, \"resources\": {\"AnsibleConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"value\": {\"str_replace\": {\"params\": {\"CONFIG\": [{\"connection\": \"local\", \"tasks\": {\"get_param\": \"UpgradeStepConfig\"}, \"hosts\": \"localhost\"}]}, \"template\": \"CONFIG\"}}}}, \"AnsibleUpgradeConfigImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"name\": \"role\"}], \"config\": {\"get_attr\": [\"AnsibleConfig\", \"value\"]}, \"options\": {\"skip_tags\": {\"list_join\": [\",\", {\"get_param\": \"SkipUpgradeConfigTags\"}]}, \"tags\": {\"str_replace\": {\"params\": {\"STEP\": {\"get_param\": \"step\"}}, \"template\": \"common,stepSTEP\"}}, \"modulepath\": \"/usr/share/ansible-modules\"}, \"group\": \"ansible\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/ssh/host_public_key.yaml": "{\"outputs\": {\"ed25519\": {\"description\": \"Host ssh public key (ed25519)\", \"value\": {\"get_attr\": [\"SshHostPubKeyDeployment\", \"ed25519\"]}}, \"ecdsa\": {\"description\": \"Host ssh public key (ecdsa)\", \"value\": {\"get_attr\": [\"SshHostPubKeyDeployment\", \"ecdsa\"]}}, \"rsa\": {\"description\": \"Host ssh public key (rsa)\", \"value\": {\"get_attr\": [\"SshHostPubKeyDeployment\", \"rsa\"]}}}, \"heat_template_version\": \"pike\", \"description\": \"This is a template which will fetch the ssh host public key.\
0.355 | 3311: \", \"parameters\": {\"deployment_actions\": {\"default\": [\"CREATE\", \"UPDATE\"], \"type\": \"comma_delimited_list\", \"description\": \"List of stack actions that will trigger any deployments in this templates. The actions will be an empty list of the server is in the toplevel DeploymentServerBlacklist parameter's value.\
0.355 | 3311: \"}, \"server\": {\"type\": \"string\", \"description\": \"ID of the node to apply this config to\"}}, \"resources\": {\"SshHostPubKeyDeployment\": {\"type\": \"OS::Heat::SoftwareDeployment\", \"properties\": {\"config\": {\"get_resource\": \"SshHostPubKeyConfig\"}, \"name\": \"SshHostPubKeyDeployment\", \"actions\": {\"get_param\": \"deployment_actions\"}, \"server\": {\"get_param\": \"server\"}}}, \"SshHostPubKeyConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"outputs\": [{\"name\": \"rsa\"}, {\"name\": \"ecdsa\"}, {\"name\": \"ed25519\"}], \"config\": \"#!/bin/sh -x\
0.355 | 3311: test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa\
0.355 | 3311: test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa\
0.355 | 3311: test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519\
0.355 | 3311: \", \"group\": \"script\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/tripleo-firewall.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the TripleO firewall settings\", \"value\": {\"service_name\": \"tripleo_firewall\", \"step_config\": \"include ::tripleo::firewall\
0.355 | 3311: \", \"config_settings\": {\"tripleo::firewall::purge_firewall_rules\": {\"get_param\": \"PurgeFirewallRules\"}, \"tripleo::firewall::manage_firewall\": {\"get_param\": \"ManageFirewall\"}}, \"upgrade_tasks\": [{\"shell\": \"cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; cat</dev/null>/etc/sysconfig/ip6tables\", \"name\": \"blank ipv6 rule before activating ipv6 firewall.\", \"args\": {\"creates\": \"/etc/sysconfig/ip6tables.n-o-upgrade\"}, \"tags\": \"step3\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"TripleO Firewall settings\
0.355 | 3311: \", \"parameters\": {\"PurgeFirewallRules\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether IPtables rules should be purged before setting up the new ones.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"ManageFirewall\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to manage IPtables rules.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/tripleo-packages.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the TripleO package settings\", \"value\": {\"update_tasks\": [{\"yum\": \"name=* state=latest\", \"when\": \"step == \\\"3\\\"\", \"name\": \"Update all packages\"}], \"service_name\": \"tripleo_packages\", \"step_config\": \"include ::tripleo::packages\
0.355 | 3311: \", \"config_settings\": {\"tripleo::packages::enable_install\": {\"get_param\": \"EnablePackageInstall\"}}, \"upgrade_tasks\": [{\"register\": \"rpm_python_check\", \"yum\": \"name=rpm-python state=present\", \"name\": \"Check yum for rpm-python present\", \"tags\": \"step0\"}, {\"fail\": \"msg=\\\"rpm-python package was not present before this run! Check environment before re-running\\\"\", \"when\": \"rpm_python_check.changed != false\", \"name\": \"Fail when rpm-python wasn't present\", \"tags\": \"step0\"}, {\"yum\": \"name=* state=latest\", \"name\": \"Update all packages\", \"tags\": \"step3\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"TripleO Package installation settings\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EnablePackageInstall\": {\"default\": \"false\", \"type\": \"boolean\", \"description\": \"Set to true to enable package installation at deploy time\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/ceilometer-expirer-disabled.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the disabling Ceilometer Expirer role.\", \"value\": {\"service_name\": \"ceilometer_expirer_disabled\", \"upgrade_tasks\": [{\"failed_when\": \"remove_ceilometer_expirer_crontab.rc != 0 and remove_ceilometer_expirer_crontab.stderr != \\\"no crontab for ceilometer\\\"\", \"shell\": \"/usr/bin/crontab -u ceilometer -r\", \"name\": \"Remove ceilometer expirer cron tab on upgrade\", \"tags\": \"step1\", \"register\": \"remove_ceilometer_expirer_crontab\", \"changed_when\": \"remove_ceilometer_expirer_crontab.stderr != \\\"no crontab for ceilometer\\\"\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer Expirer service, disabled since pike\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-dhcp.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron DHCP agent service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNeutronDhcp\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"neutron::agents::dhcp::enable_force_metadata\": {\"get_param\": \"NeutronEnableForceMetadata\"}, \"neutron::agents::dhcp::enable_isolated_metadata\": {\"get_param\": \"NeutronEnableIsolatedMetadata\"}, \"tripleo.neutron_dhcp.firewall_rules\": {\"115 neutron dhcp input\": {\"dport\": 67, \"proto\": \"udp\"}, \"116 neutron dhcp output\": {\"dport\": 68, \"chain\": \"OUTPUT\", \"proto\": \"udp\"}}, \"neutron::agents::dhcp::enable_metadata_network\": {\"get_param\": \"NeutronEnableMetadataNetwork\"}, \"neutron::agents::dhcp::dnsmasq_dns_servers\": {\"get_param\": \"NeutronDhcpAgentDnsmasqDnsServers\"}}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"neutron_dhcp_agent_enabled\", \"command\": \"systemctl is-enabled neutron-dhcp-agent\", \"name\": \"Check if neutron_dhcp_agent is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'neutron-dhcp-agent' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"neutron_dhcp_agent_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service neutron-dhcp-agent is running\", \"tags\": \"step0,validation\"}, {\"when\": \"neutron_dhcp_agent_enabled.rc == 0\", \"name\": \"Stop neutron_dhcp service\", \"service\": \"name=neutron-dhcp-agent state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"neutron\"], \"service_name\": \"neutron_dhcp\", \"logging_source\": {\"get_param\": \"NeutronDhcpAgentLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::neutron::dhcp\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron DHCP agent configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronEnableForceMetadata\": {\"default\": false, \"type\": \"boolean\", \"description\": \"If True, DHCP always provides metadata route to VM.\"}, \"NeutronDhcpAgentDnsmasqDnsServers\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"List of servers to use as dnsmasq forwarders\"}, \"NeutronDhcpAgentLoggingSource\": {\"default\": {\"path\": \"/var/log/neutron/dhcp-agent.log\", \"tag\": \"openstack.neutron.agent.dhcp\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MonitoringSubscriptionNeutronDhcp\": {\"default\": \"overcloud-neutron-dhcp\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronEnableMetadataNetwork\": {\"default\": false, \"type\": \"boolean\", \"description\": \"If True, DHCP provide metadata network. Requires either IsolatedMetadata or ForceMetadata parameters to also be True.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"NeutronEnableIsolatedMetadata\": {\"default\": false, \"type\": \"boolean\", \"description\": \"If True, DHCP provide metadata route to VM.\"}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-api.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Gnocchi role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"GnocchiServiceBase\", \"role_data\", \"config_settings\"]}, {\"gnocchi::api::enable_proxy_headers_parsing\": true, \"gnocchi::wsgi::apache::wsgi_process_display_name\": \"gnocchi_wsgi\", \"tripleo::profile::base::gnocchi::api::gnocchi_backend\": {\"get_param\": \"GnocchiBackend\"}, \"gnocchi::keystone::authtoken::password\": {\"get_param\": \"GnocchiPassword\"}, \"gnocchi::api::service_name\": \"httpd\", \"gnocchi::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"gnocchi::keystone::authtoken::project_name\": \"service\", \"gnocchi::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"gnocchi::api::enabled\": true, \"gnocchi::wsgi::apache::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"gnocchi::wsgi::apache::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": 
{\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"gnocchi::wsgi::apache::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}, \"gnocchi::keystone::authtoken::user_domain_name\": \"Default\", \"tripleo.gnocchi_api.firewall_rules\": {\"129 gnocchi-api\": {\"dport\": [8041, 13041]}}, \"gnocchi::keystone::authtoken::project_domain_name\": \"Default\", \"gnocchi::policy::policies\": {\"get_param\": \"GnocchiApiPolicies\"}}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"upgrade_tasks\"]}, [{\"name\": \"Stop gnocchi_api service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}]]}, \"logging_groups\": [\"gnocchi\"], \"service_name\": \"gnocchi_api\", \"step_config\": \"include ::tripleo::profile::base::gnocchi::api\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionGnocchiApi\"}, \"service_config_settings\": {\"keystone\": {\"gnocchi::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"gnocchi::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"uri\"]}, \"gnocchi::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"uri\"]}, \"gnocchi::keystone::auth::tenant\": \"service\", \"gnocchi::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"uri\"]}, \"gnocchi::keystone::auth::password\": {\"get_param\": \"GnocchiPassword\"}}, \"mysql\": {\"gnocchi::db::mysql::password\": {\"get_param\": \"GnocchiPassword\"}, \"gnocchi::db::mysql::dbname\": \"gnocchi\", \"gnocchi::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"gnocchi::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"gnocchi::db::mysql::user\": \"gnocchi\"}}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"GnocchiApiLoggingSource\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"Gnocchi service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"GnocchiBackend\": {\"default\": \"swift\", \"type\": \"string\", \"description\": \"The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file\", \"constraints\": [{\"allowed_values\": [\"swift\", \"file\", \"rbd\"]}]}, \"GnocchiPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the gnocchi service and db account.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"GnocchiApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Gnocchi API.\
0.355 | 3311: e.g. { gnocchi-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"GnocchiApiLoggingSource\": {\"default\": {\"path\": \"/var/log/gnocchi/app.log\", \"tag\": \"openstack.gnocchi.api\"}, \"type\": \"json\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"MonitoringSubscriptionGnocchiApi\": {\"default\": \"overcloud-gnocchi-api\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"GnocchiServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-migration-target.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova migration target service.\", \"value\": {\"service_name\": \"nova_migration_target\", \"step_config\": \"include tripleo::profile::base::nova::migration::target\", \"config_settings\": {\"tripleo::profile::base::nova::migration::target::ssh_authorized_keys\": [{\"get_param\": [\"MigrationSshKey\", \"public_key\"]}], \"live_migration_ssh_inbound_addr\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}, \"cold_migration_ssh_inbound_addr\": {\"get_param\": [\"ServiceNetMap\", \"NovaColdMigrationNetwork\"]}, \"tripleo::profile::base::nova::migration::target::ssh_localaddrs\": [\"%{hiera('cold_migration_ssh_inbound_addr')}\", \"%{hiera('live_migration_ssh_inbound_addr')}\"]}}}}, \"heat_template_version\": \"ocata\", \"description\": \"OpenStack Nova migration target configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"MigrationSshKey\": {\"default\": {\"public_key\": \"\", \"private_key\": \"\"}, \"type\": \"json\", \"description\": \"SSH key for migration. Expects a dictionary with keys 'public_key' and 'private_key'. Values should be identical to SSH public/private key files.\
0.355 | 3311: \"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/horizon.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Horizon role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionHorizon\"}, \"config_settings\": {\"map_merge\": [{\"horizon::password_validator\": {\"get_param\": [\"HorizonPasswordValidator\"]}, \"horizon::cache_backend\": \"django.core.cache.backends.memcached.MemcachedCache\", \"horizon::bind_address\": {\"get_param\": [\"ServiceNetMap\", \"HorizonNetwork\"]}, \"horizon::enable_secure_proxy_ssl_header\": true, \"horizon::django_session_engine\": \"django.contrib.sessions.backends.cache\", \"horizon::allowed_hosts\": {\"get_param\": \"HorizonAllowedHosts\"}, \"horizon::secret_key\": {\"yaql\": {\"expression\": \"$.data.passwords.where($ != '').first()\", \"data\": {\"passwords\": [{\"get_param\": \"HorizonSecret\"}, {\"get_param\": [\"DefaultPasswords\", \"horizon_secret\"]}]}}}, \"horizon::disallow_iframe_embed\": true, \"horizon::keystone_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"horizon::secure_cookies\": {\"get_param\": [\"HorizonSecureCookies\"]}, \"horizon::enforce_password_check\": true, \"horizon::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HorizonNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"horizon::disable_password_reveal\": true, \"tripleo.horizon.firewall_rules\": {\"126 horizon\": {\"dport\": [80, 443]}}, \"horizon::password_validator_help\": {\"get_param\": [\"HorizonPasswordValidatorHelp\"]}, \"horizon::listen_ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"horizon::horizon_ca\": {\"get_param\": \"InternalTLSCAFile\"}, \"horizon::vhost_extra_params\": {\"priority\": 10, \"access_log_format\": \"%a %l %u %t \\\\\\\"%r\\\\\\\" %>s %b \\\\\\\"%%{}{Referer}i\\\\\\\" \\\\\\\"%%{}{User-Agent}i\\\\\\\"\", \"options\": [\"FollowSymLinks\", \"MultiViews\"]}, \"memcached_ipv6\": {\"get_param\": \"MemcachedIPv6\"}}, {\"if\": [\"debug_unset\", {\"horizon::django_debug\": {\"get_param\": \"HorizonDebug\"}}, {\"horizon::django_debug\": {\"get_param\": \"Debug\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"httpd_enabled\", \"command\": \"systemctl is-enabled httpd\", \"name\": \"Check if httpd is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'httpd' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"httpd_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if httpd is running\", \"tags\": \"step0,validation\"}, {\"when\": \"httpd_enabled.rc == 0\", \"name\": \"Stop Horizon (under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}], \"service_config_settings\": {\"haproxy\": {\"tripleo.horizon.firewall_rules\": {\"127 horizon\": {\"dport\": [80, 443]}}}}, \"service_name\": \"horizon\", \"step_config\": \"include ::tripleo::profile::base::horizon\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"debug_unset\": {\"equals\": [{\"get_param\": \"Debug\"}, \"\"]}}, \"description\": \"Horizon service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"HorizonPasswordValidator\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Regex for password validation\"}, \"InternalTLSCAFile\": {\"default\": \"/etc/ipa/ca.crt\", \"type\": \"string\", \"description\": \"Specifies the default CA cert to use if TLS is used for services in the internal network.\"}, \"HorizonDebug\": {\"default\": false, \"type\": \"string\", \"description\": \"Set to True to enable debugging Horizon service.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"HorizonSecureCookies\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MemcachedIPv6\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable IPv6 features in Memcached.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HorizonPasswordValidatorHelp\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Help text for password validation\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"MonitoringSubscriptionHorizon\": {\"default\": \"overcloud-horizon\", \"type\": \"string\"}, \"HorizonSecret\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\", \"description\": \"Secret key for Django\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"HorizonAllowedHosts\": {\"default\": \"*\", \"type\": \"comma_delimited_list\", \"description\": \"A list of IP/Hostname for the server Horizon is running on. Used for header checks.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/deploy-artifacts.sh": "#!/bin/bash
0.355 | 3311:
0.355 | 3311: TMP_DATA=$(mktemp -d)
0.355 | 3311: function cleanup {
0.355 | 3311: rm -Rf \"$TMP_DATA\"
0.355 | 3311: }
0.355 | 3311: trap cleanup EXIT
0.355 | 3311:
0.355 | 3311: if [ -n \"$artifact_urls\" ]; then
0.355 | 3311: for URL in $(echo $artifact_urls | sed -e \"s| |\
0.355 | 3311: |g\" | sort -u); do
0.355 | 3311: curl --globoff -o $TMP_DATA/file_data \"$URL\"
0.355 | 3311: if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
0.355 | 3311: mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
0.355 | 3311: yum install -y $TMP_DATA/file_data.rpm
0.355 | 3311: rm $TMP_DATA/file_data.rpm
0.355 | 3311: elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
0.355 | 3311: pushd /
0.355 | 3311: tar xvzf $TMP_DATA/file_data
0.355 | 3311: popd
0.355 | 3311: else
0.355 | 3311: echo \"ERROR: Unsupported file format: $URL\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: if [ -f $TMP_DATA/file_data ]; then
0.355 | 3311: rm $TMP_DATA/file_data
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: else
0.355 | 3311: echo \"No artifact_urls was set. Skipping...\"
0.355 | 3311: fi
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/net_ip_list_map.yaml": "{\"outputs\": {\"short_service_hostnames\": {\"description\": \"Map of enabled services to a list of hostnames where they're running regardless of the network\
0.355 | 3311: \", \"value\": {\"yaql\": {\"expression\": \"dict($.data.map.items().where(len($[1]) > 0))\", \"data\": {\"map\": {\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_attr\": [\"EnabledServicesValue\", \"value\"]}}, \"template\": {\"SERVICE_short_node_names\": {\"get_param\": \"ServiceHostnameList\"}}}}}}}}}, \"short_service_bootstrap_hostnames\": {\"description\": \"Map of enabled services to a list of hostnames where they're running regardless of the network Used for bootstrap purposes\
0.355 | 3311: \", \"value\": {\"yaql\": {\"expression\": \"dict($.data.map.items().where(len($[1]) > 0))\", \"data\": {\"map\": {\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_attr\": [\"EnabledServicesValue\", \"value\"]}}, \"template\": {\"SERVICE_short_bootstrap_node_name\": {\"get_param\": \"ServiceHostnameList\"}}}}}}}}}, \"net_ip_map\": {\"description\": \"A Hash containing a mapping of network names to assigned lists of IP addresses.\
0.355 | 3311: \", \"value\": {\"get_attr\": [\"NetIpMapValue\", \"value\"]}}, \"service_hostnames\": {\"description\": \"Map of enabled services to a list of hostnames where they're running\
0.355 | 3311: \", \"value\": {\"map_replace\": [{\"yaql\": {\"expression\": \"dict($.data.map.items().where(not $[1].endsWith(\\\"_network\\\")))\", \"data\": {\"map\": {\"map_replace\": [{\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_attr\": [\"EnabledServicesValue\", \"value\"]}}, \"template\": {\"SERVICE_node_names\": \"SERVICE_network\"}}}}, {\"values\": {\"get_param\": \"ServiceNetMap\"}}]}}}}, {\"values\": {\"get_param\": \"NetworkHostnameMap\"}}]}}, \"ctlplane_service_ips\": {\"description\": \"Map of enabled services to a list of their ctlplane IP addresses\
0.355 | 3311: \", \"value\": {\"yaql\": {\"expression\": \"dict($.data.map.items().where(len($[1]) > 0))\", \"data\": {\"map\": {\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_attr\": [\"EnabledServicesValue\", \"value\"]}}, \"template\": {\"SERVICE_ctlplane_node_ips\": {\"get_param\": \"ControlPlaneIpList\"}}}}}}}}}, \"service_ips\": {\"description\": \"Map of enabled services to a list of their IP addresses\
0.355 | 3311: \", \"value\": {\"yaql\": {\"expression\": \"dict($.data.map.items().where(not isString($[1])))\", \"data\": {\"map\": {\"map_replace\": [{\"map_replace\": [{\"map_merge\": {\"repeat\": {\"for_each\": {\"SERVICE\": {\"get_attr\": [\"EnabledServicesValue\", \"value\"]}}, \"template\": {\"SERVICE_node_ips\": \"SERVICE_network\"}}}}, {\"values\": {\"get_param\": \"ServiceNetMap\"}}]}, {\"values\": {\"get_attr\": [\"NetIpMapValue\", \"value\"]}}]}}}}}}, \"heat_template_version\": \"pike\", \"parameters\": {\"ExternalNetName\": {\"default\": \"external\", \"type\": \"string\", \"description\": \"The name of the external network.\"}, \"TenantNetName\": {\"default\": \"tenant\", \"type\": \"string\", \"description\": \"The name of the tenant network.\"}, \"TenantIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"ServiceHostnameList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"ManagementIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"StorageMgmtIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"StorageMgmtNetName\": {\"default\": \"storage_mgmt\", \"type\": \"string\", \"description\": \"The name of the storage_mgmt network.\"}, \"InternalApiNetName\": {\"default\": \"internal_api\", \"type\": \"string\", \"description\": \"The name of the internal_api network.\"}, \"NetworkHostnameMap\": {\"default\": [], \"type\": \"json\"}, \"EnabledServices\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\"}, \"StorageIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"ExternalIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"ControlPlaneIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}, \"StorageNetName\": {\"default\": \"storage\", \"type\": \"string\", \"description\": \"The name of the storage network.\"}, \"ManagementNetName\": {\"default\": \"management\", \"type\": \"string\", \"description\": \"The name of the management network.\"}, \"InternalApiIpList\": {\"default\": [], \"type\": \"comma_delimited_list\"}}, \"resources\": {\"NetIpMapValue\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"map_replace\": [{\"ctlplane\": {\"get_param\": \"ControlPlaneIpList\"}, \"management\": {\"get_param\": \"ManagementIpList\"}, \"external\": {\"get_param\": \"ExternalIpList\"}, \"internal_api\": {\"get_param\": \"InternalApiIpList\"}, \"storage_mgmt\": {\"get_param\": \"StorageMgmtIpList\"}, \"storage\": {\"get_param\": \"StorageIpList\"}, \"tenant\": {\"get_param\": \"TenantIpList\"}}, {\"keys\": {\"management\": {\"get_param\": \"ManagementNetName\"}, \"external\": {\"get_param\": \"ExternalNetName\"}, \"internal_api\": {\"get_param\": \"InternalApiNetName\"}, \"storage_mgmt\": {\"get_param\": \"StorageMgmtNetName\"}, \"storage\": {\"get_param\": \"StorageNetName\"}, \"tenant\": {\"get_param\": \"TenantNetName\"}}}]}}}, \"EnabledServicesValue\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"let(root => $) -> $.data.extra_services.items().where($[0] in $root.data.enabled_services).select($[1]).flatten() + $root.data.enabled_services\", \"data\": {\"enabled_services\": {\"get_param\": \"EnabledServices\"}, \"extra_services\": {\"keystone\": [\"keystone_admin_api\", \"keystone_public_api\"]}}}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml": 
"{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova base service.\", \"value\": {\"service_name\": \"nova_base\", \"config_settings\": {\"map_merge\": [{\"nova::placement::project_name\": \"service\", \"nova::cron::archive_deleted_rows::minute\": {\"get_param\": \"NovaCronArchiveDeleteRowsMinute\"}, \"nova::db::sync_api::db_sync_timeout\": {\"get_param\": \"DatabaseSyncTimeout\"}, \"nova::network::neutron::dhcp_domain\": \"\", \"nova::network::neutron::neutron_auth_type\": \"v3password\", \"nova::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"nova::cron::archive_deleted_rows::user\": {\"get_param\": \"NovaCronArchiveDeleteRowsUser\"}, \"nova::cell0_database_connection\": {\"make_url\": {\"username\": \"nova\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"NovaPassword\"}, \"path\": \"/nova_cell0\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"nova::placement_database_connection\": {\"make_url\": {\"username\": \"nova_placement\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"NovaPassword\"}, \"path\": \"/nova_placement\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"nova::database_connection\": {\"make_url\": {\"username\": \"nova\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"NovaPassword\"}, \"path\": \"/nova\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"nova::placement::os_interface\": {\"get_param\": \"NovaPlacementAPIInterface\"}, \"nova::cron::archive_deleted_rows::max_rows\": {\"get_param\": \"NovaCronArchiveDeleteRowsMaxRows\"}, \"nova::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"nova::cron::archive_deleted_rows::until_complete\": {\"get_param\": \"NovaCronArchiveDeleteRowsUntilComplete\"}, \"nova::use_ipv6\": {\"get_param\": \"NovaIPv6\"}, \"nova::db::sync::db_sync_timeout\": {\"get_param\": \"DatabaseSyncTimeout\"}, \"nova::cron::archive_deleted_rows::weekday\": {\"get_param\": \"NovaCronArchiveDeleteRowsWeekday\"}, \"nova::purge_config\": {\"get_param\": \"EnableConfigPurge\"}, \"nova::db::database_max_retries\": -1, \"nova::placement::os_region_name\": {\"get_param\": \"KeystoneRegion\"}, \"nova::api_database_connection\": {\"make_url\": {\"username\": \"nova_api\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"NovaPassword\"}, \"path\": \"/nova_api\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"nova::network::neutron::neutron_password\": {\"get_param\": \"NeutronPassword\"}, \"nova::network::neutron::neutron_url\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"uri\"]}, \"nova::placement::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"nova::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"nova::glance_api_servers\": {\"get_param\": [\"EndpointMap\", 
\"GlanceInternal\", \"uri\"]}, \"nova::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"NovaDebug\"}]}, \"nova::cron::archive_deleted_rows::month\": {\"get_param\": \"NovaCronArchiveDeleteRowsMonth\"}, \"nova::rabbit_heartbeat_timeout_threshold\": 60, \"nova::network::neutron::neutron_auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneV3Admin\", \"uri\"]}, \"nova::network::neutron::neutron_region_name\": {\"get_param\": \"KeystoneRegion\"}, \"nova::notify_on_state_change\": \"vm_and_task_state\", \"nova::host\": \"%{::fqdn}\", \"nova::cron::archive_deleted_rows::hour\": {\"get_param\": \"NovaCronArchiveDeleteRowsHour\"}, \"nova::placement::password\": {\"get_param\": \"NovaPassword\"}, \"nova::cron::archive_deleted_rows::monthday\": {\"get_param\": \"NovaCronArchiveDeleteRowsMonthday\"}, \"nova::db::database_db_max_retries\": -1, \"nova::cinder_catalog_info\": \"volumev2:cinderv2:internalURL\", \"nova::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"nova::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"nova::network::neutron::neutron_project_name\": \"service\", \"nova::network::neutron::neutron_username\": \"neutron\", \"nova::cron::archive_deleted_rows::destination\": {\"get_param\": \"NovaCronArchiveDeleteRowsDestination\"}, \"nova::network::neutron::neutron_ovs_bridge\": {\"get_param\": \"NovaOVSBridge\"}}, {\"if\": [\"compute_upgrade_level_empty\", {}, {\"nova::upgrade_level_compute\": {\"get_param\": \"UpgradeLevelNovaCompute\"}}]}]}, \"service_config_settings\": {\"mysql\": {\"nova::db::mysql_api::setup_cell0\": true, \"nova::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"nova::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"nova::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"nova::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"compute_upgrade_level_empty\": {\"equals\": [{\"get_param\": \"UpgradeLevelNovaCompute\"}, \"\"]}, \"service_debug_unset\": {\"equals\": [{\"get_param\": \"NovaDebug\"}, \"\"]}}, \"description\": \"OpenStack Nova base service. Shared for all Nova services.\
0.355 | 3311: \", \"parameters\": {\"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"NovaCronArchiveDeleteRowsUser\": {\"default\": \"nova\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - User\
0.355 | 3311: \"}, \"UpgradeLevelNovaCompute\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Nova Compute upgrade level\"}, \"NeutronPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the neutron service and db account, used by neutron agents.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DatabaseSyncTimeout\": {\"default\": 300, \"type\": \"number\", \"description\": \"DB Sync Timeout default\"}, \"NovaOVSBridge\": {\"default\": \"br-int\", \"type\": \"string\", \"description\": \"Name of integration bridge used by Open vSwitch\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"NovaCronArchiveDeleteRowsUntilComplete\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Cron to move deleted instances to another table - Until complete\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NovaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the nova service and db account\"}, \"NovaCronArchiveDeleteRowsMaxRows\": {\"default\": \"100\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Max Rows\
0.355 | 3311: \"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EnableConfigPurge\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Remove configuration that is not generated by TripleO. Used to avoid configuration remnants after upgrades.\
0.355 | 3311: \"}, \"NovaIPv6\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable IPv6 features in Nova\"}, \"NovaCronArchiveDeleteRowsHour\": {\"default\": \"0\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Hour\
0.355 | 3311: \"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"NovaCronArchiveDeleteRowsDestination\": {\"default\": \"/var/log/nova/nova-rowsflush.log\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Log destination\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NovaCronArchiveDeleteRowsMonth\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Month\
0.355 | 3311: \"}, \"NovaCronArchiveDeleteRowsMonthday\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Month Day\
0.355 | 3311: \"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NovaCronArchiveDeleteRowsWeekday\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Week Day\
0.355 | 3311: \"}, \"NovaCronArchiveDeleteRowsMinute\": {\"default\": \"1\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Minute\
0.355 | 3311: \"}, \"NovaDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Nova services.\"}, \"NovaPlacementAPIInterface\": {\"default\": \"internal\", \"type\": \"string\", \"description\": \"Endpoint interface to be used for the placement API.\
0.355 | 3311: \"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/validation-scripts/all-nodes.sh": "#!/bin/bash
0.355 | 3311: set -e
0.355 | 3311:
0.355 | 3311: function ping_retry() {
0.355 | 3311: local IP_ADDR=$1
0.355 | 3311: local TIMES=${2:-'10'}
0.355 | 3311: local COUNT=0
0.355 | 3311: local PING_CMD=ping
0.355 | 3311: if [[ $IP_ADDR =~ \":\" ]]; then
0.355 | 3311: PING_CMD=ping6
0.355 | 3311: fi
0.355 | 3311: until [ $COUNT -ge $TIMES ]; do
0.355 | 3311: if $PING_CMD -w 10 -c 1 $IP_ADDR &> /dev/null; then
0.355 | 3311: echo \"Ping to $IP_ADDR succeeded.\"
0.355 | 3311: return 0
0.355 | 3311: fi
0.355 | 3311: echo \"Ping to $IP_ADDR failed. Retrying...\"
0.355 | 3311: COUNT=$(($COUNT + 1))
0.355 | 3311: sleep 60
0.355 | 3311: done
0.355 | 3311: return 1
0.355 | 3311: }
0.355 | 3311:
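The ping_retry function above tries up to TIMES single pings a minute apart, switching to ping6 for IPv6 targets, and succeeds on the first reply. A plain-Python sketch of the same logic (illustrative, not what the validation ships):

# Plain-Python sketch of the ping_retry logic above: try up to `times`
# single pings, one minute apart, using ping6 for IPv6 addresses, and
# report success as soon as one reply arrives.
import subprocess, time

def ping_retry(ip_addr, times=10):
    cmd = "ping6" if ":" in ip_addr else "ping"
    for _ in range(times):
        rc = subprocess.call([cmd, "-w", "10", "-c", "1", ip_addr],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
        if rc == 0:
            print(f"Ping to {ip_addr} succeeded.")
            return True
        print(f"Ping to {ip_addr} failed. Retrying...")
        time.sleep(60)
    return False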
0.355 | 3311: # For each unique remote IP (specified via Heat) we check to
0.355 | 3311: # see if one of the locally configured networks matches and if so we
0.355 | 3311: # attempt a ping test to the remote network IP.
0.355 | 3311: function ping_controller_ips() {
0.355 | 3311: local REMOTE_IPS=$1
0.355 | 3311: for REMOTE_IP in $(echo $REMOTE_IPS | sed -e \"s| |\
0.355 | 3311: |g\" | sort -u); do
0.355 | 3311: if [[ $REMOTE_IP =~ \":\" ]]; then
0.355 | 3311: networks=$(ip -6 r | grep -v default | cut -d \" \" -f 1 | grep -v \"unreachable\")
0.355 | 3311: else
0.355 | 3311: networks=$(ip r | grep -v default | cut -d \" \" -f 1)
0.355 | 3311: fi
0.355 | 3311: for LOCAL_NETWORK in $networks; do
0.355 | 3311: in_network=$(python -c \"import ipaddr; net=ipaddr.IPNetwork('$LOCAL_NETWORK'); addr=ipaddr.IPAddress('$REMOTE_IP'); print(addr in net)\")
0.355 | 3311: if [[ $in_network == \"True\" ]]; then
0.355 | 3311: echo \"Trying to ping $REMOTE_IP for local network ${LOCAL_NETWORK}.\"
0.355 | 3311: set +e
0.355 | 3311: if ! ping_retry $REMOTE_IP; then
0.355 | 3311: echo \"FAILURE\"
0.355 | 3311: echo \"$REMOTE_IP is not pingable. Local Network: $LOCAL_NETWORK\" >&2
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: set -e
0.355 | 3311: echo \"SUCCESS\"
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: done
0.355 | 3311: }
0.355 | 3311:
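ping_controller_ips above shells out to Python with the legacy third-party ipaddr module to decide whether a remote IP belongs to a locally routed network. The same membership test with the modern standard-library ipaddress module looks like this (a drop-in sketch, not what the script ships):

# The in_network test above uses the legacy third-party "ipaddr" module.
# The same membership check with the standard-library ipaddress module:
import ipaddress

def in_network(remote_ip, local_network):
    # strict=False tolerates host bits set in route prefixes
    return (ipaddress.ip_address(remote_ip)
            in ipaddress.ip_network(local_network, strict=False))

assert in_network("192.168.24.10", "192.168.24.0/24")
assert not in_network("10.0.0.1", "192.168.24.0/24")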
0.355 | 3311: # Ping all default gateways. There should only be one
0.355 | 3311: # if using upstream t-h-t network templates but we test
0.355 | 3311: # all of them should some manual network config have
0.355 | 3311: # multiple gateways.
0.355 | 3311: function ping_default_gateways() {
0.355 | 3311: DEFAULT_GW=$(ip r | grep ^default | cut -d \" \" -f 3)
0.355 | 3311: set +e
0.355 | 3311: for GW in $DEFAULT_GW; do
0.355 | 3311: echo -n \"Trying to ping default gateway ${GW}...\"
0.355 | 3311: if ! ping_retry $GW; then
0.355 | 3311: echo \"FAILURE\"
0.355 | 3311: echo \"$GW is not pingable.\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: set -e
0.355 | 3311: echo \"SUCCESS\"
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: # Verify the FQDN from the nova/ironic deployment matches
0.355 | 3311: # the FQDN in the heat templates.
0.355 | 3311: function fqdn_check() {
0.355 | 3311: HOSTNAME=$(hostname)
0.355 | 3311: SHORT_NAME=$(hostname -s)
0.355 | 3311: FQDN_FROM_HOSTS=$(awk '$3 == \"'${SHORT_NAME}'\"{print $2}' /etc/hosts)
0.355 | 3311: echo -n \"Checking hostname vs /etc/hosts entry...\"
0.355 | 3311: if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then
0.355 | 3311: echo \"FAILURE\"
0.355 | 3311: echo -e \"System hostname: ${HOSTNAME}\
0.355 | 3311: Entry from /etc/hosts: ${FQDN_FROM_HOSTS}\
0.355 | 3311: \"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: echo \"SUCCESS\"
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: # Verify at least one time source is available.
0.355 | 3311: function ntp_check() {
0.355 | 3311: NTP_SERVERS=$(hiera ntp::servers nil |tr -d '[],\"')
0.355 | 3311: if [[ \"$NTP_SERVERS\" != \"nil\" ]];then
0.355 | 3311: echo -n \"Testing NTP...\"
0.355 | 3311: NTP_SUCCESS=0
0.355 | 3311: for NTP_SERVER in $NTP_SERVERS; do
0.355 | 3311: set +e
0.355 | 3311: NTPDATE_OUT=$(ntpdate -qud $NTP_SERVER 2>&1)
0.355 | 3311: NTPDATE_EXIT=$?
0.355 | 3311: set -e
0.355 | 3311: if [[ \"$NTPDATE_EXIT\" == \"0\" ]];then
0.355 | 3311: NTP_SUCCESS=1
0.355 | 3311: break
0.355 | 3311: else
0.355 | 3311: NTPDATE_OUT_FULL=\"$NTPDATE_OUT_FULL $NTPDATE_OUT\"
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: if [[ \"$NTP_SUCCESS\" == \"0\" ]];then
0.355 | 3311: echo \"FAILURE\"
0.355 | 3311: echo \"$NTPDATE_OUT_FULL\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: echo \"SUCCESS\"
0.355 | 3311: fi
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: ping_controller_ips \"$ping_test_ips\"
0.355 | 3311: ping_default_gateways
0.355 | 3311: if [[ $validate_fqdn == \"True\" ]];then
0.355 | 3311: fqdn_check
0.355 | 3311: fi
0.355 | 3311: if [[ $validate_ntp == \"True\" ]];then
0.355 | 3311: ntp_check
0.355 | 3311: fi
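The entry point above consumes the three inputs declared back in the AllNodesValidationsImpl SoftwareConfig (ping_test_ips, validate_fqdn, validate_ntp). The comparisons against the literal string "True" suggest the deployment hook exposes inputs as plain environment variables with Heat booleans stringified; a minimal sketch of that wiring, with stand-in validation callables:

# Minimal sketch, assuming the software-deployment hook exposes the
# SoftwareConfig inputs as environment variables (hence the comparison
# against the literal string "True" in the script above). The three
# callables are stand-ins for the shell functions, not implementations.
import os

def run_validations(ping_controller_ips, fqdn_check, ntp_check):
    ping_controller_ips(os.environ.get("ping_test_ips", ""))
    if os.environ.get("validate_fqdn") == "True":   # Heat boolean, stringified
        fqdn_check()
    if os.environ.get("validate_ntp") == "True":
        ntp_check()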
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/rabbitmq.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the RabbitMQ role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionRabbitmq\"}, \"config_settings\": {\"map_merge\": [{\"rabbitmq::file_limit\": {\"get_param\": \"RabbitFDLimit\"}, \"rabbitmq::tcp_keepalive\": true, \"rabbitmq_kernel_variables\": {\"inet_dist_listen_min\": \"25672\", \"inet_dist_listen_max\": \"25672\"}, \"rabbitmq::nr_ha_queues\": {\"get_param\": \"RabbitHAQueues\"}, \"rabbitmq::ssl_erl_dist\": {\"get_param\": \"EnableInternalTLS\"}, \"rabbitmq::interface\": {\"get_param\": [\"ServiceNetMap\", \"RabbitmqNetwork\"]}, \"rabbitmq::repos_ensure\": false, \"rabbitmq::default_user\": {\"get_param\": \"RabbitUserName\"}, \"rabbitmq::erlang_cookie\": {\"yaql\": {\"expression\": \"$.data.passwords.where($ != '').first()\", \"data\": {\"passwords\": [{\"get_param\": \"RabbitCookie\"}, {\"get_param\": [\"DefaultPasswords\", \"rabbit_cookie\"]}]}}}, \"rabbit_ipv6\": {\"get_param\": \"RabbitIPv6\"}, \"tripleo.rabbitmq.firewall_rules\": {\"109 rabbitmq\": {\"dport\": [4369, 5672, 25672]}}, \"rabbitmq::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"rabbitmq_config_variables\": {\"queue_master_locator\": \"<<\\\"min-masters\\\">>\", \"loopback_users\": \"[]\", \"cluster_partition_handling\": \"pause_minority\"}, \"rabbitmq::default_pass\": {\"get_param\": \"RabbitPassword\"}, \"rabbitmq_environment\": {\"NODE_IP_ADDRESS\": \"\", \"NODE_PORT\": \"\", \"export ERL_EPMD_ADDRESS\": \"%{hiera('rabbitmq::interface')}\", \"RABBITMQ_SERVER_ERL_ARGS\": \"\\\"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<15000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<15000:64/native>>}]\\\"\", \"RABBITMQ_NODENAME\": \"rabbit@%{::hostname}\"}, \"rabbitmq::port\": 5672, \"rabbitmq::delete_guest_user\": false, \"rabbitmq::wipe_db_on_cookie_change\": true, \"rabbitmq::ssl_only\": {\"get_param\": \"EnableInternalTLS\"}, \"rabbitmq::ssl_port\": 5672, \"rabbitmq::package_source\": \"undef\", \"rabbitmq::package_provider\": \"yum\", \"tripleo::profile::base::rabbitmq::enable_internal_tls\": {\"get_param\": \"EnableInternalTLS\"}, \"rabbitmq::ssl_interface\": {\"get_param\": [\"ServiceNetMap\", \"RabbitmqNetwork\"]}, \"rabbitmq::ssl_depth\": 1}, {\"if\": [\"internal_tls_enabled\", {\"tripleo::profile::base::rabbitmq::certificate_specs\": {\"service_certificate\": \"/etc/pki/tls/certs/rabbitmq.crt\", \"hostname\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"RabbitmqNetwork\"]}}, \"template\": \"%{hiera('fqdn_NETWORK')}\"}}, \"service_key\": \"/etc/pki/tls/private/rabbitmq.key\", \"principal\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"RabbitmqNetwork\"]}}, \"template\": \"rabbitmq/%{hiera('fqdn_NETWORK')}\"}}}, \"generate_service_certificates\": true}, {}]}]}, \"upgrade_tasks\": [{\"name\": \"Stop rabbitmq service\", \"service\": \"name=rabbitmq-server state=stopped\", \"tags\": \"step2\"}, {\"name\": \"Start rabbitmq service\", \"service\": \"name=rabbitmq-server state=started\", \"tags\": \"step4\"}], \"metadata_settings\": {\"if\": [\"internal_tls_enabled\", [{\"type\": \"node\", \"network\": {\"get_param\": [\"ServiceNetMap\", \"RabbitmqNetwork\"]}, \"service\": \"rabbitmq\"}], null]}, \"service_name\": \"rabbitmq\", \"step_config\": \"include 
0.355 | 3311: ::tripleo::profile::base::rabbitmq\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"internal_tls_enabled\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}}, \"description\": \"RabbitMQ service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"RabbitFDLimit\": {\"default\": 65536, \"type\": \"string\", \"description\": \"Configures RabbitMQ FD limit\"}, \"RabbitIPv6\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable IPv6 in RabbitMQ\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RabbitCookie\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"RabbitHAQueues\": {\"default\": -1, \"type\": \"number\", \"description\": \"The number of HA queues to be configured in rabbit. The default is -1 which translates to \\\"ha-mode all\\\". The special value 0 will be automatically overridden to CEIL(N/2) where N is the number of nodes running rabbitmq.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"MonitoringSubscriptionRabbitmq\": {\"default\": \"overcloud-rabbitmq\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-ovs-agent.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron OVS agent configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NeutronEnableDVR\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable Neutron DVR.\"}, \"NeutronEnableARPResponder\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable ARP responder feature in the OVS Agent.\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronTunnelTypes\": {\"default\": \"vxlan\", \"type\": \"comma_delimited_list\", \"description\": \"The tunnel types for the Neutron tenant network.\"}, \"MonitoringSubscriptionNeutronOvs\": {\"default\": \"overcloud-neutron-ovs-agent\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NeutronEnableL2Pop\": {\"default\": \"False\", \"type\": \"string\", \"description\": \"Enable/disable the L2 population feature in the Neutron agents.\"}, \"NeutronAgentExtensions\": {\"default\": \"qos\", \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of extensions enabled for the Neutron agents.\
0.355 | 3311: \"}, \"NeutronBridgeMappings\": {\"default\": \"datacentre:br-ex\", \"type\": \"comma_delimited_list\", \"description\": \"The OVS logical->physical bridge mappings to use. See the Neutron documentation for details. Defaults to mapping br-ex - the external bridge on hosts - to a physical name 'datacentre' which can be used to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name.\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronOVSFirewallDriver\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Configure the classname of the firewall driver to use for implementing\
0.355 | 3311: security groups. Possible values depend on system configuration. Some\
0.355 | 3311: examples are: noop, openvswitch, iptables_hybrid. The default value of an\
0.355 | 3311: empty string will result in a default supported configuration.\
0.355 | 3311: \"}, \"NeutronOpenVswitchAgentLoggingSource\": {\"default\": {\"path\": \"/var/log/neutron/openvswitch-agent.log\", \"tag\": \"openstack.neutron.agent.openvswitch\"}, \"type\": \"json\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron OVS agent service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNeutronOvs\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"neutron::agents::ml2::ovs::local_ip\": {\"get_param\": [\"ServiceNetMap\", \"NeutronTenantNetwork\"]}, \"tripleo.neutron_ovs_agent.firewall_rules\": {\"136 neutron gre networks\": {\"proto\": \"gre\"}, \"118 neutron vxlan networks\": {\"dport\": 4789, \"proto\": \"udp\"}}, \"neutron::agents::ml2::ovs::tunnel_types\": {\"get_param\": \"NeutronTunnelTypes\"}, \"neutron::agents::ml2::ovs::enable_distributed_routing\": {\"get_param\": \"NeutronEnableDVR\"}, \"neutron::agents::ml2::ovs::bridge_mappings\": {\"get_param\": \"NeutronBridgeMappings\"}, \"neutron::agents::ml2::ovs::arp_responder\": {\"get_param\": \"NeutronEnableARPResponder\"}, \"neutron::agents::ml2::ovs::extensions\": {\"get_param\": \"NeutronAgentExtensions\"}, \"neutron::agents::ml2::ovs::l2_population\": {\"get_param\": \"NeutronEnableL2Pop\"}}, {\"if\": [\"no_firewall_driver\", {}, {\"neutron::agents::ml2::ovs::firewall_driver\": {\"get_param\": \"NeutronOVSFirewallDriver\"}}]}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"Ovs\", \"role_data\", \"upgrade_tasks\"]}, [{\"ignore_errors\": true, \"register\": \"neutron_ovs_agent_enabled\", \"command\": \"systemctl is-enabled neutron-openvswitch-agent\", \"name\": \"Check if neutron_ovs_agent is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"neutron_ovs_agent_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running\", \"tags\": \"step0,validation\"}, {\"when\": \"neutron_ovs_agent_enabled.rc == 0\", \"name\": \"Stop neutron_ovs_agent service\", \"service\": \"name=neutron-openvswitch-agent state=stopped\", \"tags\": \"step1\"}]]}, \"logging_groups\": [\"neutron\"], \"service_name\": \"neutron_ovs_agent\", \"logging_source\": {\"get_param\": \"NeutronOpenVswitchAgentLoggingSource\"}, \"step_config\": \"include ::tripleo::profile::base::neutron::ovs\
0.355 | 3311: \"}}}, \"conditions\": {\"no_firewall_driver\": {\"equals\": [{\"get_param\": \"NeutronOVSFirewallDriver\"}, \"\"]}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"Ovs\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/openvswitch.yaml\", \"properties\": {\"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-notifier.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Aodh Notifier service.\", \"value\": {\"service_name\": \"aodh_notifier\", \"step_config\": \"include tripleo::profile::base::aodh::notifier\
0.355 | 3311: \", \"config_settings\": {\"get_attr\": [\"AodhBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"aodh_notifier_enabled\", \"command\": \"systemctl is-enabled openstack-aodh-notifier\", \"name\": \"Check if aodh_notifier is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-aodh-notifier' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"aodh_notifier_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-aodh-notifier is running\", \"tags\": \"step0,validation\"}, {\"when\": \"aodh_notifier_enabled.rc == 0\", \"name\": \"Stop aodh_notifier service\", \"service\": \"name=openstack-aodh-notifier state=stopped\", \"tags\": \"step1\"}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionAodhNotifier\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Aodh Notifier service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MonitoringSubscriptionAodhNotifier\": {\"default\": \"overcloud-ceilometer-aodh-notifier\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"AodhBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-scheduler.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Scheduler service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaScheduler\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"nova::ram_allocation_ratio\": \"1.0\", \"nova::scheduler::filter::scheduler_default_filters\": {\"get_param\": \"NovaSchedulerDefaultFilters\"}, \"nova::scheduler::discover_hosts_in_cells_interval\": {\"get_param\": \"NovaSchedulerDiscoverHostsInCellsInterval\"}, \"nova::scheduler::filter::scheduler_available_filters\": {\"get_param\": \"NovaSchedulerAvailableFilters\"}}]}, \"upgrade_tasks\": [{\"name\": \"Stop nova_scheduler service\", \"service\": \"name=openstack-nova-scheduler state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"nova\"], \"service_name\": \"nova_scheduler\", \"logging_source\": {\"get_param\": \"NovaSchedulerLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::nova::scheduler\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova Scheduler service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NovaSchedulerAvailableFilters\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"List of scheduler available filters\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NovaSchedulerDefaultFilters\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"An array of filters used by Nova to filter a node.These filters will be applied in the order they are listed, so place your most restrictive filters first to make the filtering process more efficient.\
0.355 | 3311: \"}, \"NovaSchedulerLoggingSource\": {\"default\": {\"path\": \"/var/log/nova/nova-scheduler.log\", \"tag\": \"openstack.nova.scheduler\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NovaSchedulerDiscoverHostsInCellsInterval\": {\"default\": -1, \"type\": \"number\", \"description\": \"This value controls how often (in seconds) the scheduler should attempt to discover new hosts that have been added to cells. The default value of -1 disables the periodic task completely. It is recommended to set this parameter for deployments using Ironic.\
0.355 | 3311: \"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MonitoringSubscriptionNovaScheduler\": {\"default\": \"overcloud-nova-scheduler\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-api.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Cinder API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"MonitoringSubscriptionCinderApi\": {\"default\": \"overcloud-cinder-api\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"CinderApiLoggingSource\": {\"default\": {\"path\": \"/var/log/cinder/cinder-api.log\", \"tag\": \"openstack.cinder.api\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"CinderApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Cinder API.\
0.355 | 3311: e.g. { cinder-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"CinderWorkers\": {\"default\": \"%{::os_workers}\", \"type\": \"string\", \"description\": \"Set the number of workers for cinder::wsgi::apache\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"CinderEnableDBPurge\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to create cron job for purging soft deleted rows in Cinder database.\
0.355 | 3311: \"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"CinderPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the cinder service account, used by cinder-api.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Cinder API role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"CinderBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"cinder::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"cinder::keystone::authtoken::project_domain_name\": \"Default\", \"cinder::api::enable_proxy_headers_parsing\": true, \"cinder::keystone::authtoken::user_domain_name\": \"Default\", \"cinder::api::nova_catalog_info\": \"compute:nova:internalURL\", \"cinder::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"cinder::api::bind_host\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"tripleo.cinder_api.firewall_rules\": {\"119 cinder\": {\"dport\": [8776, 13776]}}, \"cinder::api::service_name\": \"httpd\", \"cinder::keystone::authtoken::password\": {\"get_param\": \"CinderPassword\"}, \"cinder::config\": {\"DEFAULT/swift_catalog_info\": {\"value\": \"object-store:swift:internalURL\"}}, \"cinder::api::nova_catalog_admin_info\": \"compute:nova:adminURL\", \"cinder::wsgi::apache::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}, \"cinder::keystone::authtoken::project_name\": \"service\", \"cinder::wsgi::apache::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"cinder::policy::policies\": {\"get_param\": \"CinderApiPolicies\"}, \"tripleo::profile::base::cinder::cinder_enable_db_purge\": {\"get_param\": \"CinderEnableDBPurge\"}, \"cinder::ceilometer::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"cinder::wsgi::apache::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}}, {\"if\": [\"cinder_workers_zero\", {}, {\"cinder::wsgi::apache::workers\": {\"get_param\": \"CinderWorkers\"}}]}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"upgrade_tasks\"]}, [{\"ignore_errors\": true, \"register\": \"cinder_api_enabled\", \"command\": \"systemctl is-enabled openstack-cinder-api\", \"name\": \"Check if cinder_api is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"cinder_api_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-cinder-api is running\", \"tags\": \"step0,validation\"}, {\"ignore_errors\": true, \"shell\": \"httpd -t -D DUMP_VHOSTS | grep -q cinder\", \"register\": \"cinder_apache\", \"name\": \"check for cinder running under apache (post upgrade)\", \"tags\": \"step1\"}, {\"when\": \"cinder_apache.rc == 0\", \"name\": \"Stop cinder_api service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"when\": \"cinder_api_enabled.rc == 0\", \"name\": \"Stop and disable cinder_api service (pre-upgrade not under 
httpd)\", \"service\": \"name=openstack-cinder-api state=stopped enabled=no\", \"tags\": \"step1\"}]]}, \"logging_groups\": [\"cinder\"], \"service_name\": \"cinder_api\", \"step_config\": \"include ::tripleo::profile::base::cinder::api\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCinderApi\"}, \"service_config_settings\": {\"keystone\": {\"cinder::keystone::auth::public_url_v2\": {\"get_param\": [\"EndpointMap\", \"CinderV2Public\", \"uri\"]}, \"cinder::keystone::auth::public_url_v3\": {\"get_param\": [\"EndpointMap\", \"CinderV3Public\", \"uri\"]}, \"cinder::keystone::auth::internal_url_v2\": {\"get_param\": [\"EndpointMap\", \"CinderV2Internal\", \"uri\"]}, \"cinder::keystone::auth::internal_url_v3\": {\"get_param\": [\"EndpointMap\", \"CinderV3Internal\", \"uri\"]}, \"cinder::keystone::auth::admin_url_v3\": {\"get_param\": [\"EndpointMap\", \"CinderV3Admin\", \"uri\"]}, \"cinder::keystone::auth::admin_url_v2\": {\"get_param\": [\"EndpointMap\", \"CinderV2Admin\", \"uri\"]}, \"cinder::keystone::auth::password\": {\"get_param\": \"CinderPassword\"}, \"cinder::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"uri\"]}, \"cinder::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"uri\"]}, \"cinder::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"cinder::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"uri\"]}, \"cinder::keystone::auth::tenant\": \"service\"}, \"mysql\": {\"cinder::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"cinder::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"cinder::db::mysql::dbname\": \"cinder\", \"cinder::db::mysql::password\": {\"get_param\": \"CinderPassword\"}, \"cinder::db::mysql::user\": \"cinder\"}}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"CinderApiLoggingSource\"}}}}, \"conditions\": {\"cinder_workers_zero\": {\"equals\": [{\"get_param\": \"CinderWorkers\"}, 0]}}, \"resources\": {\"CinderBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Cinder base service.\", \"value\": {\"service_name\": \"cinder_base\", \"config_settings\": {\"cinder::cron::db_purge::age\": {\"get_param\": \"CinderCronDbPurgeAge\"}, \"cinder::cron::db_purge::minute\": {\"get_param\": \"CinderCronDbPurgeMinute\"}, \"cinder::cron::db_purge::weekday\": {\"get_param\": \"CinderCronDbPurgeWeekday\"}, \"cinder::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, 
\"cinder::cron::db_purge::destination\": {\"get_param\": \"CinderCronDbPurgeDestination\"}, \"cinder::db::database_max_retries\": -1, \"cinder::db::database_db_max_retries\": -1, \"cinder::database_connection\": {\"make_url\": {\"username\": \"cinder\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"CinderPassword\"}, \"path\": \"/cinder\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"cinder::cron::db_purge::hour\": {\"get_param\": \"CinderCronDbPurgeHour\"}, \"cinder::cron::db_purge::month\": {\"get_param\": \"CinderCronDbPurgeMonth\"}, \"cinder::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"cinder::glance::glance_api_servers\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"uri\"]}, \"cinder::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"cinder::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"cinder::cron::db_purge::monthday\": {\"get_param\": \"CinderCronDbPurgeMonthday\"}, \"cinder::cron::db_purge::user\": {\"get_param\": \"CinderCronDbPurgeUser\"}, \"cinder::rabbit_heartbeat_timeout_threshold\": 60, \"cinder::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"CinderDebug\"}]}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"CinderDebug\"}, \"\"]}}, \"description\": \"OpenStack Cinder base service. Shared by all Cinder services.\
0.355 | 3311: \", \"parameters\": {\"CinderCronDbPurgeUser\": {\"default\": \"keystone\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - User\
0.355 | 3311: \"}, \"CinderCronDbPurgeAge\": {\"default\": \"0\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Age\
0.355 | 3311: \"}, \"CinderCronDbPurgeMinute\": {\"default\": \"1\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Minute\
0.355 | 3311: \"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"CinderDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on Cinder services.\"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"CinderCronDbPurgeDestination\": {\"default\": \"/var/log/cinder/cinder-rowsflush.log\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Log destination\
0.355 | 3311: \"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"CinderCronDbPurgeHour\": {\"default\": \"0\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Hour\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"CinderCronDbPurgeMonthday\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Month Day\
0.355 | 3311: \"}, \"CinderCronDbPurgeWeekday\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Week Day\
0.355 | 3311: \"}, \"CinderCronDbPurgeMonth\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to move deleted instances to another table - Month\
0.355 | 3311: \"}, \"CinderPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the cinder service account, used by cinder-api.\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/manifests/overcloud_role.pp": "# Copyright 2014 Red Hat, Inc.
0.355 | 3311: # All Rights Reserved.
0.355 | 3311: #
0.355 | 3311: # Licensed under the Apache License, Version 2.0 (the \"License\"); you may
0.355 | 3311: # not use this file except in compliance with the License. You may obtain
0.355 | 3311: # a copy of the License at
0.355 | 3311: #
0.355 | 3311: # http://www.apache.org/licenses/LICENSE-2.0
0.355 | 3311: #
0.355 | 3311: # Unless required by applicable law or agreed to in writing, software
0.355 | 3311: # distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT
0.355 | 3311: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
0.355 | 3311: # License for the specific language governing permissions and limitations
0.355 | 3311: # under the License.
0.355 | 3311:
0.355 | 3311: # The content of this file will be used to generate
0.355 | 3311: # the puppet manifests for all roles, the placeholder
0.355 | 3311: # __ROLE__ will be replaced by 'controller', 'blockstorage',
0.355 | 3311: # 'cephstorage' and all the deployed roles.
0.355 | 3311:
0.355 | 3311: if hiera('step') >= 4 {
0.355 | 3311: hiera_include('__ROLE___classes', [])
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')])
0.355 | 3311: package_manifest{$package_manifest_name: ensure => present}
0.355 | 3311:
0.355 | 3311: # NOTE(gfidente): ensure deprecated package manifest is absent, can be removed after Pike
0.355 | 3311: $absent_package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
0.355 | 3311: package_manifest{$absent_package_manifest_name: ensure => absent}
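
As the comment in this manifest explains, __ROLE__ is a placeholder substituted once per deployed role when the manifests are generated. A minimal Python sketch of that substitution, with hypothetical file names:

    # Hypothetical sketch of the per-role manifest generation described
    # in the comment above.
    template = open('overcloud_role.pp').read()
    for role in ('controller', 'blockstorage', 'cephstorage'):
        with open('overcloud_%s.pp' % role, 'w') as out:
            out.write(template.replace('__ROLE__', role))
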
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/time/timezone.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Timezone role using composable services.\", \"value\": {\"service_name\": \"timezone\", \"step_config\": \"include ::timezone\", \"config_settings\": {\"timezone::timezone\": {\"get_param\": \"TimeZone\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"Composable Timezone service\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"TimeZone\": {\"default\": \"UTC\", \"type\": \"string\", \"description\": \"The timezone to be set on the overcloud.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-compute.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Ceilometer Compute Agent role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCeilometerCompute\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"config_settings\"]}, {\"ceilometer::agent::compute::instance_discovery_method\": {\"get_param\": \"InstanceDiscoveryMethod\"}}, {\"ceilometer_redis_password\": {\"get_param\": \"RedisPassword\"}, \"compute_namespace\": true}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"ceilometer_agent_compute_enabled\", \"command\": \"systemctl is-enabled openstack-ceilometer-compute\", \"name\": \"Check if ceilometer_agent_compute is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-ceilometer-compute' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"ceilometer_agent_compute_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-ceilometer-compute is running\", \"tags\": \"step0,validation\"}, {\"when\": \"ceilometer_agent_compute_enabled.rc == 0\", \"name\": \"Stop ceilometer_agent_compute service\", \"service\": \"name=openstack-ceilometer-compute state=stopped\", \"tags\": \"step1\"}], \"service_config_settings\": {\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"service_config_settings\"]}, \"service_name\": \"ceilometer_agent_compute\", \"step_config\": \"include ::tripleo::profile::base::ceilometer::agent::polling\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer Compute Agent service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"InstanceDiscoveryMethod\": {\"default\": \"libvirt_metadata\", \"type\": \"string\", \"description\": \"Method used to discover instances running on compute node\", \"constraints\": [{\"allowed_values\": [\"naive\", \"libvirt_metadata\", \"workload_partitioning\"]}]}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"MonitoringSubscriptionCeilometerCompute\": {\"default\": \"overcloud-ceilometer-agent-compute\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"CeilometerServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/ceilometer-api-disabled.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the disabled Ceilometer API role.\", \"value\": {\"service_name\": \"ceilometer_api_disabled\", \"upgrade_tasks\": [{\"name\": \"Purge Ceilometer apache config files\", \"file\": \"path=/etc/httpd/conf.d/10-ceilometer_wsgi.conf state=absent\", \"tags\": \"step1\"}, {\"lineinfile\": \"dest=/etc/httpd/conf/ports.conf state=absent regexp=\\\"8777$\\\"\", \"name\": \"Clean up ceilometer port from ports.conf\", \"tags\": \"step1\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer API service, disabled since pike\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/deployed-server/deployed-server-bootstrap-centos.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Deployed Server Bootstrap Config\", \"parameters\": {\"server\": {\"type\": \"string\"}}, \"resources\": {\"DeployedServerBootstrapConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/deployed-server/deployed-server-bootstrap-centos.sh\"}}}, \"DeployedServerBootstrapDeployment\": {\"type\": \"OS::Heat::SoftwareDeployment\", \"properties\": {\"config\": {\"get_resource\": \"DeployedServerBootstrapConfig\"}, \"name\": \"DeployedServerBootstrapDeployment\", \"server\": {\"get_param\": \"server\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-conductor.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova Conductor service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"MonitoringSubscriptionNovaConductor\": {\"default\": \"overcloud-nova-conductor\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"UpgradeLevelNovaCompute\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Nova Compute upgrade level\"}, \"NovaSchedulerLoggingSource\": {\"default\": {\"path\": \"/var/log/nova/nova-scheduler.log\", \"tag\": \"openstack.nova.scheduler\"}, \"type\": \"json\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NovaWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Nova services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Conductor service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaConductor\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"if\": [\"nova_workers_zero\", {}, {\"nova::conductor::workers\": {\"get_param\": \"NovaWorkers\"}}]}]}, \"upgrade_tasks\": [{\"name\": \"Stop nova_conductor service\", \"service\": \"name=openstack-nova-conductor state=stopped\", \"tags\": \"step1\"}, {\"ini_file\": {\"str_replace\": {\"params\": {\"LEVEL\": {\"get_param\": \"UpgradeLevelNovaCompute\"}}, \"template\": \"dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL\"}}, \"name\": \"Set compute upgrade level to auto\", \"tags\": \"step1\"}], \"logging_groups\": [\"nova\"], \"service_name\": \"nova_conductor\", \"logging_source\": {\"get_param\": \"NovaSchedulerLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::nova::conductor\
0.355 | 3311: \"}}}, \"conditions\": {\"nova_workers_zero\": {\"equals\": [{\"get_param\": \"NovaWorkers\"}, 0]}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-api.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Aodh API service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionAodhApi\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"AodhBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"aodh::api::host\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"aodh::policy::policies\": {\"get_param\": \"AodhApiPolicies\"}, \"aodh::wsgi::apache::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"aodh::api::enable_proxy_headers_parsing\": true, \"aodh::wsgi::apache::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"aodh::wsgi::apache::wsgi_process_display_name\": \"aodh_wsgi\", \"aodh::api::gnocchi_external_project_owner\": {\"get_param\": \"GnocchiExternalProject\"}, \"tripleo.aodh_api.firewall_rules\": {\"128 aodh-api\": {\"dport\": [8042, 13042]}}, \"aodh::api::service_name\": \"httpd\", \"aodh::wsgi::apache::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"upgrade_tasks\"]}, [{\"name\": \"Stop aodh_api service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}]]}, \"service_config_settings\": {\"get_attr\": [\"AodhBase\", \"role_data\", \"service_config_settings\"]}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"aodh_api\", \"step_config\": \"include tripleo::profile::base::aodh::api\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Aodh API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"GnocchiExternalProject\": {\"default\": \"service\", \"type\": \"string\", \"description\": \"Project name of resources creator in Gnocchi.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MonitoringSubscriptionAodhApi\": {\"default\": \"overcloud-ceilometer-aodh-api\", \"type\": \"string\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"AodhApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Aodh API.\
0.355 | 3311: e.g. { aodh-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"AodhBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/docker/docker-puppet.py": "#!/usr/bin/env python
0.355 | 3311: #
0.355 | 3311: # Licensed under the Apache License, Version 2.0 (the \"License\"); you may
0.355 | 3311: # not use this file except in compliance with the License. You may obtain
0.355 | 3311: # a copy of the License at
0.355 | 3311: #
0.355 | 3311: # http://www.apache.org/licenses/LICENSE-2.0
0.355 | 3311: #
0.355 | 3311: # Unless required by applicable law or agreed to in writing, software
0.355 | 3311: # distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT
0.355 | 3311: # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
0.355 | 3311: # License for the specific language governing permissions and limitations
0.355 | 3311: # under the License.
0.355 | 3311:
0.355 | 3311: # Tool to run puppet inside of the given docker container image.
0.355 | 3311: # Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
0.355 | 3311: # array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
0.355 | 3311: # that can be used to generate config files or run ad-hoc puppet modules
0.355 | 3311: # inside of a container.
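
Per the header comment, each docker-puppet.json entry is a [config_volume, puppet_tags, manifest, config_image, [volumes]] record (a dict form with the same keys is also accepted further down). An illustrative entry, all values hypothetical:

    # [config_volume, puppet_tags, manifest, config_image, [volumes]]
    example_entry = [
        'nova',                                    # config_volume
        'nova_config',                             # puppet_tags
        'include ::tripleo::profile::base::nova',  # manifest / step_config
        '192.168.24.1:8787/centos-binary-nova-api:latest',  # config_image
        [],                                        # extra volumes
    ]
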
0.355 | 3311:
0.355 | 3311: import glob
0.355 | 3311: import json
0.355 | 3311: import logging
0.355 | 3311: import os
0.355 | 3311: import subprocess
0.355 | 3311: import sys
0.355 | 3311: import tempfile
0.355 | 3311: import time
0.355 | 3311: import multiprocessing
0.355 | 3311:
0.355 | 3311: logger = None
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: def get_logger():
0.355 | 3311: global logger
0.355 | 3311: if logger is None:
0.355 | 3311: logger = logging.getLogger()
0.355 | 3311: ch = logging.StreamHandler(sys.stdout)
0.355 | 3311: if os.environ.get('DEBUG', False):
0.355 | 3311: logger.setLevel(logging.DEBUG)
0.355 | 3311: ch.setLevel(logging.DEBUG)
0.355 | 3311: else:
0.355 | 3311: logger.setLevel(logging.INFO)
0.355 | 3311: ch.setLevel(logging.INFO)
0.355 | 3311: formatter = logging.Formatter('%(asctime)s %(levelname)s: '
0.355 | 3311: '%(process)s -- %(message)s')
0.355 | 3311: ch.setFormatter(formatter)
0.355 | 3311: logger.addHandler(ch)
0.355 | 3311: return logger
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: # this is to match what we do in deployed-server
0.355 | 3311: def short_hostname():
0.355 | 3311: subproc = subprocess.Popen(['hostname', '-s'],
0.355 | 3311: stdout=subprocess.PIPE,
0.355 | 3311: stderr=subprocess.PIPE)
0.355 | 3311: cmd_stdout, cmd_stderr = subproc.communicate()
0.355 | 3311: return cmd_stdout.rstrip()
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: def pull_image(name):
0.355 | 3311: log.info('Pulling image: %s' % name)
0.355 | 3311: retval = -1
0.355 | 3311: count = 0
0.355 | 3311: while retval != 0:
0.355 | 3311: count += 1
0.355 | 3311: subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
0.355 | 3311: stdout=subprocess.PIPE,
0.355 | 3311: stderr=subprocess.PIPE)
0.355 | 3311:
0.355 | 3311: cmd_stdout, cmd_stderr = subproc.communicate()
0.355 | 3311: retval = subproc.returncode
0.355 | 3311: if retval != 0:
0.355 | 3311: time.sleep(3)
0.355 | 3311: log.warning('docker pull failed: %s' % cmd_stderr)
0.355 | 3311: log.warning('retrying pulling image: %s' % name)
0.355 | 3311: if count >= 5:
0.355 | 3311: log.error('Failed to pull image: %s' % name)
0.355 | 3311: break
0.355 | 3311: if cmd_stdout:
0.355 | 3311: log.debug(cmd_stdout)
0.355 | 3311: if cmd_stderr:
0.355 | 3311: log.debug(cmd_stderr)
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: def match_config_volume(prefix, config):
0.355 | 3311: # Match the mounted config volume - we can't just use the
0.355 | 3311: # key as e.g \"novacomute\" consumes config-data/nova
0.355 | 3311: volumes = config.get('volumes', [])
0.355 | 3311: config_volume = None
0.355 | 3311: for v in volumes:
0.355 | 3311: if v.startswith(prefix):
0.355 | 3311: config_volume = os.path.relpath(
0.355 | 3311: v.split(\":\")[0], prefix).split(\"/\")[0]
0.355 | 3311: break
0.355 | 3311: return config_volume
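
A worked example of match_config_volume(): with the default /var/lib/config-data prefix, the host side of a mount resolves to its first path component under the prefix (mount string hypothetical):

    import os

    # Hypothetical volume mount; mirrors the logic in
    # match_config_volume() above.
    volume = '/var/lib/config-data/nova/etc/nova:/etc/nova:ro'
    prefix = '/var/lib/config-data'
    name = os.path.relpath(volume.split(':')[0], prefix).split('/')[0]
    assert name == 'nova'
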
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: def get_config_hash(prefix, config_volume):
0.355 | 3311: hashfile = os.path.join(prefix, \"%s.md5sum\" % config_volume)
0.355 | 3311: hash_data = None
0.355 | 3311: if os.path.isfile(hashfile):
0.355 | 3311: with open(hashfile) as f:
0.355 | 3311: hash_data = f.read().rstrip()
0.355 | 3311: return hash_data
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: def rm_container(name):
0.355 | 3311: if os.environ.get('SHOW_DIFF', None):
0.355 | 3311: log.info('Diffing container: %s' % name)
0.355 | 3311: subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
0.355 | 3311: stdout=subprocess.PIPE,
0.355 | 3311: stderr=subprocess.PIPE)
0.355 | 3311: cmd_stdout, cmd_stderr = subproc.communicate()
0.355 | 3311: if cmd_stdout:
0.355 | 3311: log.debug(cmd_stdout)
0.355 | 3311: if cmd_stderr:
0.355 | 3311: log.debug(cmd_stderr)
0.355 | 3311:
0.355 | 3311: log.info('Removing container: %s' % name)
0.355 | 3311: subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
0.355 | 3311: stdout=subprocess.PIPE,
0.355 | 3311: stderr=subprocess.PIPE)
0.355 | 3311: cmd_stdout, cmd_stderr = subproc.communicate()
0.355 | 3311: if cmd_stdout:
0.355 | 3311: log.debug(cmd_stdout)
0.355 | 3311: if cmd_stderr and \\
0.355 | 3311: cmd_stderr != 'Error response from daemon: ' \\
0.355 | 3311: 'No such container: {}\
0.355 | 3311: '.format(name):
0.355 | 3311: log.debug(cmd_stderr)
0.355 | 3311:
0.355 | 3311: process_count = int(os.environ.get('PROCESS_COUNT',
0.355 | 3311: multiprocessing.cpu_count()))
0.355 | 3311: log = get_logger()
0.355 | 3311: log.info('Running docker-puppet')
0.355 | 3311: config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
0.355 | 3311: log.debug('CONFIG: %s' % config_file)
0.355 | 3311: with open(config_file) as f:
0.355 | 3311: json_data = json.load(f)
0.355 | 3311:
0.355 | 3311: # To save time we support configuring 'shared' services at the same
0.355 | 3311: # time. For example configuring all of the heat services
0.355 | 3311: # in a single container pass makes sense and will save some time.
0.355 | 3311: # To support this we merge shared settings together here.
0.355 | 3311: #
0.355 | 3311: # We key off of config_volume as this should be the same for a
0.355 | 3311: # given group of services. We are also now specifying the container
0.355 | 3311: # in which the services should be configured. This should match
0.355 | 3311: # in all instances where the volume name is also the same.
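
Concretely, when two entries share a config_volume the loop below keeps a single record, comma-joins their puppet_tags, and newline-joins their manifests; a sketch with hypothetical services:

    # Hypothetical entries sharing the 'neutron' config volume.
    a = ['neutron', 'neutron_config', 'include ::profile_a', 'img:latest', []]
    b = ['neutron', 'neutron_plugin_ml2', 'include ::profile_b', 'img:latest', []]
    merged = list(a)
    merged[1] = '%s,%s' % (a[1], b[1])   # 'neutron_config,neutron_plugin_ml2'
    merged[2] = '%s\n%s' % (a[2], b[2])  # manifests concatenated
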
0.355 | 3311:
0.355 | 3311: configs = {}
0.355 | 3311:
0.355 | 3311: for service in (json_data or []):
0.355 | 3311: if service is None:
0.355 | 3311: continue
0.355 | 3311: if isinstance(service, dict):
0.355 | 3311: service = [
0.355 | 3311: service.get('config_volume'),
0.355 | 3311: service.get('puppet_tags'),
0.355 | 3311: service.get('step_config'),
0.355 | 3311: service.get('config_image'),
0.355 | 3311: service.get('volumes', []),
0.355 | 3311: ]
0.355 | 3311:
0.355 | 3311: config_volume = service[0] or ''
0.355 | 3311: puppet_tags = service[1] or ''
0.355 | 3311: manifest = service[2] or ''
0.355 | 3311: config_image = service[3] or ''
0.355 | 3311: volumes = service[4] if len(service) > 4 else []
0.355 | 3311:
0.355 | 3311: if not manifest or not config_image:
0.355 | 3311: continue
0.355 | 3311:
0.355 | 3311: log.info('config_volume %s' % config_volume)
0.355 | 3311: log.info('puppet_tags %s' % puppet_tags)
0.355 | 3311: log.info('manifest %s' % manifest)
0.355 | 3311: log.info('config_image %s' % config_image)
0.355 | 3311: log.info('volumes %s' % volumes)
0.355 | 3311: # We key off of config volume for all configs.
0.355 | 3311: if config_volume in configs:
0.355 | 3311: # Append puppet tags and manifest.
0.355 | 3311: log.info(\"Existing service, appending puppet tags and manifest\")
0.355 | 3311: if puppet_tags:
0.355 | 3311: configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
0.355 | 3311: puppet_tags)
0.355 | 3311: if manifest:
0.355 | 3311: configs[config_volume][2] = '%s\
0.355 | 3311: %s' % (configs[config_volume][2],
0.355 | 3311: manifest)
0.355 | 3311: if configs[config_volume][3] != config_image:
0.355 | 3311: log.warn(\"Config containers do not match even though\"
0.355 | 3311: \" shared volumes are the same!\")
0.355 | 3311: else:
0.355 | 3311: log.info(\"Adding new service\")
0.355 | 3311: configs[config_volume] = service
0.355 | 3311:
0.355 | 3311: log.info('Service compilation completed.')
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
0.355 | 3311: log = get_logger()
0.355 | 3311: log.info('Started processing puppet configs')
0.355 | 3311: log.debug('config_volume %s' % config_volume)
0.355 | 3311: log.debug('puppet_tags %s' % puppet_tags)
0.355 | 3311: log.debug('manifest %s' % manifest)
0.355 | 3311: log.debug('config_image %s' % config_image)
0.355 | 3311: log.debug('volumes %s' % volumes)
0.355 | 3311: sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
0.355 | 3311:
0.355 | 3311: with open(sh_script, 'w') as script_file:
0.355 | 3311: os.chmod(script_file.name, 0755)
0.355 | 3311: script_file.write(\"\"\"#!/bin/bash
0.355 | 3311: set -ex
0.355 | 3311: mkdir -p /etc/puppet
0.355 | 3311: cp -a /tmp/puppet-etc/* /etc/puppet
0.355 | 3311: rm -Rf /etc/puppet/ssl # not in use and causes permission errors
0.355 | 3311: echo \"{\\\\\"step\\\\\": $STEP}\" > /etc/puppet/hieradata/docker.json
0.355 | 3311: TAGS=\"\"
0.355 | 3311: if [ -n \"$PUPPET_TAGS\" ]; then
0.355 | 3311: TAGS=\"--tags \\\"$PUPPET_TAGS\\\"\"
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # Create a reference timestamp to easily find all files touched by
0.355 | 3311: # puppet. The sync ensures we get all the files we want due to
0.355 | 3311: # differing timestamps.
0.355 | 3311: touch /tmp/the_origin_of_time
0.355 | 3311: sync
0.355 | 3311:
0.355 | 3311: FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply \\
0.355 | 3311: --color=false --logdest syslog --logdest console $TAGS /etc/config.pp
0.355 | 3311:
0.355 | 3311: # Setting NO_ARCHIVE disables the archiving below
0.355 | 3311: if [ -z \"$NO_ARCHIVE\" ]; then
0.355 | 3311: archivedirs=(\"/etc\" \"/root\" \"/opt\" \"/var/lib/ironic/tftpboot\" \"/var/lib/ironic/httpboot\" \"/var/www\" \"/var/spool/cron\" \"/var/lib/nova/.ssh\")
0.355 | 3311: rsync_srcs=\"\"
0.355 | 3311: for d in \"${archivedirs[@]}\"; do
0.355 | 3311: if [ -d \"$d\" ]; then
0.355 | 3311: rsync_srcs+=\" $d\"
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
0.355 | 3311:
0.355 | 3311: # Also make a copy of files modified during puppet run
0.355 | 3311: # This is useful for debugging
0.355 | 3311: mkdir -p /var/lib/config-data/puppet-generated/${NAME}
0.355 | 3311: rsync -a -R -0 --delay-updates --delete-after \\
0.355 | 3311: --files-from=<(find $rsync_srcs -newer /tmp/the_origin_of_time -not -path '/etc/puppet*' -print0) \\
0.355 | 3311: / /var/lib/config-data/puppet-generated/${NAME}
0.355 | 3311:
0.355 | 3311: # Write a checksum of the config-data dir, this is used as a
0.355 | 3311: # salt to trigger container restart when the config changes
0.355 | 3311: tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
0.355 | 3311: fi
0.355 | 3311: \"\"\")
0.355 | 3311:
0.355 | 3311: with tempfile.NamedTemporaryFile() as tmp_man:
0.355 | 3311: with open(tmp_man.name, 'w') as man_file:
0.355 | 3311: man_file.write('include ::tripleo::packages\
0.355 | 3311: ')
0.355 | 3311: man_file.write(manifest)
0.355 | 3311:
0.355 | 3311: rm_container('docker-puppet-%s' % config_volume)
0.355 | 3311: pull_image(config_image)
0.355 | 3311:
0.355 | 3311: dcmd = ['/usr/bin/docker', 'run',
0.355 | 3311: '--user', 'root',
0.355 | 3311: '--name', 'docker-puppet-%s' % config_volume,
0.355 | 3311: '--health-cmd', '/bin/true',
0.355 | 3311: '--env', 'PUPPET_TAGS=%s' % puppet_tags,
0.355 | 3311: '--env', 'NAME=%s' % config_volume,
0.355 | 3311: '--env', 'HOSTNAME=%s' % short_hostname(),
0.355 | 3311: '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
0.355 | 3311: '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
0.355 | 3311: '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
0.355 | 3311: '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
0.355 | 3311: '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
0.355 | 3311: '--volume', '%s:/var/lib/config-data/:rw' % os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data'),
0.355 | 3311: '--volume', 'tripleo_logs:/var/log/tripleo/',
0.355 | 3311: # Syslog socket for puppet logs
0.355 | 3311: '--volume', '/dev/log:/dev/log',
0.355 | 3311: # OpenSSL trusted CA injection
0.355 | 3311: '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
0.355 | 3311: '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
0.355 | 3311: '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
0.355 | 3311: '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
0.355 | 3311: # script injection
0.355 | 3311: '--volume', '%s:%s:rw' % (sh_script, sh_script) ]
0.355 | 3311:
0.355 | 3311: for volume in volumes:
0.355 | 3311: if volume:
0.355 | 3311: dcmd.extend(['--volume', volume])
0.355 | 3311:
0.355 | 3311: dcmd.extend(['--entrypoint', sh_script])
0.355 | 3311:
0.355 | 3311: env = {}
0.355 | 3311: # NOTE(flaper87): Always copy the DOCKER_* environment variables as
0.355 | 3311: # they contain the access data for the docker daemon.
0.355 | 3311: for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
0.355 | 3311: env[k] = os.environ.get(k)
0.355 | 3311:
0.355 | 3311: if os.environ.get('NET_HOST', 'false') == 'true':
0.355 | 3311: log.debug('NET_HOST enabled')
0.355 | 3311: dcmd.extend(['--net', 'host', '--volume',
0.355 | 3311: '/etc/hosts:/etc/hosts:ro'])
0.355 | 3311: dcmd.append(config_image)
0.355 | 3311: log.debug('Running docker command: %s' % ' '.join(dcmd))
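
For orientation, the command logged here assembles to roughly the following (abbreviated; image name, hostname and temp path are hypothetical):

    # /usr/bin/docker run --user root --name docker-puppet-nova \
    #     --env PUPPET_TAGS=file,file_line,concat,augeas,cron,nova_config \
    #     --env NAME=nova --env HOSTNAME=overcloud-controller-0 \
    #     --env NO_ARCHIVE= --env STEP=6 \
    #     --volume /tmp/tmpXYZ:/etc/config.pp:ro \
    #     --volume /etc/puppet/:/tmp/puppet-etc/:ro \
    #     ... \
    #     --entrypoint /var/lib/docker-puppet/docker-puppet.sh \
    #     192.168.24.1:8787/centos-binary-nova-api:latest
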
0.355 | 3311:
0.355 | 3311: subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
0.355 | 3311: stderr=subprocess.PIPE, env=env)
0.355 | 3311: cmd_stdout, cmd_stderr = subproc.communicate()
0.355 | 3311: if subproc.returncode != 0:
0.355 | 3311: log.error('Failed running docker-puppet.py for %s' % config_volume)
0.355 | 3311: if cmd_stdout:
0.355 | 3311: log.error(cmd_stdout)
0.355 | 3311: if cmd_stderr:
0.355 | 3311: log.error(cmd_stderr)
0.355 | 3311: else:
0.355 | 3311: if cmd_stdout:
0.355 | 3311: log.debug(cmd_stdout)
0.355 | 3311: if cmd_stderr:
0.355 | 3311: log.debug(cmd_stderr)
0.355 | 3311: # only delete successful runs, for debugging
0.355 | 3311: rm_container('docker-puppet-%s' % config_volume)
0.355 | 3311:
0.355 | 3311: log.info('Finished processing puppet configs')
0.355 | 3311: return subproc.returncode
0.355 | 3311:
0.355 | 3311: # Holds all the information for each process to consume.
0.355 | 3311: # Instead of starting them all linearly we run them using a process
0.355 | 3311: # pool. This creates a list of arguments for the above function
0.355 | 3311: # to consume.
0.355 | 3311: process_map = []
0.355 | 3311:
0.355 | 3311: for config_volume in configs:
0.355 | 3311:
0.355 | 3311: service = configs[config_volume]
0.355 | 3311: puppet_tags = service[1] or ''
0.355 | 3311: manifest = service[2] or ''
0.355 | 3311: config_image = service[3] or ''
0.355 | 3311: volumes = service[4] if len(service) > 4 else []
0.355 | 3311:
0.355 | 3311: if puppet_tags:
0.355 | 3311: puppet_tags = \"file,file_line,concat,augeas,cron,%s\" % puppet_tags
0.355 | 3311: else:
0.355 | 3311: puppet_tags = \"file,file_line,concat,augeas,cron\"
0.355 | 3311:
0.355 | 3311: process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
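
Each entry thus gets the config-file resource types prepended to whatever service-specific tags it declared, e.g. (hypothetical tag):

    # A service tagged 'nova_config' ends up running Puppet with:
    puppet_tags = 'file,file_line,concat,augeas,cron,nova_config'
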
0.355 | 3311:
0.355 | 3311: for p in process_map:
0.355 | 3311: log.debug('- %s' % p)
0.355 | 3311:
0.355 | 3311: # Fire off processes to perform each configuration. Defaults
0.355 | 3311: # to the number of CPUs on the system.
0.355 | 3311: p = multiprocessing.Pool(process_count)
0.355 | 3311: returncodes = list(p.map(mp_puppet_config, process_map))
0.355 | 3311: config_volumes = [pm[0] for pm in process_map]
0.355 | 3311: success = True
0.355 | 3311: for returncode, config_volume in zip(returncodes, config_volumes):
0.355 | 3311: if returncode != 0:
0.355 | 3311: log.error('ERROR configuring %s' % config_volume)
0.355 | 3311: success = False
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: # Update the startup configs with the config hash we generated above
0.355 | 3311: config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
0.355 | 3311: log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
0.355 | 3311: startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
0.355 | 3311: log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
0.355 | 3311: infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
0.355 | 3311: for infile in infiles:
0.355 | 3311: with open(infile) as f:
0.355 | 3311: infile_data = json.load(f)
0.355 | 3311:
0.355 | 3311: for k, v in infile_data.iteritems():
0.355 | 3311: config_volume = match_config_volume(config_volume_prefix, v)
0.355 | 3311: if config_volume:
0.355 | 3311: config_hash = get_config_hash(config_volume_prefix, config_volume)
0.355 | 3311: if config_hash:
0.355 | 3311: env = v.get('environment', [])
0.355 | 3311: env.append(\"TRIPLEO_CONFIG_HASH=%s\" % config_hash)
0.355 | 3311: log.debug(\"Updating config hash for %s, config_volume=%s hash=%s\" % (k, config_volume, config_hash))
0.355 | 3311: infile_data[k]['environment'] = env
0.355 | 3311:
0.355 | 3311: outfile = os.path.join(os.path.dirname(infile), \"hashed-\" + os.path.basename(infile))
0.355 | 3311: with open(outfile, 'w') as out_f:
0.355 | 3311: os.chmod(out_f.name, 0600)
0.355 | 3311: json.dump(infile_data, out_f)
0.355 | 3311:
0.355 | 3311: if not success:
0.355 | 3311: sys.exit(1)
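The last stage of the script rewrites the container startup configs: for every docker-container-startup-config-step_*.json file it matches each container against a config volume, computes that volume's content hash, and appends it to the container environment as TRIPLEO_CONFIG_HASH, so a changed config yields a changed container definition and forces a restart. (Note, incidentally, that the glob in the excerpt reuses the literal default pattern rather than the STARTUP_CONFIG_PATTERN value it just logged.) The essentials, as a sketch in which match_config_volume and get_config_hash stand in for helpers defined earlier in the script, outside this excerpt:

    import glob
    import json
    import os

    def inject_config_hash(prefix, pattern,
                           match_config_volume, get_config_hash):
        # Tag each container whose config volume has a known hash so
        # that a changed config restarts the container.
        for infile in glob.glob(pattern):
            with open(infile) as f:
                data = json.load(f)
            for name, container in data.items():
                volume = match_config_volume(prefix, container)
                config_hash = get_config_hash(prefix, volume) if volume else None
                if config_hash:
                    env = container.get('environment', [])
                    env.append('TRIPLEO_CONFIG_HASH=%s' % config_hash)
                    data[name]['environment'] = env
            outfile = os.path.join(os.path.dirname(infile),
                                   'hashed-' + os.path.basename(infile))
            with open(outfile, 'w') as out:
                os.chmod(out.name, 0o600)  # restrict perms on the copy
                json.dump(data, out)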
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Shared role data for the Heat services.\", \"value\": {\"service_name\": \"gnocchi_base\", \"config_settings\": {\"gnocchi::storage::ceph::ceph_pool\": {\"get_param\": \"GnocchiRbdPoolName\"}, \"gnocchi::storage::swift::swift_endpoint_type\": {\"get_param\": \"GnocchiStorageSwiftEndpointType\"}, \"gnocchi::storage::swift::swift_key\": {\"get_param\": \"GnocchiPassword\"}, \"gnocchi::statsd::archive_policy_name\": \"low\", \"gnocchi::storage::swift::swift_authurl\": {\"get_param\": [\"EndpointMap\", \"KeystoneV3Internal\", \"uri\"]}, \"gnocchi::statsd::user_id\": \"27c0d3f8-e7ee-42f0-8317-72237d1c5ae3\", \"gnocchi::storage::ceph::ceph_username\": {\"get_param\": \"CephClientUserName\"}, \"gnocchi::statsd::flush_delay\": 10, \"gnocchi::storage::ceph::ceph_keyring\": {\"list_join\": [\".\", [\"/etc/ceph/ceph\", \"client\", {\"get_param\": \"CephClientUserName\"}, \"keyring\"]]}, \"gnocchi::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"GnocchiDebug\"}]}, \"gnocchi::storage::swift::swift_auth_version\": 3, \"gnocchi::db::database_connection\": {\"make_url\": {\"username\": \"gnocchi\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"GnocchiPassword\"}, \"path\": \"/gnocchi\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"gnocchi::storage::metric_processing_delay\": {\"get_param\": \"MetricProcessingDelay\"}, \"gnocchi::statsd::project_id\": \"6c38cd8d-099a-4cb2-aecf-17be688e8616\", \"gnocchi::db::sync::extra_opts\": {\"str_replace\": {\"params\": {\"NUM_SACKS\": {\"get_param\": \"NumberOfStorageSacks\"}}, \"template\": \" --sacks-number NUM_SACKS\"}}, \"gnocchi::statsd::resource_id\": \"0a8b55df-f90f-491c-8cb9-7cdecec6fc26\", \"gnocchi_redis_password\": {\"get_param\": \"RedisPassword\"}, \"gnocchi::storage::swift::swift_user\": \"service:gnocchi\"}}}, \"aux_parameters\": {\"description\": \"Additional parameters referenced outside the base file\", \"value\": {\"gnocchi_indexer_backend\": {\"get_param\": \"GnocchiIndexerBackend\"}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"GnocchiDebug\"}, \"\"]}}, \"description\": \"Gnocchi service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NumberOfStorageSacks\": {\"default\": 128, \"type\": \"number\", \"description\": \"Number of storage sacks to create.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"MetricProcessingDelay\": {\"default\": 30, \"type\": \"number\", \"description\": \"Delay between processing metrics.\"}, \"GnocchiStorageSwiftEndpointType\": {\"default\": \"internalURL\", \"type\": \"string\", \"description\": \"Set to modify which endpoint type is gnocchi accessing swift from.\"}, \"GnocchiIndexerBackend\": {\"default\": \"mysql\", \"type\": \"string\", \"description\": \"The short name of the Gnocchi indexer backend to use.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"GnocchiDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Gnocchi services.\"}, \"CephClientUserName\": {\"default\": \"openstack\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"GnocchiPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the gnocchi service and db account.\"}, \"GnocchiRbdPoolName\": {\"default\": \"metrics\", \"type\": \"string\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/post_puppet_pacemaker.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Post-Puppet Config for Pacemaker deployments\", \"parameters\": {\"input_values\": {\"type\": \"json\", \"description\": \"input values for the software deployments\"}, \"servers\": {\"type\": \"json\"}}, \"resources\": {\"ControllerPostPuppetMaintenanceModeDeployment\": {\"type\": \"OS::Heat::SoftwareDeployments\", \"properties\": {\"input_values\": {\"get_param\": \"input_values\"}, \"config\": {\"get_resource\": \"ControllerPostPuppetMaintenanceModeConfig\"}, \"name\": \"ControllerPostPuppetMaintenanceModeDeployment\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerPostPuppetRestart\": {\"depends_on\": \"ControllerPostPuppetMaintenanceModeDeployment\", \"type\": \"OS::TripleO::Tasks::ControllerPostPuppetRestart\", \"properties\": {\"input_values\": {\"get_param\": \"input_values\"}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerPostPuppetMaintenanceModeConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": \"#!/bin/bash\
0.355 | 3311: pacemaker_status=$(systemctl is-active pacemaker)\
0.355 | 3311: \
0.355 | 3311: if [ \\\"$pacemaker_status\\\" = \\\"active\\\" ]; then\
0.355 | 3311: pcs property set maintenance-mode=false\
0.355 | 3311: fi\
0.355 | 3311: \"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/post_puppet_pacemaker_restart.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Post-Puppet restart config for Pacemaker deployments\", \"parameters\": {\"input_values\": {\"type\": \"json\", \"description\": \"input values for the software deployments\"}, \"servers\": {\"type\": \"json\"}}, \"resources\": {\"ControllerPostPuppetRestartDeployment\": {\"type\": \"OS::Heat::SoftwareDeployments\", \"properties\": {\"input_values\": {\"get_param\": \"input_values\"}, \"config\": {\"get_resource\": \"ControllerPostPuppetRestartConfig\"}, \"name\": \"ControllerPostPuppetRestartDeployment\", \"servers\": {\"get_param\": \"servers\"}}}, \"ControllerPostPuppetRestartConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"list_join\": [\"\", [{\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_common_functions.sh\"}, {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_resource_restart.sh\"}]]}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/ctlplane_vip.yaml": "{\"outputs\": {\"ip_subnet\": {\"description\": \"IP/Subnet CIDR for the ctlplane network.\", \"value\": {\"list_join\": [\"\", [{\"get_attr\": [\"VipPort\", \"fixed_ips\", 0, \"ip_address\"]}, \"/\", {\"str_split\": [\"/\", {\"get_attr\": [\"VipPort\", \"subnets\", 0, \"cidr\"]}, 1]}]]}}, \"ip_address_uri\": {\"description\": \"Virtual IP network IP (for compatibility with vip_v6.yaml)\", \"value\": {\"get_attr\": [\"VipPort\", \"fixed_ips\", 0, \"ip_address\"]}}, \"ip_address\": {\"description\": \"Virtual IP network IP\", \"value\": {\"get_attr\": [\"VipPort\", \"fixed_ips\", 0, \"ip_address\"]}}}, \"heat_template_version\": \"pike\", \"description\": \"Creates a port for a VIP on the undercloud ctlplane network. The IP address will be chosen automatically if FixedIPs is empty.\
0.355 | 3311: \", \"parameters\": {\"FixedIPs\": {\"default\": [], \"type\": \"json\", \"description\": \"Control the IP allocation for the VIP port. E.g. [{'ip_address':'1.2.3.4'}]\
0.355 | 3311: \"}, \"ServiceName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Name of the service to lookup\"}, \"ControlPlaneNetwork\": {\"default\": \"ctlplane\", \"type\": \"string\", \"description\": \"The name of the undercloud Neutron control plane\"}, \"PortName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Name of the port\"}, \"ControlPlaneIP\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address on the control plane\"}, \"NetworkName\": {\"default\": \"ctlplane\", \"type\": \"string\", \"description\": \"Name of the network where the VIP will be created\"}}, \"resources\": {\"VipPort\": {\"type\": \"OS::TripleO::Network::Ports::ControlPlaneVipPort\", \"properties\": {\"replacement_policy\": \"AUTO\", \"fixed_ips\": {\"get_param\": \"FixedIPs\"}, \"network\": {\"get_param\": \"ControlPlaneNetwork\"}, \"name\": {\"get_param\": \"PortName\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker/rabbitmq.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the RabbitMQ pacemaker role.\", \"value\": {\"metadata_settings\": {\"get_attr\": [\"RabbitMQServiceBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"rabbitmq\", \"step_config\": \"include ::tripleo::profile::pacemaker::rabbitmq\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"RabbitMQServiceBase\", \"role_data\", \"config_settings\"]}, {\"rabbitmq::service_manage\": false}]}, \"monitoring_subscription\": {\"get_attr\": [\"RabbitMQServiceBase\", \"role_data\", \"monitoring_subscription\"]}}}}, \"heat_template_version\": \"pike\", \"description\": \"RabbitMQ service with Pacemaker configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"RabbitMQServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/rabbitmq.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/memcached.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Memcached role.\", \"value\": {\"service_name\": \"memcached\", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionMemcached\"}, \"step_config\": \"include ::tripleo::profile::base::memcached\
0.355 | 3311: \", \"config_settings\": {\"memcached::listen_ip\": {\"get_param\": [\"ServiceNetMap\", \"MemcachedNetwork\"]}, \"tripleo.memcached.firewall_rules\": {\"121 memcached\": {\"dport\": 11211}}, \"memcached::max_memory\": {\"get_param\": \"MemcachedMaxMemory\"}}, \"service_config_settings\": {\"collectd\": {\"tripleo.collectd.plugins.memcached\": [\"memcached\"], \"collectd::plugin::memcached::instances\": {\"local\": {\"host\": \"%{hiera('memcached::listen_ip')}\", \"port\": 11211}}}}}}}, \"heat_template_version\": \"pike\", \"description\": \"Memcached service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MonitoringSubscriptionMemcached\": {\"default\": \"overcloud-memcached\", \"type\": \"string\"}, \"MemcachedMaxMemory\": {\"default\": \"50%\", \"type\": \"string\", \"description\": \"The maximum amount of memory for memcached to be configured to use when installed. This can be either a percentage ('50%') or a fixed value ('2048').\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Ceilometer role.\", \"value\": {\"service_name\": \"ceilometer_base\", \"config_settings\": {\"ceilometer::dispatcher::gnocchi::url\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"uri\"]}, \"ceilometer::dispatcher::gnocchi::archive_policy\": {\"get_param\": \"GnocchiArchivePolicy\"}, \"ceilometer::agent::auth::auth_password\": {\"get_param\": \"CeilometerPassword\"}, \"ceilometer::agent::auth::auth_project_domain_name\": \"Default\", \"ceilometer::agent::auth::auth_tenant_name\": \"service\", \"ceilometer::keystone::authtoken::project_domain_name\": \"Default\", \"ceilometer::telemetry_secret\": {\"get_param\": \"CeilometerMeteringSecret\"}, \"ceilometer::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"ceilometer::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"ceilometer::agent::notification::pipeline_publishers\": {\"get_param\": \"PipelinePublishers\"}, \"ceilometer::agent::auth::auth_endpoint_type\": \"internalURL\", \"ceilometer::keystone::authtoken::user_domain_name\": \"Default\", \"ceilometer::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"ceilometer::agent::notification::manage_event_pipeline\": {\"get_param\": \"ManageEventPipeline\"}, \"ceilometer::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"ceilometer::snmpd_readonly_username\": {\"get_param\": \"SnmpdReadonlyUserName\"}, \"ceilometer::snmpd_readonly_user_password\": {\"get_param\": \"SnmpdReadonlyUserPassword\"}, \"ceilometer::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"ceilometer::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"CeilometerDebug\"}]}, \"ceilometer::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"ceilometer::agent::notification::manage_pipeline\": {\"get_param\": \"ManagePipeline\"}, \"ceilometer::agent::notification::event_pipeline_publishers\": {\"get_param\": \"EventPipelinePublishers\"}, \"ceilometer::keystone::authtoken::password\": {\"get_param\": \"CeilometerPassword\"}, 
\"ceilometer::dispatcher::gnocchi::resources_definition_file\": \"gnocchi_resources.yaml\", \"ceilometer::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"ceilometer::agent::auth::auth_user_domain_name\": \"Default\", \"ceilometer::dispatcher::gnocchi::filter_project\": \"service\", \"ceilometer::rabbit_heartbeat_timeout_threshold\": 60, \"ceilometer::keystone::authtoken::project_name\": \"service\", \"ceilometer::agent::auth::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"ceilometer::agent::auth::auth_region\": {\"get_param\": \"KeystoneRegion\"}}, \"service_config_settings\": {\"keystone\": {\"ceilometer::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"uri\"]}, \"ceilometer::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"uri\"]}, \"ceilometer::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"ceilometer_auth_enabled\": true, \"ceilometer::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"uri\"]}, \"ceilometer::keystone::auth::password\": {\"get_param\": \"CeilometerPassword\"}, \"ceilometer::keystone::auth::tenant\": \"service\", \"ceilometer::keystone::auth::configure_endpoint\": {\"get_param\": \"CeilometerApiEndpoint\"}}, \"mysql\": {\"ceilometer::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"ceilometer::db::mysql::password\": {\"get_param\": \"CeilometerPassword\"}, \"ceilometer::db::mysql::dbname\": \"ceilometer\", \"ceilometer::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"ceilometer::db::mysql::user\": \"ceilometer\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"CeilometerDebug\"}, \"\"]}}, \"description\": \"OpenStack Ceilometer service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ManageEventPipeline\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to manage event_pipeline.yaml.\"}, \"CeilometerWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Ceilometer service.\"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"GnocchiArchivePolicy\": {\"default\": \"low\", \"type\": \"string\", \"description\": \"archive policy to use with gnocchi backend\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"SnmpdReadonlyUserPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The user password for SNMPd with readonly rights running on all Overcloud nodes\"}, \"PipelinePublishers\": {\"default\": [\"gnocchi://\"], \"type\": \"comma_delimited_list\", \"description\": \"A list of publishers to put in pipeline.yaml. When the collector is used, override this with notifier:// publisher. Set ManagePipeline to true for override to take effect.\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ManagePipeline\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to manage pipeline.yaml.\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"CeilometerDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Ceilometer services.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"SnmpdReadonlyUserName\": {\"default\": \"ro_snmp_user\", \"type\": \"string\", \"description\": \"The user name for SNMPd with readonly rights running on all Overcloud nodes\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"CeilometerMeteringSecret\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Secret shared by the ceilometer services.\"}, \"CeilometerApiEndpoint\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to create or skip API endpoint. Set this to false, if you choose to disable Ceilometer API service.\"}, \"EventPipelinePublishers\": {\"default\": [\"gnocchi://\", \"panko://\"], \"type\": \"comma_delimited_list\", \"description\": \"A list of publishers to put in event_pipeline.yaml. When the collector is used, override this with notifier:// publisher. If zaqar is enabled, you can also publish to a zaqar queue by including \\\"zaqar://?queue=queue_name\\\" in this list. Set ManageEventPipeline to true for override to take effect.\
0.355 | 3311: \"}, \"CeilometerPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the ceilometer service account.\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/snmp.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the SNMP services\", \"value\": {\"service_name\": \"snmp\", \"step_config\": \"include ::tripleo::profile::base::snmp\
0.355 | 3311: \", \"config_settings\": {\"tripleo.snmp.firewall_rules\": {\"127 snmp\": {\"dport\": 161, \"proto\": \"udp\"}}, \"tripleo::profile::base::snmp::snmpd_password\": {\"get_param\": \"SnmpdReadonlyUserPassword\"}, \"snmp::agentaddress\": {\"get_param\": \"SnmpdBindHost\"}, \"snmp::snmpd_options\": {\"get_param\": \"SnmpdOptions\"}, \"tripleo::profile::base::snmp::snmpd_user\": {\"get_param\": \"SnmpdReadonlyUserName\"}}, \"upgrade_tasks\": [{\"name\": \"Stop snmp service\", \"service\": \"name=snmpd state=stopped\", \"tags\": \"step1\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"SNMP client configured with Puppet, to facilitate Ceilometer Hardware monitoring in the undercloud. This service is required to enable hardware monitoring.\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"SnmpdBindHost\": {\"default\": [\"udp:161\", \"udp6:[::1]:161\"], \"type\": \"comma_delimited_list\", \"description\": \"An array of bind host addresses on which SNMP daemon will listen.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"SnmpdReadonlyUserName\": {\"default\": \"ro_snmp_user\", \"type\": \"string\", \"description\": \"The user name for SNMPd with readonly rights running on all Overcloud nodes\"}, \"SnmpdOptions\": {\"default\": \"-LS0-5d\", \"type\": \"string\", \"description\": \"A string containing the commandline options passed to snmpd\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"SnmpdReadonlyUserPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The user password for SNMPd with readonly rights running on all Overcloud nodes\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/keystone.yaml": "{\"parameter_groups\": [{\"description\": \"The following parameters are deprecated and will be removed. They should not\
0.355 | 3311: be relied on for new deployments. If you have concerns regarding deprecated\
0.355 | 3311: parameters, please contact the TripleO development team on IRC or the\
0.355 | 3311: OpenStack mailing list.\
0.355 | 3311: \", \"parameters\": [\"KeystoneFernetKey0\", \"KeystoneFernetKey1\", \"KeystoneNotificationDriver\"], \"label\": \"deprecated\"}], \"heat_template_version\": \"pike\", \"description\": \"OpenStack Keystone service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"KeystoneFernetKey1\": {\"default\": \"\", \"type\": \"string\", \"description\": \"(DEPRECATED) The second Keystone fernet key. Must be a valid key.\"}, \"KeystoneFernetKey0\": {\"default\": \"\", \"type\": \"string\", \"description\": \"(DEPRECATED) The first Keystone fernet key. Must be a valid key.\"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"MonitoringSubscriptionKeystone\": {\"default\": \"overcloud-keystone\", \"type\": \"string\"}, \"KeystoneDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Keystone service.\"}, \"KeystoneLDAPDomainEnable\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Trigger to call ldap_backend puppet keystone define.\"}, \"KeystoneSSLCertificateKey\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\", \"description\": \"Keystone key for signing tokens.\"}, \"KeystoneNotificationFormat\": {\"default\": \"basic\", \"type\": \"string\", \"description\": \"The Keystone notification format\", \"constraints\": [{\"allowed_values\": [\"basic\", \"cadf\"]}]}, \"KeystoneCronTokenFlushHour\": {\"default\": \"*\", \"type\": \"comma_delimited_list\", \"description\": \"Cron to purge expired tokens - Hour\
0.355 | 3311: \"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"KeystonePolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Keystone.\
0.355 | 3311: e.g. { keystone-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"KeystoneTokenProvider\": {\"default\": \"fernet\", \"type\": \"string\", \"description\": \"The keystone token format\", \"constraints\": [{\"allowed_values\": [\"uuid\", \"fernet\"]}]}, \"AdminEmail\": {\"default\": \"admin@example.com\", \"hidden\": true, \"type\": \"string\", \"description\": \"The email for the keystone admin account.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"KeystoneCronTokenFlushWeekday\": {\"default\": \"*\", \"type\": \"comma_delimited_list\", \"description\": \"Cron to purge expired tokens - Week Day\
0.355 | 3311: \"}, \"KeystoneEnableDBPurge\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to create cron job for purging soft deleted rows in Keystone database.\
0.355 | 3311: \"}, \"KeystoneFernetKeys\": {\"type\": \"json\", \"description\": \"Mapping containing keystone's fernet keys and their paths.\"}, \"KeystoneCronTokenFlushMinute\": {\"default\": \"1\", \"type\": \"comma_delimited_list\", \"description\": \"Cron to purge expired tokens - Minute\
0.355 | 3311: \"}, \"KeystoneNotificationDriver\": {\"default\": [\"messaging\"], \"type\": \"comma_delimited_list\", \"description\": \"Comma-separated list of Oslo notification drivers used by Keystone\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"KeystoneLoggingSource\": {\"default\": {\"path\": \"/var/log/keystone/keystone.log\", \"tag\": \"openstack.keystone\"}, \"type\": \"json\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"KeystoneCronTokenFlushUser\": {\"default\": \"keystone\", \"type\": \"string\", \"description\": \"Cron to purge expired tokens - User\
0.355 | 3311: \"}, \"KeystoneCredential0\": {\"type\": \"string\", \"description\": \"The first Keystone credential key. Must be a valid key.\"}, \"KeystoneCredential1\": {\"type\": \"string\", \"description\": \"The second Keystone credential key. Must be a valid key.\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"KeystoneCronTokenFlushEnsure\": {\"default\": \"present\", \"type\": \"string\", \"description\": \"Cron to purge expired tokens - Ensure\
0.355 | 3311: \"}, \"KeystoneFernetMaxActiveKeys\": {\"default\": 5, \"type\": \"number\", \"description\": \"The maximum active keys in the keystone fernet key repository.\"}, \"KeystoneCronTokenFlushMonthday\": {\"default\": \"*\", \"type\": \"comma_delimited_list\", \"description\": \"Cron to purge expired tokens - Month Day\
0.355 | 3311: \"}, \"ManageKeystoneFernetKeys\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether TripleO should manage the keystone fernet keys or not. If set to true, the fernet keys will get the values from the saved keys repository in mistral (the KeystoneFernetKeys variable). If set to false, only the stack creation initializes the keys, but subsequent updates won't touch them.\"}, \"AdminToken\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The keystone auth secret and db password.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"KeystoneSSLCertificate\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Keystone certificate for verifying token validity.\"}, \"KeystoneCronTokenFlushMonth\": {\"default\": \"*\", \"type\": \"comma_delimited_list\", \"description\": \"Cron to purge expired tokens - Month\
0.355 | 3311: \"}, \"KeystoneWorkers\": {\"default\": \"%{::os_workers}\", \"type\": \"string\", \"description\": \"Set the number of workers for keystone::wsgi::apache\"}, \"AdminPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the keystone admin account, used for monitoring, querying neutron etc.\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"KeystoneCronTokenFlushMaxDelay\": {\"default\": 0, \"type\": \"number\", \"description\": \"Cron to purge expired tokens - Max Delay\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"KeystoneLDAPBackendConfigs\": {\"default\": {}, \"hidden\": true, \"type\": \"json\", \"description\": \"Hash containing the configurations for the LDAP backends configured in keystone.\"}, \"KeystoneCronTokenFlushDestination\": {\"default\": \"/var/log/keystone/keystone-tokenflush.log\", \"type\": \"string\", \"description\": \"Cron to purge expired tokens - Log destination\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Keystone role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"keystone::wsgi::apache::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"keystone::wsgi::apache::admin_bind_host\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}, \"keystone::enable_credential_setup\": true, \"keystone::fernet_keys\": {\"get_param\": \"KeystoneFernetKeys\"}, \"keystone::fernet_max_active_keys\": {\"get_param\": \"KeystoneFernetMaxActiveKeys\"}, \"keystone::endpoint::version\": \"\", \"keystone::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"keystone::endpoint::internal_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"keystone::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"keystone::endpoint::region\": {\"get_param\": \"KeystoneRegion\"}, \"keystone::database_connection\": {\"make_url\": {\"username\": \"keystone\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"AdminToken\"}, \"path\": \"/keystone\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"keystone::notification_format\": {\"get_param\": \"KeystoneNotificationFormat\"}, \"keystone::cron::token_flush::ensure\": {\"get_param\": \"KeystoneCronTokenFlushEnsure\"}, \"keystone::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"tripleo.keystone.firewall_rules\": {\"111 keystone\": {\"dport\": [5000, 13000, 35357, 13357]}}, \"keystone::admin_token\": {\"get_param\": \"AdminToken\"}, \"keystone_ssl_certificate\": {\"get_param\": \"KeystoneSSLCertificate\"}, \"keystone::cron::token_flush::month\": {\"get_param\": \"KeystoneCronTokenFlushMonth\"}, \"keystone::admin_bind_host\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"keystone::endpoint::public_url\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"uri_no_suffix\"]}, \"keystone::token_provider\": {\"get_param\": \"KeystoneTokenProvider\"}, \"keystone::config::keystone_config\": {\"ec2/driver\": {\"value\": \"keystone.contrib.ec2.backends.sql.Ec2\"}}, \"keystone::cron::token_flush::destination\": {\"get_param\": \"KeystoneCronTokenFlushDestination\"}, \"keystone::roles::admin::service_tenant\": \"service\", \"keystone::cron::token_flush::user\": {\"get_param\": \"KeystoneCronTokenFlushUser\"}, \"keystone::fernet_replace_keys\": {\"get_param\": \"ManageKeystoneFernetKeys\"}, \"keystone_ssl_certificate_key\": {\"get_param\": \"KeystoneSSLCertificateKey\"}, \"keystone_enable_db_purge\": {\"get_param\": \"KeystoneEnableDBPurge\"}, \"keystone::endpoint::admin_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"uri_no_suffix\"]}, \"keystone::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"KeystoneDebug\"}]}, 
\"keystone::service_name\": \"httpd\", \"keystone::cron::token_flush::hour\": {\"get_param\": \"KeystoneCronTokenFlushHour\"}, \"keystone::roles::admin::password\": {\"get_param\": \"AdminPassword\"}, \"keystone::roles::admin::email\": {\"get_param\": \"AdminEmail\"}, \"keystone::credential_keys\": {\"/etc/keystone/credential-keys/1\": {\"content\": {\"get_param\": \"KeystoneCredential1\"}}, \"/etc/keystone/credential-keys/0\": {\"content\": {\"get_param\": \"KeystoneCredential0\"}}}, \"keystone::cron::token_flush::minute\": {\"get_param\": \"KeystoneCronTokenFlushMinute\"}, \"keystone::cron::token_flush::maxdelay\": {\"get_param\": \"KeystoneCronTokenFlushMaxDelay\"}, \"keystone::admin_password\": {\"get_param\": \"AdminPassword\"}, \"keystone::enable_proxy_headers_parsing\": true, \"keystone::db::database_max_retries\": -1, \"keystone::wsgi::apache::servername_admin\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"keystone::db::database_db_max_retries\": -1, \"keystone::policy::policies\": {\"get_param\": \"KeystonePolicies\"}, \"keystone::enable_ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"keystone::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"keystone::wsgi::apache::threads\": 1, \"keystone::wsgi::apache::workers\": {\"get_param\": \"KeystoneWorkers\"}, \"keystone::public_bind_host\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"keystone::cron::token_flush::monthday\": {\"get_param\": \"KeystoneCronTokenFlushMonthday\"}, \"keystone::roles::admin::admin_tenant\": \"admin\", \"keystone::rabbit_heartbeat_timeout_threshold\": 60, \"keystone::enable_fernet_setup\": {\"if\": [\"keystone_fernet_tokens\", true, false]}, \"keystone::wsgi::apache::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"keystone::cron::token_flush::weekday\": {\"get_param\": \"KeystoneCronTokenFlushWeekday\"}, \"keystone::wsgi::apache::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}, \"keystone::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}}, {\"if\": [\"keystone_ldap_domain_enabled\", {\"tripleo::profile::base::keystone::ldap_backends_config\": {\"get_param\": \"KeystoneLDAPBackendConfigs\"}, \"tripleo::profile::base::keystone::ldap_backend_enable\": true, \"keystone::using_domain_config\": true}, {}]}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"upgrade_tasks\"]}, [{\"name\": \"Stop keystone service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}]]}, \"logging_groups\": [\"keystone\"], \"service_name\": \"keystone\", \"step_config\": \"include ::tripleo::profile::base::keystone\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionKeystone\"}, \"service_config_settings\": {\"horizon\": {\"if\": [\"keystone_ldap_domain_enabled\", {\"horizon::keystone_multidomain_support\": true, \"horizon::keystone_default_domain\": \"Default\"}, {}]}, \"mysql\": {\"keystone::db::mysql::user\": \"keystone\", \"keystone::db::mysql::dbname\": \"keystone\", \"keystone::db::mysql::password\": {\"get_param\": \"AdminToken\"}, \"keystone::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"keystone::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}}}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"KeystoneLoggingSource\"}}}}, \"conditions\": {\"keystone_ldap_domain_enabled\": {\"equals\": [{\"get_param\": \"KeystoneLDAPDomainEnable\"}, true]}, \"keystone_fernet_tokens\": {\"equals\": [{\"get_param\": \"KeystoneTokenProvider\"}, \"fernet\"]}, \"service_debug_unset\": {\"equals\": [{\"get_param\": \"KeystoneDebug\"}, \"\"]}}, \"resources\": {\"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/endpoints/endpoint_map.yaml": "{\"outputs\": {\"endpoint_map\": {\"value\": {\"GnocchiInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": 
{\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiInternal\", \"port\"]}}}}, \"AodhPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"port\"]}}}}, \"KeystonePublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, 
\"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2.0\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"port\"]}}}}, \"NovaInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2.1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaInternal\", \"port\"]}}}}, \"AodhInternal\": {\"port\": 
{\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"port\"]}}}}, \"CeilometerAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": 
[\"EndpointMap\", \"CeilometerAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerAdmin\", \"port\"]}}}}, \"GlanceAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"port\"]}}}}, \"CinderV2Internal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"protocol\": 
{\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v2/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"HeatCfnInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, 
\"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnInternal\", \"port\"]}}}}, \"HeatInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"NeutronPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", 
\"NeutronPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"port\"]}}}}, \"ManilaV1Public\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"TackerInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", 
\"TackerInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"TackerInternal\", \"port\"]}}}}, \"SwiftAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, 
\"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"port\"]}}}}, \"KeystoneV3Public\": {\"port\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v3\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystonePublic\", \"port\"]}}}}, \"PankoPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"protocol\"]}, 
\"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"PankoPublic\", \"port\"]}}}}, \"CinderV2Admin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v2/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": 
{\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"HeatCfnPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"port\"]}}}}, \"IronicAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": 
{\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicAdmin\", \"port\"]}}}}, \"BarbicanInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanInternal\", \"port\"]}}}}, \"CongressInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": 
{\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CongressInternal\", \"port\"]}}}}, \"IronicInspectorPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"protocol\"]}, 
\"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorPublic\", \"port\"]}}}}, \"SwiftInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/AUTH_\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"ManilaV1Admin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", 
\"ManilaAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"NovaVNCProxyPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": 
{\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"port\"]}}}}, \"MistralPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"port\"]}}}}, \"IronicInspectorInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", 
\"IronicInspectorInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorInternal\", \"port\"]}}}}, \"CeilometerPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerPublic\", 
\"port\"]}}}}, \"MistralAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"port\"]}}}}, \"CinderV3Internal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", 
\"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v3/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"BarbicanPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanPublic\", \"port\"]}}}}, \"ZaqarPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", 
\"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarPublic\", \"port\"]}}}}, \"GnocchiAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}}, \"template\": 
\"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GnocchiApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiAdmin\", \"port\"]}}}}, \"CephRgwPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/swift/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwPublic\", \"port\"]}}}}, \"PankoAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": 
{\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"PankoAdmin\", \"port\"]}}}}, \"SaharaAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1.1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"CinderPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": 
{\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"IronicPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"protocol\"]}, 
\"port\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicPublic\", \"port\"]}}}}, \"KeystoneV3Internal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v3\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"port\"]}}}}, \"SwiftPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": 
[\"EndpointMap\", \"SwiftPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/AUTH_\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"GnocchiPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", 
{\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GnocchiPublic\", \"port\"]}}}}, \"Ec2ApiAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiAdmin\", \"port\"]}}}}, \"CinderAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": 
{\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"GlanceInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"port\"]}}}}, \"HeatPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": 
{\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"ZaqarAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": 
{\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarAdmin\", \"port\"]}}}}, \"Ec2ApiPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiPublic\", \"port\"]}}}}, \"NovaPlacementAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}, \"CLOUDNAME\": 
{\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/placement\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"port\"]}}}}, \"MistralInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", 
\"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"port\"]}}}}, \"HeatAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"OctaviaPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, 
\"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaPublic\", \"port\"]}}}}, \"NovaVNCProxyAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyAdmin\", \"port\"]}}}}, \"HeatCfnAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": 
{\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnAdmin\", \"port\"]}}}}, \"CinderV3Admin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v3/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": 
{\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"ManilaV1Internal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"ManilaInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": 
[\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v2/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"SaharaInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1.1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}]}}, 
\"template\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"SwiftS3Public\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftPublic\", \"port\"]}}}}, \"SaharaPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": 
[\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1.1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"GlancePublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"port\"]}}}}, \"IronicInspectorAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", 
\"IronicInspectorNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicInspectorNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInspectorAdmin\", \"port\"]}}}}, \"ManilaAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": 
[\"EndpointMap\", \"ManilaAdmin\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v2/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ManilaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaAdmin\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"ZaqarInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarInternal\", \"port\"]}}}}, \"SwiftS3Internal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": 
{\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftInternal\", \"port\"]}}}}, \"AodhAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"AodhApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"host\"]}}}, \"scheme\": 
{\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"port\"]}}}}, \"PankoInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"PankoInternal\", \"port\"]}}}}, \"NeutronAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, 
\"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"port\"]}}}}, \"OctaviaAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaAdmin\", \"port\"]}}}}, \"ZaqarWebSocketPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": 
[\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketPublic\", \"port\"]}}}}, \"CeilometerInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": 
{\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CeilometerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CeilometerInternal\", \"port\"]}}}}, \"OctaviaInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"OctaviaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"OctaviaInternal\", \"port\"]}}}}, \"CongressAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", 
\"CongressApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CongressApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CongressAdmin\", \"port\"]}}}}, \"NovaPlacementPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/placement\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"host\"]}}}, 
\"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"port\"]}}}}, \"NovaPlacementInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/placement\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"port\"]}}}}, \"NovaAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": 
[\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2.1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaAdmin\", \"port\"]}}}}, \"Ec2ApiInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"Ec2ApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"Ec2ApiInternal\", \"port\"]}}}}, \"MysqlInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", 
\"MysqlNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"port\"]}}}}, \"HorizonPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/dashboard\", \"host\": {\"str_replace\": {\"params\": 
{\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HorizonPublic\", \"port\"]}}}}, \"ZaqarWebSocketInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketInternal\", \"port\"]}}}}, \"TackerPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", 
\"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"TackerPublic\", \"port\"]}}}}, \"NeutronInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", 
\"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"port\"]}}}}, \"CinderV3Public\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v3/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"CephRgwInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, 
\"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/swift/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwInternal\", \"port\"]}}}}, \"KeystoneV3Admin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v3\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"port\"]}}}}, \"NovaPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, 
\"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2.1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"port\"]}}}}, \"ZaqarWebSocketAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": 
{\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"ZaqarApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ZaqarWebSocketAdmin\", \"port\"]}}}}, \"BarbicanAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"BarbicanApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"BarbicanAdmin\", \"port\"]}}}}, \"ManilaPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": 
{\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v2/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"ManilaPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"TackerAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"TackerApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"protocol\"]}, 
\"port\": {\"get_param\": [\"EndpointMap\", \"TackerAdmin\", \"port\"]}}}}, \"CongressPublic\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CongressPublic\", \"port\"]}}}}, \"CephRgwAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", 
\"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/swift/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CephRgwNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CephRgwAdmin\", \"port\"]}}}}, \"CinderV2Public\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v2/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"PublicNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderPublic\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"IronicInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, 
\"template\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v1\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"IronicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"IronicInternal\", \"port\"]}}}}, \"KeystoneAdmin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2.0\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": 
{\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystoneAdminApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"port\"]}}}}, \"CinderInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}}}, \"uri\": {\"list_join\": [\"\", [{\"make_url\": {\"path\": \"/v1/\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"CinderApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"CinderInternal\", \"port\"]}}}, \"%(tenant_id)s\"]]}}, \"NovaVNCProxyInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}, 
\"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyInternal\", \"port\"]}}}}, \"SwiftS3Admin\": {\"port\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"port\"]}}}, \"uri\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"SwiftProxyNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", 
\"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"SwiftAdmin\", \"port\"]}}}}, \"KeystoneInternal\": {\"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"port\"]}, \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"protocol\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"protocol\"]}, \"host_nobrackets\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"uri_no_suffix\": {\"make_url\": {\"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"port\"]}}}, \"uri\": {\"make_url\": {\"path\": \"/v2.0\", \"host\": {\"str_replace\": {\"params\": {\"IP_ADDRESS\": {\"get_param\": [\"NetIpMap\", {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}}, \"template\": \"NETWORK_uri\"}}]}, \"CLOUDNAME\": {\"get_param\": [\"CloudEndpoints\", {\"get_param\": [\"ServiceNetMap\", \"KeystonePublicApiNetwork\"]}]}}, \"template\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}}}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"port\"]}}}}}}}, \"heat_template_version\": \"pike\", \"description\": \"A map of OpenStack endpoints. Since the endpoints are URLs, we need to have brackets around IPv6 IP addresses. 
The inputs to these parameters come from net_ip_uri_map, which will include these brackets in IPv6 addresses.\", \"parameters\": {\"NetIpMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"The Net IP map\"}, \"EndpointMap\": {\"default\": {\"GnocchiInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8041\"}, \"KeystonePublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"5000\"}, \"NovaInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8774\"}, \"AodhInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8042\"}, \"CeilometerAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8777\"}, \"GlanceAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9292\"}, \"HeatCfnInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8000\"}, \"HeatInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8004\"}, \"NeutronPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9696\"}, \"TackerInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9890\"}, \"CinderPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8776\"}, \"PankoPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8977\"}, \"HeatCfnPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8000\"}, \"IronicAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"6385\"}, \"BarbicanInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9311\"}, \"CongressInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"1789\"}, \"GnocchiPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8041\"}, \"SwiftInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8080\"}, \"NovaVNCProxyPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"6080\"}, \"ZaqarInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8888\"}, \"MistralPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8989\"}, \"IronicInspectorInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"5050\"}, \"CeilometerPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8777\"}, \"MistralAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8989\"}, \"TackerPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9890\"}, \"BarbicanPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9311\"}, \"ZaqarPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8888\"}, \"GnocchiAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8041\"}, \"CephRgwAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8080\"}, \"CephRgwPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8080\"}, \"MysqlInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"mysql+pymysql\", \"port\": \"3306\"}, \"PankoAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8977\"}, \"SaharaAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8386\"}, \"IronicPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"6385\"}, \"SwiftPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8080\"}, \"IronicInspectorPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"5050\"}, 
\"ZaqarWebSocketAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"ws\", \"port\": \"9000\"}, \"CinderAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8776\"}, \"GlanceInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9292\"}, \"HeatPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8004\"}, \"ZaqarAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8888\"}, \"Ec2ApiPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8788\"}, \"NovaPlacementAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8778\"}, \"MistralInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8989\"}, \"HeatAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8004\"}, \"OctaviaPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9876\"}, \"NovaVNCProxyAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"6080\"}, \"HeatCfnAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8000\"}, \"SwiftAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8080\"}, \"ManilaInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8786\"}, \"SaharaInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8386\"}, \"GlancePublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9292\"}, \"ManilaAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8786\"}, \"AodhPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8042\"}, \"AodhAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8042\"}, \"ManilaPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8786\"}, \"PankoInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8977\"}, \"NeutronAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9696\"}, \"OctaviaAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9876\"}, \"ZaqarWebSocketPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"ws\", \"port\": \"9000\"}, \"CeilometerInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8777\"}, \"OctaviaInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9876\"}, \"CongressAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"1789\"}, \"NovaPlacementPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8778\"}, \"NovaAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8774\"}, \"Ec2ApiInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8788\"}, \"SaharaPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8386\"}, \"ZaqarWebSocketInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"ws\", \"port\": \"9000\"}, \"NeutronInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9696\"}, \"CephRgwInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8080\"}, \"NovaPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8774\"}, \"Ec2ApiAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8788\"}, \"BarbicanAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9311\"}, \"HorizonPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"80\"}, \"TackerAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"9890\"}, 
\"CongressPublic\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"1789\"}, \"NovaPlacementInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8778\"}, \"IronicInspectorAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"5050\"}, \"IronicInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"6385\"}, \"KeystoneAdmin\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"35357\"}, \"CinderInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"8776\"}, \"NovaVNCProxyInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"6080\"}, \"KeystoneInternal\": {\"host\": \"IP_ADDRESS\", \"protocol\": \"http\", \"port\": \"5000\"}}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"The Service Net map\"}, \"CloudEndpoints\": {\"default\": {}, \"type\": \"json\", \"description\": \"A map containing the DNS names for the different endpoints (external, internal_api, etc.)\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-engine.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Sahara Engine role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionSaharaEngine\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"SaharaBase\", \"role_data\", \"config_settings\"]}]}, \"upgrade_tasks\": [{\"name\": \"Stop sahara_engine service\", \"service\": \"name=openstack-sahara-engine state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"sahara\"], \"service_name\": \"sahara_engine\", \"logging_source\": {\"get_param\": \"SaharaEngineLoggingSource\"}, \"step_config\": \"include ::tripleo::profile::base::sahara::engine\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Sahara Engine service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"MonitoringSubscriptionSaharaEngine\": {\"default\": \"overcloud-sahara-engine\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"SaharaEngineLoggingSource\": {\"default\": {\"path\": \"/var/log/sahara/sahara-engine.log\", \"tag\": \"openstack.sahara.engine\"}, \"type\": \"json\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"SaharaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Apache service configured with Puppet. Note this is typically included automatically via other services which run via Apache.\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ApacheServerLimit\": {\"default\": 256, \"type\": \"number\", \"description\": \"Maximum number of Apache processes.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"InternalTLSCAFile\": {\"default\": \"/etc/ipa/ca.crt\", \"type\": \"string\", \"description\": \"Specifies the default CA cert to use if TLS is used for services in the internal network.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"ApacheMaxRequestWorkers\": {\"default\": 256, \"type\": \"number\", \"description\": \"Maximum number of simultaneously processed requests.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Apache role.\", \"value\": {\"metadata_settings\": {\"if\": [\"internal_tls_enabled\", {\"repeat\": {\"for_each\": {\"$NETWORK\": {\"get_attr\": [\"ApacheNetworks\", \"value\"]}}, \"template\": [{\"type\": \"node\", \"network\": \"$NETWORK\", \"service\": \"HTTP\"}]}}, null]}, \"service_name\": \"apache\", \"config_settings\": {\"map_merge\": [{\"apache::server_signature\": \"Off\", \"apache::ip\": {\"get_param\": [\"ServiceNetMap\", \"ApacheNetwork\"]}, \"apache::mod::prefork::serverlimit\": {\"get_param\": \"ApacheServerLimit\"}, \"apache::server_tokens\": \"Prod\", \"apache::default_vhost\": false, \"apache::mod::prefork::maxclients\": {\"get_param\": \"ApacheMaxRequestWorkers\"}, \"apache::mod::remoteip::proxy_ips\": [\"%{hiera('apache_remote_proxy_ips_network')}\"], \"apache_remote_proxy_ips_network\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"ApacheNetwork\"]}}, \"template\": \"NETWORK_subnet\"}}}, {\"if\": [\"internal_tls_enabled\", {\"apache_certificates_specs\": {\"map_merge\": {\"repeat\": {\"for_each\": {\"NETWORK\": {\"get_attr\": [\"ApacheNetworks\", \"value\"]}}, \"template\": {\"httpd-NETWORK\": {\"service_certificate\": \"/etc/pki/tls/certs/httpd/httpd-NETWORK.crt\", \"hostname\": \"%{hiera('fqdn_NETWORK')}\", \"service_key\": \"/etc/pki/tls/private/httpd/httpd-NETWORK.key\", \"principal\": \"HTTP/%{hiera('fqdn_NETWORK')}\"}}}}}, \"tripleo::certmonger::apache_dirs::certificate_dir\": \"/etc/pki/tls/certs/httpd\", \"generate_service_certificates\": true, \"tripleo::certmonger::apache_dirs::key_dir\": \"/etc/pki/tls/private/httpd\", \"apache::mod::ssl::ssl_ca\": {\"get_param\": \"InternalTLSCAFile\"}}, {}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"httpd_enabled\", \"command\": \"systemctl is-enabled httpd\", \"name\": \"Check if httpd is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'httpd' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"httpd_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service httpd is running\", 
\"tags\": \"step0,validation\"}, {\"yum\": \"name=mod_ssl state=latest\", \"name\": \"Ensure mod_ssl package is installed\", \"tags\": \"step3\"}]}}}, \"conditions\": {\"internal_tls_enabled\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}}, \"resources\": {\"ApacheNetworks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"value\": {\"yaql\": {\"expression\": \"list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)\", \"data\": {\"map\": {\"get_param\": \"ServiceNetMap\"}}}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/redis.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Redis service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the redis role.\", \"value\": {\"metadata_settings\": {\"if\": [\"use_tls_proxy\", [{\"type\": \"vip\", \"network\": {\"get_param\": [\"ServiceNetMap\", \"RabbitmqNetwork\"]}, \"service\": \"redis\"}], null]}, \"service_name\": \"redis\", \"step_config\": \"include ::tripleo::profile::base::database::redis\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"RedisBase\", \"role_data\", \"config_settings\"]}, {\"tripleo::profile::base::database::redis::tls_proxy_fqdn\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"RedisNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"tripleo::profile::base::database::redis::tls_proxy_port\": 6379, \"tripleo.redis.firewall_rules\": {\"108 redis\": {\"dport\": [6379, 26379]}}, \"tripleo::profile::base::database::redis::tls_proxy_bind_ip\": {\"get_param\": [\"ServiceNetMap\", \"RedisNetwork\"]}}, {\"if\": [\"use_tls_proxy\", {\"redis_certificate_specs\": {\"service_certificate\": \"/etc/pki/tls/certs/redis.crt\", \"hostname\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"RedisNetwork\"]}}, \"template\": \"%{hiera('cloud_name_NETWORK')}\"}}, \"service_key\": \"/etc/pki/tls/private/redis.key\", \"principal\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"RedisNetwork\"]}}, \"template\": \"redis/%{hiera('cloud_name_NETWORK')}\"}}}}, {}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"redis_enabled\", \"command\": \"systemctl is-enabled redis\", \"name\": \"Check if redis is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'redis' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"redis_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if redis is running\", \"tags\": \"step0,validation\"}, {\"when\": \"redis_enabled.rc == 0\", \"name\": \"Stop redis service\", \"service\": \"name=redis state=stopped\", \"tags\": \"step1\"}, {\"yum\": \"name=redis state=latest\", \"when\": \"redis_enabled.rc != 0\", \"name\": \"Install redis package if it was disabled\", \"tags\": \"step3\"}, {\"name\": \"Start redis service\", \"service\": \"name=redis state=started\", \"tags\": \"step4\"}]}}}, \"conditions\": {\"use_tls_proxy\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}}, \"resources\": {\"RedisBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/redis-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/hosts-config.yaml": "{\"outputs\": {\"hosts_entries\": {\"description\": \"The content that should be appended to your /etc/hosts if you want to get\
hostname-based access to the deployed nodes (useful for testing without\
setting up a DNS).\
0.355 | 3311: \", \"value\": {\"get_param\": \"hosts\"}}, \"config_id\": {\"description\": \"The ID of the hostsConfigImpl resource.\", \"value\": {\"get_resource\": \"hostsConfigImpl\"}}, \"OS::stack_id\": {\"description\": \"The ID of the hostsConfigImpl resource.\", \"value\": {\"get_resource\": \"hostsConfigImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"All Hosts Config\", \"parameters\": {\"hosts\": {\"type\": \"string\"}}, \"resources\": {\"hostsConfigImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"default\": {\"list_join\": [\" \", {\"str_split\": [\"\\\
0.355 | 3311: \", {\"get_param\": \"hosts\"}]}]}, \"name\": \"hosts\"}], \"config\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/scripts/hosts-config.sh\"}, \"group\": \"script\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/firstboot/userdata_default.yaml": "{\"outputs\": {\"OS::stack_id\": {\"value\": {\"get_resource\": \"userdata\"}}}, \"heat_template_version\": \"pike\", \"description\": \"This is a default no-op template which provides empty user-data which can be passed to the OS::Nova::Server resources. This template can be replaced with a different implementation via the resource registry, such that deployers may customize their first-boot configuration.\
0.355 | 3311: \", \"resources\": {\"userdata\": {\"type\": \"OS::Heat::MultipartMime\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/post_deploy/default.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Extra Post Deployment Config\", \"parameters\": {\"servers\": {\"type\": \"json\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/yum_update.yaml": "{\"outputs\": {\"OS::stack_id\": {\"value\": {\"get_resource\": \"config\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software-config for performing package updates using yum\
0.355 | 3311: \", \"resources\": {\"config\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"inputs\": [{\"default\": \"\", \"name\": \"update_identifier\", \"description\": \"yum will only run for previously unused values of update_identifier\"}, {\"default\": \"update\", \"name\": \"command\", \"description\": \"yum sub-command to run, defaults to \\\"update\\\"\"}, {\"default\": \"\", \"name\": \"command_arguments\", \"description\": \"yum command arguments, defaults to \\\"\\\"\"}], \"config\": {\"list_join\": [\"\", [{\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_common_functions.sh\"}, {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/yum_update.sh\"}]]}, \"outputs\": [{\"name\": \"update_managed_packages\", \"description\": \"boolean value indicating whether to upgrade managed packages\"}]}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-placement.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova Placement API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"NovaPlacementLoggingSource\": {\"default\": {\"path\": \"/var/log/httpd/nova_placement_wsgi_error_ssl.log\", \"tag\": \"openstack.nova.placement\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"MonitoringSubscriptionNovaPlacement\": {\"default\": \"overcloud-nova-placement\", \"type\": \"string\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NovaWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Nova services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"NovaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the nova service and db account\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Placement API service.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"nova::wsgi::apache_placement::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"nova::wsgi::apache_placement::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}, \"nova::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneAdmin\", \"uri_no_suffix\"]}, \"nova::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri\"]}, \"nova::wsgi::apache_placement::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaPlacementNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"nova::keystone::authtoken::project_name\": \"service\", \"tripleo.nova_placement.firewall_rules\": {\"138 nova_placement\": {\"dport\": [8778, 13778]}}, \"nova::keystone::authtoken::password\": {\"get_param\": \"NovaPassword\"}, \"nova::wsgi::apache_placement::api_port\": \"8778\"}, {\"if\": [\"nova_workers_zero\", {}, {\"nova::wsgi::apache_placement::workers\": {\"get_param\": \"NovaWorkers\"}}]}]}, \"upgrade_tasks\": [{\"name\": \"Stop nova_placement service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"yum\": \"name=openstack-nova-placement-api state=latest\", \"name\": \"Install nova-placement packages on upgrade\", \"tags\": \"step3\"}], \"logging_groups\": [\"nova\"], \"service_name\": \"nova_placement\", \"step_config\": \"include tripleo::profile::base::nova::placement\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaPlacement\"}, \"service_config_settings\": {\"keystone\": {\"nova::keystone::auth_placement::internal_url\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementInternal\", \"uri\"]}, \"nova::keystone::auth_placement::region\": {\"get_param\": \"KeystoneRegion\"}, \"nova::keystone::auth_placement::tenant\": \"service\", \"nova::keystone::auth_placement::public_url\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementPublic\", \"uri\"]}, \"nova::keystone::auth_placement::admin_url\": {\"get_param\": [\"EndpointMap\", \"NovaPlacementAdmin\", \"uri\"]}, \"nova::keystone::auth_placement::password\": {\"get_param\": \"NovaPassword\"}}, \"mysql\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"service_config_settings\", \"mysql\"]}, {\"nova::db::mysql_placement::user\": \"nova_placement\", \"nova::db::mysql_placement::dbname\": \"nova_placement\", \"nova::db::mysql_placement::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"nova::db::mysql_placement::password\": {\"get_param\": \"NovaPassword\"}, \"nova::db::mysql_placement::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"]}]}}, \"logging_source\": {\"get_param\": \"NovaPlacementLoggingSource\"}}}}, \"conditions\": {\"nova_workers_zero\": {\"equals\": [{\"get_param\": \"NovaWorkers\"}, 0]}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-compute.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Compute service.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"RoleParametersValue\", \"value\"]}, {\"nova::migration::live_migration_tunnelled\": {\"get_param\": \"NovaEnableRbdBackend\"}, \"tripleo::profile::base::nova::migration::client::nova_compute_enabled\": true, \"nova::compute::rbd::libvirt_rbd_secret_uuid\": {\"get_param\": \"CephClusterFSID\"}, \"nova::vncproxy::common::vncproxy_protocol\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"protocol\"]}, \"nova::compute::rbd::libvirt_rbd_secret_key\": {\"get_param\": \"CephClientKey\"}, \"nova::compute::rbd::libvirt_rbd_user\": {\"get_param\": \"CephClientUserName\"}, \"nova::compute::vncproxy_host\": {\"get_param\": [\"EndpointMap\", \"NovaPublic\", \"host_nobrackets\"]}, \"nova::compute::instance_usage_audit\": true, 
\"nova::compute::instance_usage_audit_period\": \"hour\", \"nova::vncproxy::common::vncproxy_host\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"host_nobrackets\"]}, \"tripleo::profile::base::nova::migration::client::ssh_private_key\": {\"get_param\": [\"MigrationSshKey\", \"private_key\"]}, \"nova::compute::rbd::libvirt_images_rbd_pool\": {\"get_param\": \"NovaRbdPoolName\"}, \"nova::compute::rbd::rbd_keyring\": {\"list_join\": [\".\", [\"client\", {\"get_param\": \"CephClientUserName\"}]]}, \"nova::compute::libvirt::manage_libvirt_services\": false, \"nova::compute::neutron::libvirt_vif_driver\": {\"get_param\": \"NovaComputeLibvirtVifDriver\"}, \"nova::compute::libvirt::migration_support\": false, \"nova::vncproxy::common::vncproxy_port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"port\"]}, \"rbd_persistent_storage\": {\"get_param\": \"CinderEnableRbdBackend\"}, \"nova::compute::pci_passthrough\": {\"str_replace\": {\"params\": {\"map_replace\": [{\"map_replace\": [{\"JSON_PARAM\": \"NovaPCIPassthrough\"}, {\"values\": {\"get_param\": [\"RoleParameters\"]}}]}, {\"values\": {\"NovaPCIPassthrough\": {\"get_param\": \"NovaPCIPassthrough\"}}}]}, \"template\": \"JSON_PARAM\"}}, \"tripleo::profile::base::nova::compute::cinder_nfs_backend\": {\"get_param\": \"CinderEnableNfsBackend\"}, \"tripleo::profile::base::nova::migration::client::ssh_port\": {\"get_param\": \"MigrationSshPort\"}, \"nova::compute::vncserver_proxyclient_address\": {\"get_param\": [\"ServiceNetMap\", \"NovaVncProxyNetwork\"]}, \"nova::compute::rbd::ephemeral_storage\": {\"get_param\": \"NovaEnableRbdBackend\"}}]}, \"upgrade_tasks\": [{\"name\": \"Stop nova-compute service\", \"service\": \"name=openstack-nova-compute state=stopped\", \"tags\": \"step1\"}, {\"ini_file\": {\"str_replace\": {\"params\": {\"LEVEL\": {\"get_param\": \"UpgradeLevelNovaCompute\"}}, \"template\": \"dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL\"}}, \"name\": \"Set compute upgrade level to auto\", \"tags\": \"step3\"}, {\"yum\": \"name=openstack-nova-migration state=latest\", \"name\": \"install openstack-nova-migration\", \"tags\": \"step3\"}, {\"name\": \"Start nova-compute service\", \"service\": \"name=openstack-nova-compute state=started\", \"tags\": \"step6\"}], \"logging_groups\": [\"nova\"], \"service_name\": \"nova_compute\", \"step_config\": \"# TODO(emilien): figure how to deal with libvirt profile.\
# We'll probably treat it like we do with Neutron plugins.\
# Until then, just include it in the default nova-compute role.\
include tripleo::profile::base::nova::compute::libvirt\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaCompute\"}, \"service_config_settings\": {\"collectd\": {\"tripleo.collectd.plugins.nova_compute\": [\"virt\"], \"collectd::plugins::virt::connection\": \"qemu:///system\"}}, \"logging_source\": {\"get_param\": \"NovaComputeLoggingSource\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova Compute service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NovaReservedHostMemory\": {\"default\": 4096, \"type\": \"number\", \"description\": \"Reserved RAM for host processes.\
0.355 | 3311: \", \"constraints\": [{\"range\": {\"min\": 512}}]}, \"UpgradeLevelNovaCompute\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Nova Compute upgrade level\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"MigrationSshPort\": {\"default\": 22, \"type\": \"number\", \"description\": \"Target port for migration over ssh\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NovaEnableRbdBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the Rbd backend for Nova\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"CephClientUserName\": {\"default\": \"openstack\", \"type\": \"string\"}, \"CinderEnableRbdBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the Rbd backend for Cinder\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NovaPCIPassthrough\": {\"default\": \"\", \"type\": \"json\", \"description\": \"List of PCI Passthrough whitelist parameters. Example - NovaPCIPassthrough:\
- vendor_id: \"8086\"\
product_id: \"154c\"\
address: \"0000:05:00.0\"\
physical_network: \"datacentre\"\
For different formats, refer to the nova.conf documentation for pci_passthrough_whitelist configuration\
0.355 | 3311: \"}, \"CephClusterFSID\": {\"type\": \"string\", \"description\": \"The Ceph cluster FSID. Must be a UUID.\"}, \"MigrationSshKey\": {\"default\": {\"public_key\": \"\", \"private_key\": \"\"}, \"type\": \"json\", \"description\": \"SSH key for migration. Expects a dictionary with keys 'public_key' and 'private_key'. Values should be identical to SSH public/private key files.\
0.355 | 3311: \"}, \"NovaComputeLoggingSource\": {\"default\": {\"path\": \"/var/log/nova/nova-compute.log\", \"tag\": \"openstack.nova.compute\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NovaRbdPoolName\": {\"default\": \"vms\", \"type\": \"string\"}, \"MonitoringSubscriptionNovaCompute\": {\"default\": \"overcloud-nova-compute\", \"type\": \"string\"}, \"CephClientKey\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The Ceph client key. Can be created with ceph-authtool --gen-print-key.\"}, \"NovaComputeLibvirtVifDriver\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Libvirt VIF driver configuration for the network\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NovaVcpuPinSet\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"A list or range of physical CPU cores to reserve for virtual machine processes. Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8\
0.355 | 3311: \"}, \"CinderEnableNfsBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the NFS backend for Cinder\"}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"RoleParametersValue\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"map_replace\": [{\"map_replace\": [{\"nova::compute::vcpu_pin_set\": \"NovaVcpuPinSet\", \"nova::compute::reserved_host_memory\": \"NovaReservedHostMemory\"}, {\"values\": {\"get_param\": [\"RoleParameters\"]}}]}, {\"values\": {\"NovaVcpuPinSet\": {\"get_param\": \"NovaVcpuPinSet\"}, \"NovaReservedHostMemory\": {\"get_param\": \"NovaReservedHostMemory\"}}}]}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-volume.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Cinder Volume role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCinderVolume\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"CinderBase\", \"role_data\", \"config_settings\"]}, {\"tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size\": {\"get_param\": \"CinderLVMLoopDeviceSize\"}, \"tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions\": {\"get_param\": \"CinderNasSecureFilePermissions\"}, \"tripleo.cinder_volume.firewall_rules\": {\"120 iscsi initiator\": {\"dport\": 3260}}, \"tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name\": {\"get_param\": \"CephClientUserName\"}, \"tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend\": {\"get_param\": \"CinderEnableNfsBackend\"}, \"tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend\": {\"get_param\": \"CinderEnableRbdBackend\"}, \"tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper\": {\"get_param\": \"CinderISCSIHelper\"}, \"tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers\": {\"get_param\": \"CinderNfsServers\"}, \"tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options\": {\"get_param\": \"CinderNfsMountOptions\"}, \"tripleo::profile::base::cinder::volume::cinder_enable_iscsi_backend\": {\"get_param\": \"CinderEnableIscsiBackend\"}, \"tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address\": {\"get_param\": [\"ServiceNetMap\", \"CinderIscsiNetwork\"]}, \"tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol\": {\"get_param\": \"CinderISCSIProtocol\"}, \"tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name\": {\"get_param\": \"CinderRbdPoolName\"}, \"tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations\": {\"get_param\": \"CinderNasSecureFileOperations\"}}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"cinder_volume_enabled\", \"command\": \"systemctl is-enabled openstack-cinder-volume\", \"name\": \"Check if cinder_volume is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-cinder-volume' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": 
\"cinder_volume_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-cinder-volume is running\", \"tags\": \"step0,validation\"}, {\"when\": \"cinder_volume_enabled.rc == 0\", \"name\": \"Stop cinder_volume service\", \"service\": \"name=openstack-cinder-volume state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"cinder\"], \"service_name\": \"cinder_volume\", \"logging_source\": {\"get_param\": \"CinderVolumeLoggingSource\"}, \"step_config\": \"include ::tripleo::profile::base::cinder::volume\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Cinder Volume service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"CinderVolumeLoggingSource\": {\"default\": {\"path\": \"/var/log/cinder/cinder-volume.log\", \"tag\": \"openstack.cinder.volume\"}, \"type\": \"json\"}, \"CinderRbdPoolName\": {\"default\": \"volumes\", \"type\": \"string\"}, \"CinderNasSecureFileOperations\": {\"default\": false, \"type\": \"string\", \"description\": \"Controls whether security enhanced NFS file operations are enabled. Valid values are 'auto', 'true' or 'false'. Effective when CinderEnableNfsBackend is true.\
0.355 | 3311: \"}, \"CinderLVMLoopDeviceSize\": {\"default\": 10280, \"type\": \"number\", \"description\": \"The size of the loopback file used by the cinder LVM driver.\"}, \"CinderNfsServers\": {\"default\": \"\", \"type\": \"comma_delimited_list\", \"description\": \"NFS servers used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true.\
0.355 | 3311: \"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"CinderNasSecureFilePermissions\": {\"default\": false, \"type\": \"string\", \"description\": \"Controls whether security enhanced NFS file permissions are enabled. Valid values are 'auto', 'true' or 'false'. Effective when CinderEnableNfsBackend is true.\
0.355 | 3311: \"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"CephClientUserName\": {\"default\": \"openstack\", \"type\": \"string\"}, \"CinderEnableRbdBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the Rbd backend for Cinder\"}, \"CinderNfsMountOptions\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Mount options for NFS mounts used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true.\
0.355 | 3311: \"}, \"MonitoringSubscriptionCinderVolume\": {\"default\": \"overcloud-cinder-volume\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"CinderISCSIProtocol\": {\"default\": \"iscsi\", \"type\": \"string\", \"description\": \"Whether to use TCP ('iscsi') or iSER RDMA ('iser') for iSCSI\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"CinderEnableIscsiBackend\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to enable or not the Iscsi backend for Cinder\"}, \"CinderEnableNfsBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the NFS backend for Cinder\"}, \"CinderISCSIHelper\": {\"default\": \"lioadm\", \"type\": \"string\", \"description\": \"The iSCSI helper to use with cinder.\"}}, \"resources\": {\"CinderBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-executor.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Mistral Executor role.\", \"value\": {\"service_name\": \"mistral_executor\", \"step_config\": \"include ::tripleo::profile::base::mistral::executor\
0.355 | 3311: \", \"config_settings\": {\"get_attr\": [\"MistralBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"mistral_executor_enabled\", \"command\": \"systemctl is-enabled openstack-mistral-executor\", \"name\": \"Check if mistral executor is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-mistral-executor' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"mistral_executor_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if openstack-mistral-executor is running\", \"tags\": \"step0,validation\"}, {\"name\": \"Stop mistral_executor service\", \"service\": \"name=openstack-mistral-executor state=stopped\", \"tags\": \"step1\"}, {\"yum\": \"name=openstack-mistral-executor state=latest\", \"when\": \"mistral_executor_enabled.rc != 0\", \"name\": \"Install openstack-mistral-executor package if it was disabled\", \"tags\": \"step3\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"Openstack Mistral API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"MistralBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/tuned.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for tuned\", \"value\": {\"service_name\": \"tuned\", \"step_config\": \"include ::tripleo::profile::base::tuned\", \"config_settings\": {\"map_replace\": [{\"map_replace\": [{\"tripleo::profile::base::tuned::profile\": \"TunedProfileName\"}, {\"values\": {\"get_param\": \"RoleParameters\"}}]}, {\"values\": {\"TunedProfileName\": {\"get_param\": \"TunedProfileName\"}}}]}}}}, \"heat_template_version\": \"ocata\", \"description\": \"Configure tuned\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"TunedProfileName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Tuned Profile to apply to the host\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/scripts/hosts-config.sh": "#!/bin/bash
0.355 | 3311: set -eux
0.355 | 3311: set -o pipefail
0.355 | 3311:
0.355 | 3311: write_entries() {
0.355 | 3311: local file=\"$1\"
0.355 | 3311: local entries=\"$2\"
0.355 | 3311:
0.355 | 3311: # Don't do anything if the file isn't there
0.355 | 3311: if [ ! -f \"$file\" ]; then
0.355 | 3311: return
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if grep -q \"^# HEAT_HOSTS_START\" \"$file\"; then
0.355 | 3311: temp=$(mktemp)
0.355 | 3311: (
0.355 | 3311: sed '/^# HEAT_HOSTS_START/,$d' \"$file\"
0.355 | 3311: echo -ne \"\
0.355 | 3311: # HEAT_HOSTS_START - Do not edit manually within this section!\
0.355 | 3311: \"
0.355 | 3311: echo \"$entries\"
0.355 | 3311: echo -ne \"# HEAT_HOSTS_END\
0.355 | 3311: \
0.355 | 3311: \"
0.355 | 3311: sed '1,/^# HEAT_HOSTS_END/d' \"$file\"
0.355 | 3311: ) > \"$temp\"
0.355 | 3311: echo \"INFO: Updating hosts file $file, check below for changes\"
0.355 | 3311: diff \"$file\" \"$temp\" || true
0.355 | 3311: cat \"$temp\" > \"$file\"
0.355 | 3311: else
0.355 | 3311: echo -ne \"\
0.355 | 3311: # HEAT_HOSTS_START - Do not edit manually within this section!\
0.355 | 3311: \" >> \"$file\"
0.355 | 3311: echo \"$entries\" >> \"$file\"
0.355 | 3311: echo -ne \"# HEAT_HOSTS_END\
0.355 | 3311: \
0.355 | 3311: \" >> \"$file\"
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: }
0.355 | 3311:
0.355 | 3311: if [ ! -z \"$hosts\" ]; then
0.355 | 3311: for tmpl in /etc/cloud/templates/hosts.*.tmpl ; do
0.355 | 3311: write_entries \"$tmpl\" \"$hosts\"
0.355 | 3311: done
0.355 | 3311: write_entries \"/etc/hosts\" \"$hosts\"
0.355 | 3311: else
0.355 | 3311: echo \"No hosts in Heat, nothing written.\"
0.355 | 3311: fi
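
The hosts-config.sh script captured above maintains an idempotent, marker-delimited block in each hosts file: the first sed pass keeps everything before the "# HEAT_HOSTS_START" marker, the managed entries are re-emitted, and the second sed pass keeps everything after "# HEAT_HOSTS_END", so repeated runs never duplicate entries. A minimal standalone sketch of the same sentinel technique (the file path and entry are illustrative stand-ins, not values from the template):

    #!/bin/bash
    # Rewrite only the marker-delimited block of a file, as write_entries does.
    # "file" and "entries" are hypothetical demo values.
    file=/tmp/demo-hosts
    entries="192.0.2.10 overcloud-controller-0"
    touch "$file"  # ensure the file exists for the demo

    if grep -q '^# HEAT_HOSTS_START' "$file"; then
        temp=$(mktemp)
        {
            sed '/^# HEAT_HOSTS_START/,$d' "$file"   # content before the block
            echo '# HEAT_HOSTS_START - Do not edit manually within this section!'
            echo "$entries"
            echo '# HEAT_HOSTS_END'
            sed '1,/^# HEAT_HOSTS_END/d' "$file"     # content after the block
        } > "$temp"
        cat "$temp" > "$file"   # overwrite in place so ownership and permissions survive
    else
        {
            echo '# HEAT_HOSTS_START - Do not edit manually within this section!'
            echo "$entries"
            echo '# HEAT_HOSTS_END'
        } >> "$file"
    fi
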
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/yum_update.sh": "#!/bin/bash
0.355 | 3311:
0.355 | 3311: # A heat-config-script which runs yum update during a stack-update.
0.355 | 3311: # Inputs:
0.355 | 3311: # deploy_action - yum will only be run if this is UPDATE
0.355 | 3311: # update_identifier - yum will only run for previously unused values of update_identifier
0.355 | 3311: # command - yum sub-command to run, defaults to \"update\"
0.355 | 3311: # command_arguments - yum command arguments, defaults to \"\"
0.355 | 3311:
0.355 | 3311: echo \"Started yum_update.sh on server $deploy_server_id at `date`\"
0.355 | 3311: echo -n \"false\" > $heat_outputs_path.update_managed_packages
0.355 | 3311:
0.355 | 3311: if [ -f /.dockerenv ]; then
0.355 | 3311: echo \"Not running due to running inside a container\"
0.355 | 3311: exit 0
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: if [[ -z \"$update_identifier\" ]]; then
0.355 | 3311: echo \"Not running due to unset update_identifier\"
0.355 | 3311: exit 0
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: timestamp_dir=/var/lib/overcloud-yum-update
0.355 | 3311: mkdir -p $timestamp_dir
0.355 | 3311:
0.355 | 3311: # sanitise to remove unusual characters
0.355 | 3311: update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
0.355 | 3311:
0.355 | 3311: # seconds to wait for this node to rejoin the cluster after update
0.355 | 3311: cluster_start_timeout=600
0.355 | 3311: galera_sync_timeout=1800
0.355 | 3311: cluster_settle_timeout=1800
0.355 | 3311:
0.355 | 3311: timestamp_file=\"$timestamp_dir/$update_identifier\"
0.355 | 3311: if [[ -a \"$timestamp_file\" ]]; then
0.355 | 3311: echo \"Not running for already-run timestamp \\\"$update_identifier\\\"\"
0.355 | 3311: exit 0
0.355 | 3311: fi
0.355 | 3311: touch \"$timestamp_file\"
0.355 | 3311:
0.355 | 3311: pacemaker_status=\"\"
0.355 | 3311: # We include word boundaries in order to not match pacemaker_remote
0.355 | 3311: if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\\bpacemaker\\b'; then
0.355 | 3311: pacemaker_status=$(systemctl is-active pacemaker)
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # (NB: when backporting this s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid)
0.355 | 3311: # This runs before the yum_update so we are guaranteed to run it even in the absence
0.355 | 3311: # of packages to update (the check for -z \"$update_identifier\" guarantees that this
0.355 | 3311: # is run only on overcloud stack update -i)
0.355 | 3311: if [[ \"$pacemaker_status\" == \"active\" && \\
0.355 | 3311: \"$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name | tr '[:upper:]' '[:lower:]')\" == \"$(facter hostname | tr '[:upper:]' '[:lower:]')\" ]] ; then \\
0.355 | 3311: # OCF scripts don't cope with -eu
0.355 | 3311: echo \"Verifying if we need to fix up any IPv6 VIPs\"
0.355 | 3311: set +eu
0.355 | 3311: fixup_wrong_ipv6_vip
0.355 | 3311: ret=$?
0.355 | 3311: set -eu
0.355 | 3311: if [ $ret -ne 0 ]; then
0.355 | 3311: echo \"Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: command_arguments=${command_arguments:-}
0.355 | 3311:
0.355 | 3311: # Always ensure yum has full cache
0.355 | 3311: yum makecache || echo \"Yum makecache failed. This can cause failure later on.\"
0.355 | 3311:
0.355 | 3311: # yum check-update exits 100 if updates are available
0.355 | 3311: set +e
0.355 | 3311: check_update=$(yum check-update 2>&1)
0.355 | 3311: check_update_exit=$?
0.355 | 3311: set -e
0.355 | 3311:
0.355 | 3311: if [[ \"$check_update_exit\" == \"1\" ]]; then
0.355 | 3311: echo \"Failed to check for package updates\"
0.355 | 3311: echo \"$check_update\"
0.355 | 3311: exit 1
0.355 | 3311: elif [[ \"$check_update_exit\" != \"100\" ]]; then
0.355 | 3311: echo \"No packages require updating\"
0.355 | 3311: exit 0
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: # special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
0.355 | 3311: special_case_ovs_upgrade_if_needed
0.355 | 3311:
0.355 | 3311: # Resolve any RPM dependency issues before attempting the update
0.355 | 3311: yum_pre_update
0.355 | 3311:
0.355 | 3311: if [[ \"$pacemaker_status\" == \"active\" ]] ; then
0.355 | 3311: echo \"Pacemaker running, stopping cluster node and doing full package update\"
0.355 | 3311: node_count=$(pcs status xml | grep -o \"<nodes_configured.*/>\" | grep -o 'number=\"[0-9]*\"' | grep -o \"[0-9]*\")
0.355 | 3311: if [[ \"$node_count\" == \"1\" ]] ; then
0.355 | 3311: echo \"Active node count is 1, stopping node with --force\"
0.355 | 3311: pcs cluster stop --force
0.355 | 3311: else
0.355 | 3311: pcs cluster stop
0.355 | 3311: fi
0.355 | 3311: else
0.355 | 3311: echo \"Upgrading openstack-puppet-modules and its dependencies\"
0.355 | 3311: check_for_yum_lock
0.355 | 3311: yum -q -y update openstack-puppet-modules
0.355 | 3311: yum deplist openstack-puppet-modules | awk '/dependency/{print $2}' | xargs yum -q -y update
0.355 | 3311: echo \"Upgrading other packages is handled by config management tooling\"
0.355 | 3311: echo -n \"true\" > $heat_outputs_path.update_managed_packages
0.355 | 3311: exit 0
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: command=${command:-update}
0.355 | 3311: full_command=\"yum -q -y $command $command_arguments\"
0.355 | 3311:
0.355 | 3311: echo \"Running: $full_command\"
0.355 | 3311: check_for_yum_lock
0.355 | 3311: result=$($full_command)
0.355 | 3311: return_code=$?
0.355 | 3311: echo \"$result\"
0.355 | 3311: echo \"yum return code: $return_code\"
0.355 | 3311:
0.355 | 3311: if [[ \"$pacemaker_status\" == \"active\" ]] ; then
0.355 | 3311: echo \"Starting cluster node\"
0.355 | 3311: pcs cluster start
0.355 | 3311:
0.355 | 3311: hostname=$(hostname -s)
0.355 | 3311: tstart=$(date +%s)
0.355 | 3311: while [[ \"$(pcs status | grep \"^Online\" | grep -F -o $hostname)\" == \"\" ]]; do
0.355 | 3311: sleep 5
0.355 | 3311: tnow=$(date +%s)
0.355 | 3311: if (( tnow-tstart > cluster_start_timeout )) ; then
0.355 | 3311: echo \"ERROR $hostname failed to join cluster in $cluster_start_timeout seconds\"
0.355 | 3311: pcs status
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311:
0.355 | 3311: RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )
0.355 | 3311:
0.355 | 3311: if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
0.355 | 3311: tstart=$(date +%s)
0.355 | 3311: while ! clustercheck; do
0.355 | 3311: sleep 5
0.355 | 3311: tnow=$(date +%s)
0.355 | 3311: if (( tnow-tstart > galera_sync_timeout )) ; then
0.355 | 3311: echo \"ERROR galera sync timed out\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311: done
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: echo \"Waiting for pacemaker cluster to settle\"
0.355 | 3311: if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
0.355 | 3311: echo \"ERROR timed out while waiting for the cluster to settle\"
0.355 | 3311: exit 1
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311: pcs status
0.355 | 3311: fi
0.355 | 3311:
0.355 | 3311:
0.355 | 3311: echo \"Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code\"
0.355 | 3311:
0.355 | 3311: exit $return_code
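
yum_update.sh builds its control flow around the documented exit codes of yum check-update: 0 means nothing needs updating, 100 means updates are available, and 1 means the check itself failed. That is why the script toggles set +e/set -e around the call and treats 100 as the only "proceed" case. The same dispatch, distilled from the script above and stripped of the Heat/Pacemaker context:

    #!/bin/bash
    set -eu

    # "yum check-update" exits 0 (nothing to do), 100 (updates available),
    # or 1 (the check itself failed) -- capture the code without tripping set -e.
    set +e
    check_update=$(yum check-update 2>&1)
    check_update_exit=$?
    set -e

    if [[ "$check_update_exit" == "1" ]]; then
        echo "Failed to check for package updates"
        echo "$check_update"
        exit 1
    elif [[ "$check_update_exit" != "100" ]]; then
        echo "No packages require updating"
        exit 0
    fi

    echo "Updates available, proceeding"
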
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-ipmi.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Ceilometer Agent Ipmi role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionCeilometerIpmi\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"CeilometerServiceBase\", \"role_data\", \"config_settings\"]}, {\"ceilometer_redis_password\": {\"get_param\": \"RedisPassword\"}, \"ipmi_namespace\": true}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"ceilometer_ipmi_enabled\", \"command\": \"systemctl is-enabled openstack-ceilometer-ipmi\", \"name\": \"Check if ceilometer-agent-ipmi is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-ceilometer-ipmi' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"ceilometer_ipmi_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if openstack-ceilometer-ipmi is running\", \"tags\": \"step0,validation\"}, {\"when\": \"ceilometer_ipmi_enabled.rc == 0\", \"name\": \"Stop openstack-ceilometer-ipmi service\", \"service\": \"name=openstack-ceilometer-ipmi state=stopped\", \"tags\": \"step1\"}, {\"yum\": \"name=openstack-ceilometer-ipmi state=latest\", \"when\": \"ceilometer_ipmi_enabled.rc != 0\", \"name\": \"Install openstack-ceilometer-ipmi package if it was disabled\", \"tags\": \"step3\"}], \"logging_groups\": [\"ceilometer\"], \"service_name\": \"ceilometer_agent_ipmi\", \"logging_source\": {\"get_param\": \"CeilometerAgentIpmiLoggingSource\"}, \"step_config\": \"include ::tripleo::profile::base::ceilometer::agent::polling\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer Ipmi Agent service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"CeilometerAgentIpmiLoggingSource\": {\"default\": {\"path\": \"/var/log/ceilometer/ipmi.log\", \"tag\": \"openstack.ceilometer.agent.ipmi\"}, \"type\": \"json\"}, \"MonitoringSubscriptionCeilometerIpmi\": {\"default\": \"overcloud-ceilometer-agent-ipmi\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"CeilometerServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker/database/mysql.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Service MySQL with Pacemaker using composable services.\", \"value\": {\"metadata_settings\": {\"get_attr\": [\"MysqlBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"mysql\", \"step_config\": \"include ::tripleo::profile::pacemaker::database::mysql\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"MysqlBase\", \"role_data\", \"config_settings\"]}, {\"tripleo::profile::pacemaker::database::mysql::bind_address\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"tripleo::profile::pacemaker::database::mysql::ca_file\": {\"get_param\": \"InternalTLSCAFile\"}, \"tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}]}, \"upgrade_tasks\": [{\"name\": \"Check for galera root password\", \"file\": \"path=/root/.my.cnf state=file\", \"tags\": \"step0\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"MySQL with Pacemaker service deployment using puppet\
0.355 | 3311: \", \"parameters\": {\"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"InternalTLSCAFile\": {\"default\": \"/etc/ipa/ca.crt\", \"type\": \"string\", \"description\": \"Specifies the default CA cert to use if TLS is used for services in the internal network.\"}}, \"resources\": {\"MysqlBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/mysql.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-metadata.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NovaWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Nova services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Metadata service.\", \"value\": {\"metadata_settings\": {\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"nova_metadata\", \"step_config\": \"\", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"config_settings\"]}, {\"nova::api::metadata_listen\": {\"if\": [\"use_tls_proxy\", \"localhost\", {\"get_param\": [\"ServiceNetMap\", \"NovaMetadataNetwork\"]}]}}, {\"if\": [\"nova_workers_zero\", {}, {\"nova::api::metadata_workers\": {\"get_param\": \"NovaWorkers\"}}]}, {\"if\": [\"use_tls_proxy\", {\"tripleo::profile::base::nova::api::metadata_tls_proxy_fqdn\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaMetadataNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"tripleo::profile::base::nova::api::metadata_tls_proxy_bind_ip\": {\"get_param\": [\"ServiceNetMap\", \"NovaMetadataNetwork\"]}}, {}]}]}}}}, \"conditions\": {\"use_tls_proxy\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}, \"nova_workers_zero\": {\"equals\": [{\"get_param\": \"NovaWorkers\"}, 0]}}, \"resources\": {\"TLSProxyBase\": {\"type\": \"OS::TripleO::Services::TLSProxyBase\", \"properties\": {\"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-api-cloudwatch.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Openstack Heat CloudWatch API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HeatWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Heat service.\"}, \"MonitoringSubscriptionHeatApiCloudwatch\": {\"default\": \"overcloud-heat-api-cloudwatch\", \"type\": \"string\"}, \"HeatApiCloudwatchLoggingSource\": {\"default\": {\"path\": \"/var/log/heat/heat-api-cloudwatch.log\", \"tag\": \"openstack.heat.api.cloudwatch\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Heat Cloudwatch API role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"HeatBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"heat::api_cloudwatch::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiCloudwatchNetwork\"]}, \"heat::api_cloudwatch::service_name\": \"httpd\", \"heat::wsgi::apache_api_cloudwatch::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"tripleo.heat_api_cloudwatch.firewall_rules\": {\"125 heat_cloudwatch\": {\"dport\": [8003, 13003]}}, \"heat::wsgi::apache_api_cloudwatch::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiCloudwatchNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"heat::wsgi::apache_api_cloudwatch::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiCloudwatchNetwork\"]}}, {\"if\": [\"heat_workers_zero\", {}, {\"heat::wsgi::apache_api_cloudwatch::workers\": {\"get_param\": \"HeatWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"heat_api_cloudwatch_enabled\", \"command\": \"systemctl is-enabled openstack-heat-api-cloudwatch\", \"name\": \"Check if heat_api_cloudwatch is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"heat_api_cloudwatch_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-heat-api-cloudwatch is running\", \"tags\": \"step0,validation\"}, {\"ignore_errors\": true, \"shell\": \"httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi\", \"register\": \"heat_api_cloudwatch_apache\", \"name\": \"check for heat_api_cloudwatch running under apache (post upgrade)\", \"tags\": \"step1\"}, {\"when\": \"heat_api_cloudwatch_apache.rc == 0\", \"name\": \"Stop heat_api_cloudwatch service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"when\": \"heat_api_cloudwatch_enabled.rc == 0\", \"name\": \"Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd)\", 
\"service\": \"name=openstack-heat-api-cloudwatch state=stopped enabled=no\", \"tags\": \"step1\"}], \"logging_groups\": [\"heat\"], \"service_name\": \"heat_api_cloudwatch\", \"step_config\": \"include ::tripleo::profile::base::heat::api_cloudwatch\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionHeatApiCloudwatch\"}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"HeatApiCloudwatchLoggingSource\"}}}}, \"conditions\": {\"heat_workers_zero\": {\"equals\": [{\"get_param\": \"HeatWorkers\"}, 0]}}, \"resources\": {\"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"HeatBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/deploy-artifacts.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The ID of the DeployArtifacts resource.\", \"value\": {\"get_resource\": \"DeployArtifacts\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software Config to install deployment artifacts (tarball's and/or distribution packages) via HTTP URLs. The contents of the URL's can be tarballs or distribution packages (RPMs). If a tarball URL is supplied it is extracted onto the target node during deployment. If a package is deployed it is installed from the supplied URL. Note, you need the heat-config-script element built into your images, due to the script group below.\
0.355 | 3311: \", \"parameters\": {\"DeployArtifactURLs\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"A list of HTTP URLs containing deployment artifacts. Currently supports tarballs and RPM packages.\"}}, \"resources\": {\"DeployArtifacts\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"default\": {\"list_join\": [\" \", {\"get_param\": \"DeployArtifactURLs\"}]}, \"name\": \"artifact_urls\"}], \"config\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/deploy-artifacts.sh\"}, \"group\": \"script\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/networks.yaml": "{\"outputs\": {\"net_cidr_map\": {\"value\": {\"management\": {\"yaql\": {\"data\": {\"get_attr\": [\"ManagementNetwork\", \"subnet_cidr\"]}, \"expression\": \"str($.data).replace('null', 'disabled')\"}}, \"external\": {\"yaql\": {\"data\": {\"get_attr\": [\"ExternalNetwork\", \"subnet_cidr\"]}, \"expression\": \"str($.data).replace('null', 'disabled')\"}}, \"internal_api\": {\"yaql\": {\"data\": {\"get_attr\": [\"InternalNetwork\", \"subnet_cidr\"]}, \"expression\": \"str($.data).replace('null', 'disabled')\"}}, \"storage_mgmt\": {\"yaql\": {\"data\": {\"get_attr\": [\"StorageMgmtNetwork\", \"subnet_cidr\"]}, \"expression\": \"str($.data).replace('null', 'disabled')\"}}, \"storage\": {\"yaql\": {\"data\": {\"get_attr\": [\"StorageNetwork\", \"subnet_cidr\"]}, \"expression\": \"str($.data).replace('null', 'disabled')\"}}, \"tenant\": {\"yaql\": {\"data\": {\"get_attr\": [\"TenantNetwork\", \"subnet_cidr\"]}, \"expression\": \"str($.data).replace('null', 'disabled')\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"Create networks to split out Overcloud traffic\", \"resources\": {\"ManagementNetwork\": {\"type\": \"OS::TripleO::Network::Management\"}, \"InternalNetwork\": {\"type\": \"OS::TripleO::Network::InternalApi\"}, \"StorageNetwork\": {\"type\": \"OS::TripleO::Network::Storage\"}, \"TenantNetwork\": {\"type\": \"OS::TripleO::Network::Tenant\"}, \"StorageMgmtNetwork\": {\"type\": \"OS::TripleO::Network::StorageMgmt\"}, \"ExternalNetwork\": {\"type\": \"OS::TripleO::Network::External\"}, \"NetworkExtraConfig\": {\"type\": \"OS::TripleO::Network::ExtraConfig\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/keepalived.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Keepalived role.\", \"value\": {\"service_name\": \"keepalived\", \"step_config\": \"include ::tripleo::profile::base::keepalived\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"tripleo.keepalived.firewall_rules\": {\"106 keepalived vrrp\": {\"proto\": \"vrrp\"}}}, {\"if\": [\"control_iface_empty\", {}, {\"tripleo::keepalived::control_virtual_interface\": {\"get_param\": \"ControlVirtualInterface\"}}]}, {\"if\": [\"public_iface_empty\", {}, {\"tripleo::keepalived::public_virtual_interface\": {\"get_param\": \"PublicVirtualInterface\"}}]}]}, \"upgrade_tasks\": [{\"name\": \"Stop keepalived service\", \"service\": \"name=keepalived state=stopped\", \"tags\": \"step1\"}, {\"name\": \"Start keepalived service\", \"service\": \"name=keepalived state=started\", \"tags\": \"step4\"}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionKeepalived\"}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"public_iface_empty\": {\"equals\": [{\"get_param\": \"PublicVirtualInterface\"}, \"\"]}, \"control_iface_empty\": {\"equals\": [{\"get_param\": \"ControlVirtualInterface\"}, \"\"]}}, \"description\": \"Keepalived service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ControlVirtualInterface\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Interface where virtual ip will be assigned. This value will be automatically set by the deployment tool. Overriding here will override automatic setting.\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"MonitoringSubscriptionKeepalived\": {\"default\": \"overcloud-keepalived\", \"type\": \"string\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"PublicVirtualInterface\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Interface where virtual ip will be assigned. This value will be automatically set by the deployment tool. Overriding here will override automatic setting.\
0.355 | 3311: \"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/mysql.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Service MySQL using composable services.\", \"value\": {\"metadata_settings\": {\"if\": [\"internal_tls_enabled\", [{\"type\": \"vip\", \"network\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}, \"service\": \"mysql\"}, {\"type\": \"node\", \"network\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}, \"service\": \"mysql\"}], null]}, \"service_name\": \"mysql\", \"step_config\": \"include ::tripleo::profile::base::database::mysql\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"mysql_max_connections\": {\"get_param\": \"MysqlMaxConnections\"}, \"mysql::server::manage_config_file\": true, \"tripleo.mysql.firewall_rules\": {\"104 mysql galera\": {\"dport\": [873, 3306, 4444, 4567, 4568, 9200]}}, \"tripleo::profile::base::database::mysql::client_bind_address\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}, \"mysql_bind_host\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}, \"mysql_clustercheck_password\": {\"get_param\": \"MysqlClustercheckPassword\"}, \"enable_galera\": {\"get_param\": \"EnableGalera\"}, \"tripleo::profile::base::database::mysql::generate_dropin_file_limit\": {\"get_param\": \"MysqlIncreaseFileLimit\"}, \"tripleo::profile::base::database::mysql::bind_address\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"mysql::server::package_name\": \"mariadb-galera-server\", \"mysql::server::root_password\": {\"yaql\": {\"expression\": \"$.data.passwords.where($ != '').first()\", \"data\": {\"passwords\": [{\"get_param\": \"MysqlRootPassword\"}, {\"get_param\": [\"DefaultPasswords\", \"mysql_root_password\"]}]}}}}, {\"tripleo::profile::base::database::mysql::certificate_specs\": {\"service_certificate\": \"/etc/pki/tls/certs/mysql.crt\", \"hostname\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"%{hiera('cloud_name_NETWORK')}\"}}, \"dnsnames\": [{\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"%{hiera('cloud_name_NETWORK')}\"}}, {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}], \"service_key\": \"/etc/pki/tls/private/mysql.key\", \"principal\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}}, \"template\": \"mysql/%{hiera('cloud_name_NETWORK')}\"}}}, \"generate_service_certificates\": true}]}, \"upgrade_tasks\": [{\"name\": \"Check for galera root password\", \"file\": \"path=/root/.my.cnf state=file\", \"tags\": \"step0\"}, {\"name\": \"Stop service\", \"service\": \"name=mariadb state=stopped\", \"tags\": \"step2\"}, {\"name\": \"Start service\", \"service\": \"name=mariadb state=started\", \"tags\": \"step4\"}, {\"tags\": \"step4\", \"name\": \"Setup cell_v2 (create cell0 database)\", \"mysql_db\": {\"state\": \"present\", \"name\": \"nova_cell0\"}}, {\"mysql_user\": {\"str_replace\": {\"params\": {\"PASSWORD\": {\"get_param\": \"NovaPassword\"}}, \"template\": \"name=nova password=PASSWORD host=\\\"%\\\" priv=\\\"nova.*:ALL/nova_cell0.*:ALL,GRANT\\\" state=present\"}}, \"name\": \"Setup cell_v2 (grant access to the nova DB user)\", \"tags\": \"step4\"}]}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"internal_tls_enabled\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}}, \"description\": \"MySQL service deployment using puppet\
0.355 | 3311: \", \"parameters\": {\"MysqlMaxConnections\": {\"default\": 4096, \"type\": \"number\", \"description\": \"Configures MySQL max_connections config setting\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"EnableGalera\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to use Galera instead of regular MariaDB.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MysqlClustercheckPassword\": {\"hidden\": true, \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MysqlRootPassword\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"NovaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the nova service and db account\"}, \"MysqlIncreaseFileLimit\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Flag to increase MySQL open-files-limit to 16384\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Shared role data for the Heat services.\", \"value\": {\"service_name\": \"heat_base\", \"config_settings\": {\"heat::cron::purge_deleted::month\": {\"get_param\": \"HeatCronPurgeDeletedMonth\"}, \"heat::db::database_db_max_retries\": -1, \"heat::enable_proxy_headers_parsing\": true, \"heat::cron::purge_deleted::destination\": {\"get_param\": \"HeatCronPurgeDeletedDestination\"}, \"heat::max_json_body_size\": {\"get_param\": \"HeatMaxJsonBodySize\"}, \"heat::cron::purge_deleted::monthday\": {\"get_param\": \"HeatCronPurgeDeletedMonthday\"}, \"heat::yaql_memory_quota\": 100000, \"heat::cron::purge_deleted::age_type\": {\"get_param\": \"HeatCronPurgeDeletedAgeType\"}, \"heat::keystone::domain::domain_name\": \"heat_stack\", \"heat::cron::purge_deleted::age\": {\"get_param\": \"HeatCronPurgeDeletedAge\"}, \"heat::keystone::domain::domain_admin_email\": \"heat_stack_domain_admin@localhost\", \"heat::cron::purge_deleted::ensure\": {\"get_param\": \"HeatCronPurgeDeletedEnsure\"}, \"heat::keystone::authtoken::project_name\": \"service\", \"heat::keystone::authtoken::project_domain_name\": \"Default\", \"heat::keystone::domain::domain_admin\": \"heat_stack_domain_admin\", \"heat::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"HeatDebug\"}]}, \"heat::db::database_max_retries\": -1, \"heat::cron::purge_deleted::user\": {\"get_param\": \"HeatCronPurgeDeletedUser\"}, \"heat::yaql_limit_iterators\": 1000, \"heat::keystone::authtoken::user_domain_name\": \"Default\", \"heat::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"heat::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"heat::rpc_response_timeout\": 600, \"heat::rabbit_password\": {\"get_param\": 
\"RabbitPassword\"}, \"heat::cron::purge_deleted::hour\": {\"get_param\": \"HeatCronPurgeDeletedHour\"}, \"heat::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"heat::rabbit_heartbeat_timeout_threshold\": 60, \"heat::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"heat::keystone::authtoken::password\": {\"get_param\": \"HeatPassword\"}, \"heat::cron::purge_deleted::minute\": {\"get_param\": \"HeatCronPurgeDeletedMinute\"}, \"heat::policy::policies\": {\"context_is_admin\": {\"key\": \"context_is_admin\", \"value\": \"role:admin\"}}, \"heat::cron::purge_deleted::maxdelay\": {\"get_param\": \"HeatCronPurgeDeletedMaxDelay\"}, \"heat::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"heat::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"heat::cron::purge_deleted::weekday\": {\"get_param\": \"HeatCronPurgeDeletedWeekday\"}}, \"service_config_settings\": {\"keystone\": {\"tripleo::profile::base::keystone::heat_admin_email\": \"heat_stack_domain_admin@localhost\", \"tripleo::profile::base::keystone::heat_admin_domain\": \"heat_stack\", \"tripleo::profile::base::keystone::heat_admin_user\": \"heat_stack_domain_admin\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"HeatDebug\"}, \"\"]}}, \"description\": \"Openstack Heat base service. Shared for all Heat services.\
0.355 | 3311: \", \"parameters\": {\"HeatCronPurgeDeletedAgeType\": {\"default\": \"days\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Age type\
0.355 | 3311: \"}, \"HeatCronPurgeDeletedMaxDelay\": {\"default\": \"3600\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Max Delay\
0.355 | 3311: \"}, \"HeatCronPurgeDeletedMonthday\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Month Day\
0.355 | 3311: \"}, \"HeatCronPurgeDeletedHour\": {\"default\": \"0\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Hour\
0.355 | 3311: \"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"HeatCronPurgeDeletedMonth\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Month\
0.355 | 3311: \"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"HeatPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the Heat service and db account, used by the Heat services.\"}, \"HeatCronPurgeDeletedUser\": {\"default\": \"heat\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - User\
0.355 | 3311: \"}, \"HeatMaxJsonBodySize\": {\"default\": 1048576, \"type\": \"number\", \"description\": \"Maximum raw byte size of the Heat API JSON request body.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HeatCronPurgeDeletedAge\": {\"default\": \"30\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Age\
0.355 | 3311: \"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"HeatCronPurgeDeletedEnsure\": {\"default\": \"present\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Ensure\
0.355 | 3311: \"}, \"HeatCronPurgeDeletedMinute\": {\"default\": \"1\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Minute\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"HeatDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Heat services.\"}, \"HeatCronPurgeDeletedDestination\": {\"default\": \"/dev/null\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Log destination\
0.355 | 3311: \"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"HeatCronPurgeDeletedWeekday\": {\"default\": \"*\", \"type\": \"string\", \"description\": \"Cron to purge db entries marked as deleted and older than $age - Week Day\
0.355 | 3311: \"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pre_puppet_pacemaker.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Pre-Puppet Config for Pacemaker deployments\", \"parameters\": {\"input_values\": {\"type\": \"json\", \"description\": \"input values for the software deployments\"}, \"servers\": {\"type\": \"json\"}}, \"resources\": {\"ControllerPrePuppetMaintenanceModeConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pacemaker_maintenance_mode.sh\"}}}, \"ControllerPrePuppetMaintenanceModeDeployment\": {\"type\": \"OS::Heat::SoftwareDeployments\", \"properties\": {\"input_values\": {\"get_param\": \"input_values\"}, \"config\": {\"get_resource\": \"ControllerPrePuppetMaintenanceModeConfig\"}, \"servers\": {\"get_param\": \"servers\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/services.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Combined Role data for this set of services.\", \"value\": {\"logging_sources\": {\"get_attr\": [\"LoggingSourcesConfig\", \"value\"]}, \"global_config_settings\": {\"get_attr\": [\"GlobalConfigSettings\", \"value\"]}, \"upgrade_tasks\": {\"get_attr\": [\"UpgradeTasks\", \"value\"]}, \"host_prep_tasks\": {\"get_attr\": [\"HostPrepTasks\", \"value\"]}, \"docker_config\": {\"get_attr\": [\"DockerConfig\", \"value\"]}, \"step_config\": {\"get_attr\": [\"PuppetStepConfig\", \"value\"]}, \"puppet_config\": {\"get_attr\": [\"PuppetConfig\", \"value\"]}, \"logging_groups\": {\"get_attr\": [\"LoggingGroupsConfig\", \"value\"]}, \"upgrade_batch_tasks\": {\"get_attr\": [\"UpgradeBatchTasks\", \"value\"]}, \"kolla_config\": {\"get_attr\": [\"KollaConfig\", \"value\"]}, \"config_settings\": {\"map_merge\": {\"get_attr\": [\"ServiceChain\", \"role_data\", \"config_settings\"]}}, \"service_names\": {\"get_attr\": [\"ServiceNames\", \"value\"]}, \"update_tasks\": {\"get_attr\": [\"UpdateTasks\", \"value\"]}, \"service_config_settings\": {\"get_attr\": [\"ServiceConfigSettings\", \"value\"]}, \"service_metadata_settings\": {\"get_attr\": [\"ServiceServerMetadataHook\", \"metadata\"]}, \"docker_puppet_tasks\": {\"get_attr\": [\"DockerPuppetTasks\", \"value\"]}, \"workflow_tasks\": {\"get_attr\": [\"WorkflowTasks\", \"value\"]}, \"monitoring_subscriptions\": {\"get_attr\": [\"MonitoringSubscriptionsConfig\", \"value\"]}}}}, \"heat_template_version\": \"pike\", \"description\": \"Utility stack to convert an array of services into a set of combined role configs.\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service -> default password. Used to help pass top level passwords managed by Heat into services.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"Services\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"List nested stack service templates.\
0.355 | 3311: \"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"ServiceChain\": {\"type\": \"OS::Heat::ResourceChain\", \"properties\": {\"concurrent\": true, \"resource_properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}, \"resources\": {\"get_param\": \"Services\"}}}, \"PuppetStepConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"string\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data.service_names, []).zip(coalesce($.data.step_config, []), coalesce($.data.docker_config, [])).where($[2] = null).where($[1] != null).select($[1]).join(\\\"\\\
0.355 | 3311: \\\")\", \"data\": {\"service_names\": {\"get_attr\": [\"ServiceChain\", \"role_data\", \"service_name\"]}, \"step_config\": {\"get_attr\": [\"ServiceChain\", \"role_data\", \"step_config\"]}, \"docker_config\": {\"get_attr\": [\"ServiceChain\", \"role_data\", \"docker_config\"]}}}}}}, \"UpdateTasks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct()\", \"data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}, \"LoggingSourcesConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"let( default_format => coalesce($.data.default_format, ''), pos_file_path => coalesce($.data.pos_file_path, ''), sources => coalesce($.data.sources, {}).flatten() ) -> $sources.where($ != null).select({ 'type' => 'tail', 'tag' => $.tag, 'path' => $.path, 'format' => $.get('format', $default_format), 'pos_file' => $.get('pos_file', $pos_file_path + '/' + $.tag + '.pos') })\
0.355 | 3311: \", \"data\": {\"sources\": [{\"get_attr\": [\"LoggingConfiguration\", \"LoggingDefaultSources\"]}, {\"yaql\": {\"expression\": \"list(coalesce($.data.role_data, []).where($ != null).select($.get('logging_source')).where($ != null))\", \"data\": {\"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}, {\"get_attr\": [\"LoggingConfiguration\", \"LoggingExtraSources\"]}], \"pos_file_path\": {\"get_attr\": [\"LoggingConfiguration\", \"LoggingPosFilePath\"]}, \"default_format\": {\"get_attr\": [\"LoggingConfiguration\", \"LoggingDefaultFormat\"]}}}}}}, \"HostPrepTasks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data, []).where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()\", \"data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}, \"ServiceConfigSettings\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data.role_data, []).where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})\", \"data\": {\"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}}, \"LoggingConfiguration\": {\"type\": \"OS::TripleO::LoggingConfiguration\"}, \"LoggingGroupsConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"set((coalesce($.data.default, []) + coalesce($.data.extra, []) + coalesce($.data.role_data, []).where($ != null).select($.get('logging_groups'))).flatten()).where($)\
0.355 | 3311: \", \"data\": {\"default\": {\"get_attr\": [\"LoggingConfiguration\", \"LoggingDefaultGroups\"]}, \"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}, \"extra\": {\"get_attr\": [\"LoggingConfiguration\", \"LoggingExtraGroups\"]}}}}}}, \"MonitoringSubscriptionsConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"list(coalesce($.data.role_data, []).where($ != null).select($.get('monitoring_subscription')).where($ != null))\", \"data\": {\"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}}, \"WorkflowTasks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data.role_data, []).where($ != null).select($.get('workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})\", \"data\": {\"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}}, \"UpgradeTasks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data, []).where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()\", \"data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}, \"ServiceServerMetadataHook\": {\"type\": \"OS::TripleO::ServiceServerMetadataHook\", \"properties\": {\"RoleData\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}, \"DockerConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data.service_names, []).zip(coalesce($.data.docker_config, [])).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})\", \"data\": {\"service_names\": {\"get_attr\": [\"ServiceChain\", \"role_data\", \"service_names\"]}, \"docker_config\": {\"get_attr\": [\"ServiceChain\", \"role_data\", \"docker_config\"]}}}}}}, \"GlobalConfigSettings\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"map_merge\": {\"yaql\": {\"expression\": \"list(coalesce($.data.role_data, []).where($ != null).select($.get('global_config_settings')).where($ != null))\", \"data\": {\"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}}}, \"UpgradeBatchTasks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data, []).where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()\", \"data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}, \"ServiceNames\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"filter\": [[null], {\"get_attr\": [\"ServiceChain\", \"role_data\", \"service_name\"]}]}}}, \"KollaConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data.role_data, []).where($ != null).select($.get('kolla_config')).where($ != null).reduce($1.mergeWith($2), {})\", \"data\": {\"role_data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}}, \"PuppetConfig\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"comma_delimited_list\", \"value\": {\"yaql\": {\"expression\": \"coalesce($.data, []).where($ != null).select($.get('puppet_config')).where($ != null).distinct()\", \"data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}, \"DockerPuppetTasks\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": 
{\"yaql\": {\"expression\": \"dict(coalesce($.data, []).where($ != null).select($.get('docker_puppet_tasks')).where($ != null).selectMany($.items()).groupBy($[0], $[1]))\", \"data\": {\"get_attr\": [\"ServiceChain\", \"role_data\"]}}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-metricd.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Gnocchi role.\", \"value\": {\"service_name\": \"gnocchi_metricd\", \"step_config\": \"include ::tripleo::profile::base::gnocchi::metricd\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"GnocchiServiceBase\", \"role_data\", \"config_settings\"]}, {\"gnocchi::metricd::workers\": {\"get_param\": \"GnocchiMetricdWorkers\"}}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"gnocchi_metricd_enabled\", \"command\": \"systemctl is-enabled openstack-gnocchi-metricd\", \"name\": \"Check if gnocchi_metricd is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-gnocchi-metricd' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"gnocchi_metricd_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-gnocchi-metricd is running\", \"tags\": \"step0,validation\"}, {\"when\": \"gnocchi_metricd_enabled.rc == 0\", \"name\": \"Stop gnocchi_metricd service\", \"service\": \"name=openstack-gnocchi-metricd state=stopped\", \"tags\": \"step1\"}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionGnocchiMetricd\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"Gnocchi service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"GnocchiMetricdWorkers\": {\"default\": \"%{::os_workers}\", \"type\": \"string\", \"description\": \"Number of workers for Gnocchi MetricD\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"MonitoringSubscriptionGnocchiMetricd\": {\"default\": \"overcloud-gnocchi-metricd\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"GnocchiServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/glance-registry-disabled.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the disabled Glance Registry role.\", \"value\": {\"service_name\": \"glance_registry_disabled\", \"upgrade_tasks\": [{\"name\": \"Stop and disable glance_registry service on upgrade\", \"service\": \"name=openstack-glance-registry state=stopped enabled=no\", \"tags\": \"step1\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Glance Registry service, disabled since ocata\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/net_ip_map.yaml": "{\"outputs\": {\"net_ip_map\": {\"description\": \"A Hash containing a mapping of network names to assigned IPs for a specific machine.\
0.355 | 3311: \", \"value\": {\"get_attr\": [\"NetIpMapValue\", \"value\"]}}}, \"heat_template_version\": \"pike\", \"parameters\": {\"TenantIpUri\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address with brackets in case of IPv6\"}, \"StorageIp\": {\"default\": \"\", \"type\": \"string\"}, \"InternalApiIp\": {\"default\": \"\", \"type\": \"string\"}, \"ExternalNetName\": {\"default\": \"external\", \"type\": \"string\", \"description\": \"The name of the external network.\"}, \"TenantNetName\": {\"default\": \"tenant\", \"type\": \"string\", \"description\": \"The name of the tenant network.\"}, \"TenantIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the tenant network\"}, \"StorageIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage network\"}, \"ManagementIpUri\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address with brackets in case of IPv6\"}, \"StorageIpUri\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address with brackets in case of IPv6\"}, \"InternalApiIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the internal_api network\"}, \"StorageMgmtIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage_mgmt network\"}, \"StorageMgmtNetName\": {\"default\": \"storage_mgmt\", \"type\": \"string\", \"description\": \"The name of the storage_mgmt network.\"}, \"TenantIp\": {\"default\": \"\", \"type\": \"string\"}, \"ControlPlaneIp\": {\"default\": \"\", \"type\": \"string\"}, \"InternalApiNetName\": {\"default\": \"internal_api\", \"type\": \"string\", \"description\": \"The name of the internal_api network.\"}, \"StorageMgmtIp\": {\"default\": \"\", \"type\": \"string\"}, \"ExternalIp\": {\"default\": \"\", \"type\": \"string\"}, \"StorageMgmtIpUri\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address with brackets in case of IPv6\"}, \"ManagementIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the management network\"}, \"InternalApiIpUri\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address with brackets in case of IPv6\"}, \"ExternalIpUri\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address with brackets in case of IPv6\"}, \"ControlPlaneSubnetCidr\": {\"default\": \"24\", \"type\": \"string\", \"description\": \"The subnet CIDR of the control plane network.\"}, \"ExternalIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the external network\"}, \"StorageNetName\": {\"default\": \"storage\", \"type\": \"string\", \"description\": \"The name of the storage network.\"}, \"ManagementIp\": {\"default\": \"\", \"type\": \"string\"}, \"ManagementNetName\": {\"default\": \"management\", \"type\": \"string\", \"description\": \"The name of the management network.\"}}, \"resources\": {\"NetIpMapValue\": {\"type\": \"OS::Heat::Value\", \"properties\": {\"type\": \"json\", \"value\": {\"map_replace\": [{\"tenant_uri\": {\"get_param\": \"TenantIpUri\"}, \"management_uri\": {\"get_param\": \"ManagementIpUri\"}, \"internal_api_subnet\": {\"get_param\": \"InternalApiIpSubnet\"}, \"ctlplane\": {\"get_param\": \"ControlPlaneIp\"}, \"external\": {\"get_param\": \"ExternalIp\"}, \"storage_mgmt\": {\"get_param\": \"StorageMgmtIp\"}, \"tenant\": {\"get_param\": \"TenantIp\"}, \"storage_uri\": 
{\"get_param\": \"StorageIpUri\"}, \"management\": {\"get_param\": \"ManagementIp\"}, \"tenant_subnet\": {\"get_param\": \"TenantIpSubnet\"}, \"ctlplane_uri\": {\"get_param\": \"ControlPlaneIp\"}, \"management_subnet\": {\"get_param\": \"ManagementIpSubnet\"}, \"storage\": {\"get_param\": \"StorageIp\"}, \"storage_subnet\": {\"get_param\": \"StorageIpSubnet\"}, \"external_subnet\": {\"get_param\": \"ExternalIpSubnet\"}, \"storage_mgmt_subnet\": {\"get_param\": \"StorageMgmtIpSubnet\"}, \"ctlplane_subnet\": {\"list_join\": [\"\", [{\"get_param\": \"ControlPlaneIp\"}, \"/\", {\"get_param\": \"ControlPlaneSubnetCidr\"}]]}, \"internal_api\": {\"get_param\": \"InternalApiIp\"}, \"internal_api_uri\": {\"get_param\": \"InternalApiIpUri\"}, \"external_uri\": {\"get_param\": \"ExternalIpUri\"}, \"storage_mgmt_uri\": {\"get_param\": \"StorageMgmtIpUri\"}}, {\"keys\": {\"tenant_uri\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"TenantNetName\"}}, \"template\": \"NAME_uri\"}}, \"management_uri\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"ManagementNetName\"}}, \"template\": \"NAME_uri\"}}, \"internal_api_subnet\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"InternalApiNetName\"}}, \"template\": \"NAME_subnet\"}}, \"external\": {\"get_param\": \"ExternalNetName\"}, \"storage_mgmt\": {\"get_param\": \"StorageMgmtNetName\"}, \"tenant\": {\"get_param\": \"TenantNetName\"}, \"storage_uri\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"StorageNetName\"}}, \"template\": \"NAME_uri\"}}, \"management\": {\"get_param\": \"ManagementNetName\"}, \"tenant_subnet\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"TenantNetName\"}}, \"template\": \"NAME_subnet\"}}, \"management_subnet\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"ManagementNetName\"}}, \"template\": \"NAME_subnet\"}}, \"storage\": {\"get_param\": \"StorageNetName\"}, \"storage_subnet\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"StorageNetName\"}}, \"template\": \"NAME_subnet\"}}, \"external_subnet\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"ExternalNetName\"}}, \"template\": \"NAME_subnet\"}}, \"storage_mgmt_subnet\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"StorageMgmtNetName\"}}, \"template\": \"NAME_subnet\"}}, \"internal_api\": {\"get_param\": \"InternalApiNetName\"}, \"internal_api_uri\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"InternalApiNetName\"}}, \"template\": \"NAME_uri\"}}, \"external_uri\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"ExternalNetName\"}}, \"template\": \"NAME_uri\"}}, \"storage_mgmt_uri\": {\"str_replace\": {\"params\": {\"NAME\": {\"get_param\": \"StorageMgmtNetName\"}}, \"template\": \"NAME_uri\"}}}}]}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-l3.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron L3 agent configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NeutronExternalNetworkBridge\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Name of bridge used for external network traffic. Usually L2 agent handles port wiring into external bridge, and hence the parameter should be unset.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronL3AgentLoggingSource\": {\"default\": {\"path\": \"/var/log/neutron/l3-agent.log\", \"tag\": \"openstack.neutron.agent.l3\"}, \"type\": \"json\"}, \"MonitoringSubscriptionNeutronL3\": {\"default\": \"overcloud-neutron-l3-agent\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NeutronL3AgentMode\": {\"default\": \"legacy\", \"type\": \"string\", \"description\": \"Agent mode for L3 agent. Must be one of legacy or dvr_snat.\
0.355 | 3311: \", \"constraints\": [{\"allowed_values\": [\"legacy\", \"dvr_snat\"]}]}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron L3 agent service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNeutronL3\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"tripleo.neutron_l3.firewall_rules\": {\"106 neutron_l3 vrrp\": {\"proto\": \"vrrp\"}}, \"neutron::agents::l3::agent_mode\": {\"get_param\": \"NeutronL3AgentMode\"}}, {\"if\": [\"external_network_bridge_empty\", {}, {\"neutron::agents::l3::external_network_bridge\": {\"get_param\": \"NeutronExternalNetworkBridge\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"neutron_l3_agent_enabled\", \"command\": \"systemctl is-enabled neutron-l3-agent\", \"name\": \"Check if neutron_l3_agent is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'neutron-l3-agent' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"neutron_l3_agent_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service neutron-l3-agent is running\", \"tags\": \"step0,validation\"}, {\"when\": \"neutron_l3_agent_enabled.rc == 0\", \"name\": \"Stop neutron_l3 service\", \"service\": \"name=neutron-l3-agent state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"neutron\"], \"service_name\": \"neutron_l3\", \"logging_source\": {\"get_param\": \"NeutronL3AgentLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::neutron::l3\
0.355 | 3311: \"}}}, \"conditions\": {\"external_network_bridge_empty\": {\"equals\": [{\"get_param\": \"NeutronExternalNetworkBridge\"}, \"''\"]}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-engine.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Mistral Engine role.\", \"value\": {\"service_name\": \"mistral_engine\", \"step_config\": \"include ::tripleo::profile::base::mistral::engine\
0.355 | 3311: \", \"config_settings\": {\"get_attr\": [\"MistralBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"mistral_engine_enabled\", \"command\": \"systemctl is-enabled openstack-mistral-engine\", \"name\": \"Check if mistral engine is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-mistral-engine' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"mistral_engine_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if openstack-mistral-engine is running\", \"tags\": \"step0,validation\"}, {\"name\": \"Stop mistral_engine service\", \"service\": \"name=openstack-mistral-engine state=stopped\", \"tags\": \"step1\"}, {\"yum\": \"name=openstack-mistral-engine state=latest\", \"when\": \"mistral_engine_enabled.rc != 0\", \"name\": \"Install openstack-mistral-engine package if it was disabled\", \"tags\": \"step3\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"Openstack Mistral Engine service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"MistralBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-api.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Openstack Mistral API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"MistralApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Mistral API.\
0.355 | 3311: e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"MistralWorkers\": {\"default\": 1, \"type\": \"number\", \"description\": \"The number of workers for the mistral-api.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Mistral API role.\", \"value\": {\"service_name\": \"mistral_api\", \"step_config\": \"include ::tripleo::profile::base::mistral::api\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"MistralBase\", \"role_data\", \"config_settings\"]}, {\"tripleo.mistral_api.firewall_rules\": {\"133 mistral\": {\"dport\": [8989, 13989]}}, \"mistral::wsgi::apache::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}, \"mistral::api::service_name\": \"httpd\", \"mistral::policy::policies\": {\"get_param\": \"MistralApiPolicies\"}, \"mistral::api::api_workers\": {\"get_param\": \"MistralWorkers\"}, \"mistral::api::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}, \"mistral::wsgi::apache::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"MistralApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"mistral::wsgi::apache::ssl\": {\"get_param\": \"EnableInternalTLS\"}}, {\"if\": [\"mistral_workers_zero\", {}, {\"mistral::wsgi::apache::workers\": {\"get_param\": \"MistralWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"mistral_api_enabled\", \"command\": \"systemctl is-enabled openstack-mistral-api\", \"name\": \"Check if mistral api is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-mistral-api' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"mistral_api_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if openstack-mistral-api is running\", \"tags\": \"step0,validation\"}, {\"ignore_errors\": true, \"shell\": \"httpd -t -D DUMP_VHOSTS | grep -q mistral_api_wsgi\", \"register\": \"mistral_api_apache\", \"name\": \"check for mistral_api running under apache (post upgrade)\", \"tags\": \"step1\"}, {\"when\": \"mistral_api_apache.rc == 0\", \"name\": \"Stop mistral_api service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"when\": \"mistral_api_enabled.rc == 0\", \"name\": \"Stop and disable mistral_api service (pre-upgrade not under httpd)\", \"service\": \"name=openstack-mistral-api state=stopped enabled=no\", \"tags\": \"step1\"}], \"service_config_settings\": {\"get_attr\": [\"MistralBase\", \"role_data\", \"service_config_settings\"]}}}}, \"conditions\": {\"mistral_workers_zero\": {\"equals\": [{\"get_param\": \"MistralWorkers\"}, 0]}}, \"resources\": {\"MistralBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Aodh role.\", \"value\": {\"service_name\": \"aodh_base\", \"config_settings\": {\"aodh_redis_password\": {\"get_param\": \"RedisPassword\"}, \"aodh::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"AodhDebug\"}]}, \"aodh::db::database_connection\": {\"make_url\": {\"username\": \"aodh\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"AodhPassword\"}, \"path\": \"/aodh\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"aodh::keystone::authtoken::project_domain_name\": \"Default\", \"aodh::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"aodh::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"aodh::keystone::authtoken::project_name\": \"service\", \"aodh::keystone::authtoken::user_domain_name\": \"Default\", \"aodh::auth::auth_password\": {\"get_param\": \"AodhPassword\"}, \"aodh::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"aodh::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"aodh::auth::auth_tenant_name\": \"service\", \"aodh::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"aodh::rabbit_use_ssl\": 
{\"get_param\": \"RabbitClientUseSSL\"}, \"aodh::auth::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"aodh::auth::auth_region\": {\"get_param\": \"KeystoneRegion\"}, \"aodh::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"aodh::keystone::authtoken::password\": {\"get_param\": \"AodhPassword\"}}, \"service_config_settings\": {\"keystone\": {\"aodh::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"AodhAdmin\", \"uri\"]}, \"aodh::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"AodhPublic\", \"uri\"]}, \"aodh::keystone::auth::tenant\": \"service\", \"aodh::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"AodhInternal\", \"uri\"]}, \"aodh::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"aodh::keystone::auth::password\": {\"get_param\": \"AodhPassword\"}}, \"mysql\": {\"aodh::db::mysql::user\": \"aodh\", \"aodh::db::mysql::dbname\": \"aodh\", \"aodh::db::mysql::password\": {\"get_param\": \"AodhPassword\"}, \"aodh::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"aodh::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"AodhDebug\"}, \"\"]}}, \"description\": \"OpenStack Aodh service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"AodhDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Aodh services.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"AodhPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the aodh services.\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/ci/common/net-config-multinode.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The OsNetConfigImpl resource.\", \"value\": {\"get_resource\": \"OsNetConfigImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.\
0.355 | 3311: \", \"parameters\": {\"StorageIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage network\"}, \"TenantIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the tenant network\"}, \"ManagementIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the management network\"}, \"ExternalIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the external network\"}, \"ControlPlaneSubnetCidr\": {\"default\": \"24\", \"type\": \"string\", \"description\": \"The subnet CIDR of the control plane network.\"}, \"StorageMgmtIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage_mgmt network\"}, \"ControlPlaneIp\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the ctlplane network\"}, \"InternalApiIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the internal_api network\"}}, \"resources\": {\"OsNetConfigImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"str_replace\": {\"params\": {\"CONTROLPLANESUBNETCIDR\": {\"get_param\": \"ControlPlaneSubnetCidr\"}, \"CONTROLPLANEIP\": {\"get_param\": \"ControlPlaneIp\"}}, \"template\": \"#!/bin/bash\
0.355 | 3311: if ! ip addr show dev $bridge_name | grep CONTROLPLANEIP/CONTROLPLANESUBNETCIDR; then\
0.355 | 3311: ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name\
0.355 | 3311: fi\
0.355 | 3311: \"}}, \"inputs\": [{\"default\": \"br-ex\", \"type\": \"String\", \"name\": \"bridge_name\", \"description\": \"bridge-name\"}]}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Shared role data for the Mistral services.\", \"value\": {\"service_name\": \"mistral_base\", \"config_settings\": {\"mistral::keystone::authtoken::user_domain_name\": \"Default\", \"mistral::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"mistral::keystone_ec2_uri\": {\"list_join\": [\"\", [{\"get_param\": [\"EndpointMap\", \"KeystoneV3Internal\", \"uri\"]}, \"/ec2tokens\"]]}, \"mistral::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"mistral::keystone::authtoken::project_name\": \"service\", \"mistral::database_connection\": {\"make_url\": {\"username\": \"mistral\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"MistralPassword\"}, \"path\": \"/mistral\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"mistral::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"mistral::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"mistral::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"MistralDebug\"}]}, \"mistral::keystone::authtoken::project_domain_name\": \"Default\", \"mistral::keystone::authtoken::password\": {\"get_param\": \"MistralPassword\"}, \"mistral::notification_driver\": {\"get_param\": \"NotificationDriver\"}, \"mistral::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"mistral::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri\"]}}, \"service_config_settings\": {\"keystone\": {\"mistral::keystone::auth::password\": {\"get_param\": \"MistralPassword\"}, \"mistral::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"mistral::keystone::auth::tenant\": \"service\", \"mistral::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"MistralInternal\", \"uri\"]}, \"mistral::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"MistralPublic\", \"uri\"]}, \"mistral::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"MistralAdmin\", \"uri\"]}}, \"mysql\": {\"mistral::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"mistral::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"mistral::db::mysql::password\": {\"get_param\": \"MistralPassword\"}, \"mistral::db::mysql::user\": \"mistral\", \"mistral::db::mysql::dbname\": \"mistral\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"service_debug_unset\": {\"equals\": [{\"get_param\": \"MistralDebug\"}, \"\"]}}, \"description\": \"Openstack Mistral base service. Shared for all Mistral services.\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"MistralPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the Mistral service and db account, used by the Mistral services.\"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"MistralDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Mistral services.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/controller-config.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"A software config which runs puppet on the Controller role\
0.355 | 3311: \", \"parameters\": {\"ConfigDebug\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to run config management (e.g. Puppet) in debug mode.\"}, \"StepConfig\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Config manifests that will be used to step through the deployment.\"}, \"PuppetTags\": {\"default\": \"\", \"type\": \"string\", \"description\": \"List of comma-separated tags to limit puppet catalog to.\"}}, \"outputs\": {\"OS::stack_id\": {\"description\": \"The software config which runs puppet on the Controller role\", \"value\": {\"get_resource\": \"ControllerPuppetConfigImpl\"}}}, \"conditions\": {\"puppet_tags_empty\": {\"equals\": [{\"get_param\": \"PuppetTags\"}, \"\"]}}, \"resources\": {\"ControllerPuppetConfigImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"outputs\": [{\"name\": \"result\"}], \"inputs\": [{\"type\": \"Number\", \"name\": \"step\"}], \"config\": {\"list_join\": [\"\", [{\"str_replace\": {\"params\": {\"__ROLE__\": \"controller\"}, \"template\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/manifests/overcloud_role.pp\"}}}, {\"get_param\": \"StepConfig\"}]]}, \"options\": {\"enable_debug\": {\"get_param\": \"ConfigDebug\"}, \"modulepath\": \"/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules\", \"enable_hiera\": true, \"enable_facter\": false, \"tags\": {\"if\": [\"puppet_tags_empty\", \"\", {\"list_join\": [\",\", [\"file,concat,file_line,augeas\", {\"get_param\": \"PuppetTags\"}]]}]}}, \"group\": \"puppet\"}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2-ovn.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron ML2/OVN plugin.\", \"value\": {\"service_name\": \"neutron_plugin_ml2_ovn\", \"step_config\": \"include ::tripleo::profile::base::neutron::plugins::ml2\", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronMl2Base\", \"role_data\", \"config_settings\"]}, {\"neutron::plugins::ml2::ovn::ovsdb_connection_timeout\": {\"get_param\": \"OVNDbConnectionTimeout\"}, \"neutron::plugins::ml2::ovn::vif_type\": {\"get_param\": \"OVNVifType\"}, \"ovn::southbound::port\": {\"get_param\": \"OVNSouthboundServerPort\"}, \"neutron::server::qos_notification_drivers\": {\"get_param\": \"OVNQosDriver\"}, \"neutron::plugins::ml2::ovn::neutron_sync_mode\": {\"get_param\": \"OVNNeutronSyncMode\"}, \"neutron::plugins::ml2::max_header_size\": {\"get_param\": \"NeutronGeneveMaxHeaderSize\"}, \"neutron::plugins::ml2::ovn::ovn_l3_mode\": true}]}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron ML2/OVN plugin configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"OVNDbConnectionTimeout\": {\"default\": 180, \"type\": \"number\", \"description\": \"Timeout in seconds for the OVSDB connection transaction\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"OVNNeutronSyncMode\": {\"default\": \"log\", \"type\": \"string\", \"description\": \"The synchronization mode of OVN with Neutron DB\", \"constraints\": [{\"allowed_values\": [\"log\", false, \"repair\"]}]}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"OVNVifType\": {\"default\": \"ovs\", \"type\": \"string\", \"description\": \"Type of VIF to be used for ports\", \"constraints\": [{\"allowed_values\": [\"ovs\", \"vhostuser\"]}]}, \"OVNSouthboundServerPort\": {\"default\": 6642, \"type\": \"number\", \"description\": \"Port of the OVN Southbound DB server\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"OVNQosDriver\": {\"default\": null, \"type\": \"string\", \"description\": \"OVN notification driver for Neutron QOS service plugin\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronGeneveMaxHeaderSize\": {\"default\": 38, \"type\": \"number\", \"description\": \"Geneve encapsulation header size\"}}, \"resources\": {\"NeutronMl2Base\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/panko-api.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Panko API service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionPankoApi\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"PankoBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"panko::wsgi::apache::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"panko::wsgi::apache::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"panko::api::host\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"tripleo.panko_api.firewall_rules\": {\"140 panko-api\": {\"dport\": [8977, 13977]}}, \"panko::api::service_name\": \"httpd\", \"panko::wsgi::apache::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"PankoApiNetwork\"]}, \"panko::policy::policies\": {\"get_param\": \"PankoApiPolicies\"}, 
\"panko::api::enable_proxy_headers_parsing\": true}]}, \"upgrade_tasks\": {\"list_concat\": [{\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"upgrade_tasks\"]}, [{\"ignore_errors\": true, \"register\": \"httpd_enabled\", \"command\": \"systemctl is-enabled httpd\", \"name\": \"Check if httpd is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'httpd' --property ActiveState | grep '\\\\bactive\\\\b'\
0.355 | 3311: \", \"when\": \"httpd_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check if httpd is running\", \"tags\": \"step0,validation\"}, {\"when\": \"httpd_enabled.rc == 0\", \"name\": \"Stop panko-api service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"yum\": \"name=openstack-panko-api state=latest\", \"name\": \"Install openstack-panko-api package if it was not installed\", \"tags\": \"step3\"}]]}, \"service_config_settings\": {\"get_attr\": [\"PankoBase\", \"role_data\", \"service_config_settings\"]}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"panko_api\", \"step_config\": \"include tripleo::profile::base::panko::api\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Panko API service configured with Puppet. Note, This service is deprecated in Pike release and will be disabled in future releases.\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"PankoApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Panko API.\
0.355 | 3311: e.g. { panko-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MonitoringSubscriptionPankoApi\": {\"default\": \"overcloud-ceilometer-panko-api\", \"type\": \"string\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"PankoBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/panko-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sshd.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the ssh\", \"value\": {\"service_name\": \"sshd\", \"step_config\": \"include ::tripleo::profile::base::sshd\", \"config_settings\": {\"tripleo::profile::base::sshd::motd\": {\"get_param\": \"MessageOfTheDay\"}, \"tripleo::profile::base::sshd::bannertext\": {\"get_param\": \"BannerText\"}, \"tripleo::profile::base::sshd::options\": {\"get_param\": \"SshServerOptions\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"Configure sshd_config\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"SshServerOptions\": {\"default\": {\"Subsystem\": \"sftp /usr/libexec/openssh/sftp-server\", \"UsePAM\": \"yes\", \"HostKey\": [\"/etc/ssh/ssh_host_rsa_key\", \"/etc/ssh/ssh_host_ecdsa_key\", \"/etc/ssh/ssh_host_ed25519_key\"], \"GSSAPICleanupCredentials\": \"no\", \"SyslogFacility\": \"AUTHPRIV\", \"GSSAPIAuthentication\": \"yes\", \"PasswordAuthentication\": \"no\", \"AuthorizedKeysFile\": \".ssh/authorized_keys\", \"AcceptEnv\": [\"LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES\", \"LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT\", \"LC_IDENTIFICATION LC_ALL LANGUAGE\", \"XMODIFIERS\"], \"UsePrivilegeSeparation\": \"sandbox\", \"X11Forwarding\": \"yes\", \"ChallengeResponseAuthentication\": \"no\"}, \"type\": \"json\", \"description\": \"Mapping of sshd_config values\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"BannerText\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Configures Banner text in sshd_config\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MessageOfTheDay\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Configures /etc/motd text\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/iscsid.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for iscsid\", \"value\": {\"service_name\": \"iscsid\", \"step_config\": \"include ::tripleo::profile::base::iscsid\", \"config_settings\": {}}}}, \"heat_template_version\": \"pike\", \"description\": \"Configure iscsid\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/net-config-bridge.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The OsNetConfigImpl resource.\", \"value\": {\"get_resource\": \"OsNetConfigImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software Config to drive os-net-config for a simple bridge.\
0.355 | 3311: \", \"parameters\": {\"StorageIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage network\"}, \"StorageMgmtIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage_mgmt network\"}, \"TenantIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the tenant network\"}, \"ManagementIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the management network\"}, \"ControlPlaneIp\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the ctlplane network\"}, \"InternalApiIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the internal_api network\"}, \"ExternalIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the external network\"}}, \"resources\": {\"OsNetConfigImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"str_replace\": {\"params\": {\"$network_config\": {\"network_config\": [{\"use_dhcp\": true, \"type\": \"ovs_bridge\", \"name\": \"bridge_name\", \"members\": [{\"type\": \"interface\", \"name\": \"interface_name\", \"primary\": true}]}]}}, \"template\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/scripts/run-os-net-config.sh\"}}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-vnc-proxy.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Vncproxy service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaVNCProxy\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"nova::vncproxy::enabled\": true, \"nova::vncproxy::common::vncproxy_host\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"host_nobrackets\"]}, \"nova::vncproxy::common::vncproxy_protocol\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"protocol\"]}, \"nova::vncproxy::common::vncproxy_port\": {\"get_param\": [\"EndpointMap\", \"NovaVNCProxyPublic\", \"port\"]}, \"tripleo.nova_vnc_proxy.firewall_rules\": {\"137 nova_vnc_proxy\": {\"dport\": [6080, 13080]}}, \"nova::vncproxy::host\": {\"get_param\": [\"ServiceNetMap\", \"NovaApiNetwork\"]}}]}, \"upgrade_tasks\": [{\"name\": \"Stop nova_vnc_proxy service\", \"service\": \"name=openstack-nova-consoleauth state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"nova\"], \"service_name\": \"nova_vnc_proxy\", \"logging_source\": {\"get_param\": \"NovaVncproxyLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::nova::vncproxy\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova Vncproxy service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NovaVncproxyLoggingSource\": {\"default\": {\"path\": \"/var/log/nova/nova-vncproxy.log\", \"tag\": \"openstack.nova.vncproxy\"}, \"type\": \"json\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"MonitoringSubscriptionNovaVNCProxy\": {\"default\": \"overcloud-nova-vncproxy\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-consoleauth.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Nova Consoleauth service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNovaConsoleauth\"}, \"config_settings\": {\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"name\": \"Stop nova_consoleauth service\", \"service\": \"name=openstack-nova-consoleauth state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"nova\"], \"service_name\": \"nova_consoleauth\", \"logging_source\": {\"get_param\": \"NovaConsoleauthLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::nova::consoleauth\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Nova Consoleauth service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"MonitoringSubscriptionNovaConsoleauth\": {\"default\": \"overcloud-nova-consoleauth\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NovaConsoleauthLoggingSource\": {\"default\": {\"path\": \"/var/log/nova/nova-consoleauth.log\", \"tag\": \"openstack.nova.consoleauth\"}, \"type\": \"json\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/net-config-static-bridge.yaml": "{\"outputs\": {\"OS::stack_id\": {\"description\": \"The OsNetConfigImpl resource.\", \"value\": {\"get_resource\": \"OsNetConfigImpl\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.\
0.355 | 3311: \", \"parameters\": {\"StorageIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage network\"}, \"EC2MetadataIp\": {\"type\": \"string\", \"description\": \"The IP address of the EC2 metadata server.\"}, \"TenantIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the tenant network\"}, \"ManagementIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the management network\"}, \"ExternalIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the external network\"}, \"ControlPlaneSubnetCidr\": {\"default\": \"24\", \"type\": \"string\", \"description\": \"The subnet CIDR of the control plane network.\"}, \"ControlPlaneDefaultRoute\": {\"type\": \"string\", \"description\": \"The default route of the control plane network.\"}, \"DnsServers\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.\"}, \"StorageMgmtIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the storage_mgmt network\"}, \"ControlPlaneIp\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the ctlplane network\"}, \"InternalApiIpSubnet\": {\"default\": \"\", \"type\": \"string\", \"description\": \"IP address/subnet on the internal_api network\"}}, \"resources\": {\"OsNetConfigImpl\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"script\", \"config\": {\"str_replace\": {\"params\": {\"$network_config\": {\"network_config\": [{\"dns_servers\": {\"get_param\": \"DnsServers\"}, \"name\": \"bridge_name\", \"members\": [{\"type\": \"interface\", \"name\": \"interface_name\", \"primary\": true}], \"routes\": [{\"ip_netmask\": \"169.254.169.254/32\", \"next_hop\": {\"get_param\": \"EC2MetadataIp\"}}, {\"default\": true, \"next_hop\": {\"get_param\": \"ControlPlaneDefaultRoute\"}}], \"use_dhcp\": false, \"type\": \"ovs_bridge\", \"addresses\": [{\"ip_netmask\": {\"list_join\": [\"/\", [{\"get_param\": \"ControlPlaneIp\"}, {\"get_param\": \"ControlPlaneSubnetCidr\"}]]}}]}]}}, \"template\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/scripts/run-os-net-config.sh\"}}}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/redis-base.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the redis role.\", \"value\": {\"service_name\": \"redis_base\", \"config_settings\": {\"redis::masterauth\": {\"get_param\": \"RedisPassword\"}, \"redis::sentinel::notification_script\": \"/usr/local/bin/redis-notifications.sh\", \"redis::port\": 6379, \"redis::sentinel::redis_host\": \"%{hiera('bootstrap_nodeid_ip')}\", \"redis::sentinel_auth_pass\": {\"get_param\": \"RedisPassword\"}, \"redis::bind\": {\"if\": [\"use_tls_proxy\", \"localhost\", {\"get_param\": [\"ServiceNetMap\", \"RedisNetwork\"]}]}, \"redis::sentinel::master_name\": \"%{hiera('bootstrap_nodeid')}\", \"redis::sentinel::sentinel_bind\": {\"if\": [\"use_tls_proxy\", \"localhost\", {\"get_param\": [\"ServiceNetMap\", \"RedisNetwork\"]}]}, \"redis::ulimit\": {\"get_param\": \"RedisFDLimit\"}, \"redis::requirepass\": {\"get_param\": \"RedisPassword\"}}}}}, \"heat_template_version\": \"pike\", \"conditions\": {\"use_tls_proxy\": {\"equals\": [{\"get_param\": 
\"EnableInternalTLS\"}, true]}}, \"description\": \"OpenStack Redis service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"RedisFDLimit\": {\"default\": 10240, \"type\": \"string\", \"description\": \"Configure Redis FD limit\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/logging/fluentd-config.yaml": "{\"outputs\": {\"LoggingSSLCertificate\": {\"value\": {\"get_param\": \"LoggingSSLCertificate\"}}, \"LoggingDefaultFilters\": {\"value\": {\"get_param\": \"LoggingDefaultFilters\"}}, \"LoggingServers\": {\"value\": {\"get_param\": \"LoggingServers\"}}, \"LoggingPosFilePath\": {\"value\": {\"get_param\": \"LoggingPosFilePath\"}}, \"LoggingExtraFilters\": {\"value\": {\"get_param\": \"LoggingExtraFilters\"}}, \"LoggingDefaultGroups\": {\"value\": {\"get_param\": \"LoggingDefaultGroups\"}}, \"LoggingDefaultFormat\": {\"value\": {\"get_param\": \"LoggingDefaultFormat\"}}, \"LoggingExtraGroups\": {\"value\": {\"get_param\": \"LoggingExtraGroups\"}}, \"LoggingDefaultSources\": {\"value\": {\"get_param\": \"LoggingDefaultSources\"}}, \"LoggingSSLKeyPassphrase\": {\"value\": {\"get_param\": \"LoggingSSLKeyPassphrase\"}}, \"LoggingSSLKey\": {\"value\": {\"get_param\": \"LoggingSSLKey\"}}, \"LoggingExtraSources\": {\"value\": {\"get_param\": \"LoggingExtraSources\"}}, \"LoggingSharedKey\": {\"value\": {\"get_param\": \"LoggingSharedKey\"}}, \"LoggingUsesSSL\": {\"value\": {\"get_param\": \"LoggingUsesSSL\"}}}, \"heat_template_version\": \"pike\", \"description\": \"Fluentd logging configuration\", \"parameters\": {\"LoggingDefaultSources\": {\"default\": [], \"type\": \"json\", \"description\": \"A list of default logging sources for fluentd. You should only override this parameter if you wish to disable the default logging sources. Use LoggingExtraSources to define additional source configurations.\
0.355 | 3311: \"}, \"LoggingPosFilePath\": {\"default\": \"/var/cache/fluentd\", \"type\": \"string\", \"description\": \"Directory in which to place fluentd pos_file files (used to track file position for the 'tail' input type).\
0.355 | 3311: \"}, \"LoggingDefaultFormat\": {\"default\": \"/(?<time>\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}.\\\\d+) (?<pid>\\\\d+) (?<priority>\\\\S+) (?<message>.*)$/\", \"type\": \"string\", \"description\": \"Default format used to parse messages from log files.\
0.355 | 3311: \"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"LoggingServers\": {\"default\": [], \"type\": \"json\", \"description\": \"A list of destinations to which fluentd will forward log messages. Expects\
0.355 | 3311: a list of dictionaries of the form:\
0.355 | 3311: \
0.355 | 3311: - host: loghost1.example.com\
0.355 | 3311: port: 24224\
0.355 | 3311: - host: loghost2.example.com\
0.355 | 3311: port: 24224\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\
0.355 | 3311: \"}, \"LoggingUsesSSL\": {\"default\": false, \"type\": \"boolean\", \"description\": \"A boolean value indicating whether or not we should forward log messages use the secure_forward plugin.\
0.355 | 3311: \"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"LoggingSharedKey\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Shared secret for fluentd secure-forward plugin.\
0.355 | 3311: \"}, \"LoggingSSLCertificate\": {\"default\": \"\", \"type\": \"string\", \"description\": \"PEM-encoded SSL CA certificate for fluentd.\
0.355 | 3311: \"}, \"LoggingDefaultFilters\": {\"default\": [{\"tag_pattern\": \"**\", \"record\": {\"host\": \"${hostname}\"}, \"type\": \"record_transformer\"}, {\"tag_pattern\": \"openstack.**\", \"record\": {\"component\": \"${tag_parts[1]}\"}, \"type\": \"record_transformer\"}], \"type\": \"json\", \"description\": \"A list of fluentd default filters. This will be passed verbatim to the 'filter' key of a fluentd::config resource. Only override this if you do not want the default set of filters; use LoggingExtraFilters if you just want to add additional servers.\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"LoggingDefaultGroups\": {\"default\": [\"root\"], \"type\": \"comma_delimited_list\", \"description\": \"Make fluentd user a member of these groups. Only override this parameter if you want to modify the default list of groups. Use LoggingExtraGroups to add the fluentd user to additional groups.\
0.355 | 3311: \"}, \"LoggingExtraFilters\": {\"default\": [], \"type\": \"json\", \"description\": \"A list of additional fluentd filters. This will be passed verbatim to the 'filter' key of a fluentd::config resource.\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"LoggingSSLKeyPassphrase\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Passphrase for LoggingSSLKey (used by in_secure_forward).\
0.355 | 3311: \"}, \"LoggingSSLKey\": {\"default\": \"\", \"type\": \"string\", \"description\": \"PEM-encoded key for fluentd CA certificate (used by in_secure_forward).\
0.355 | 3311: \"}, \"LoggingExtraSources\": {\"default\": [], \"type\": \"json\", \"description\": \"A list of additional logging sources for fluentd. These will be combined with the LoggingDefaultSources and any logging sources defined by composable services.\
0.355 | 3311: \"}, \"LoggingExtraGroups\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"Make fluentd user a member of these groups (in addition to LoggingDefaultGroups and the groups provided by individual composable services).\
0.355 | 3311: \"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker/haproxy.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the HAproxy with pacemaker role.\", \"value\": {\"metadata_settings\": {\"get_attr\": [\"LoadbalancerServiceBase\", \"role_data\", \"metadata_settings\"]}, \"service_name\": \"haproxy\", \"step_config\": \"include ::tripleo::profile::pacemaker::haproxy\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"LoadbalancerServiceBase\", \"role_data\", \"config_settings\"]}, {\"tripleo::haproxy::mysql_clustercheck\": true, \"tripleo::haproxy::haproxy_service_manage\": false}]}, \"monitoring_subscription\": {\"get_attr\": [\"LoadbalancerServiceBase\", \"role_data\", \"monitoring_subscription\"]}}}}, \"heat_template_version\": \"pike\", \"description\": \"HAproxy service with Pacemaker configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"LoadbalancerServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/haproxy.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2-nuage.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron ML2/Nuage plugin\", \"value\": {\"service_name\": \"neutron_plugin_ml2_nuage\", \"step_config\": \"include tripleo::profile::base::neutron::plugins::ml2\", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronML2Base\", \"role_data\", \"config_settings\"]}, {\"nova::api::use_forwarded_for\": {\"get_param\": \"UseForwardedFor\"}, \"neutron::plugins::ml2::nuage::nuage_vsd_username\": {\"get_param\": \"NeutronNuageVSDUsername\"}, \"neutron::plugins::ml2::nuage::nuage_cms_id\": {\"get_param\": \"NeutronNuageCMSId\"}, \"neutron::plugins::ml2::nuage::nuage_vsd_organization\": {\"get_param\": \"NeutronNuageVSDOrganization\"}, \"neutron::plugins::ml2::nuage::nuage_vsd_ip\": {\"get_param\": \"NeutronNuageVSDIp\"}, \"nova::patch::config::monkey_patch_modules\": {\"get_param\": \"NovaPatchConfigMonkeyPatchModules\"}, \"neutron::plugins::ml2::nuage::nuage_vsd_password\": {\"get_param\": \"NeutronNuageVSDPassword\"}, \"neutron::plugins::ml2::nuage::nuage_net_partition_name\": {\"get_param\": \"NeutronNuageNetPartitionName\"}, \"neutron::plugins::ml2::nuage::nuage_base_uri_version\": {\"get_param\": \"NeutronNuageBaseURIVersion\"}, \"nova::patch::config::monkey_patch\": {\"get_param\": \"NovaPatchConfigMonkeyPatch\"}}]}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron ML2/Nuage plugin configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NeutronNuageVSDPassword\": {\"type\": \"string\", \"description\": \"Password to be used to log into VSD\"}, \"NovaPatchConfigMonkeyPatch\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Apply monkey patching or not\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronNuageCMSId\": {\"type\": \"string\", \"description\": \"Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"UseForwardedFor\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.\"}, \"NeutronNuageVSDOrganization\": {\"default\": \"organization\", \"type\": \"string\", \"description\": \"Organization parameter required to log into VSD\"}, \"NovaPatchConfigMonkeyPatchModules\": {\"default\": \"\", \"type\": \"comma_delimited_list\", \"description\": \"List of modules/decorators to monkey patch\"}, \"NeutronNuageVSDIp\": {\"type\": \"string\", \"description\": \"IP address and port of the Virtual Services Directory\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronNuageVSDUsername\": {\"type\": \"string\", \"description\": \"Username to be used to log into VSD\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NeutronNuageBaseURIVersion\": {\"default\": \"default_uri_version\", \"type\": \"string\", \"description\": \"URI version to be used based on the VSD release\"}, \"NeutronNuageNetPartitionName\": {\"default\": \"default_name\", \"type\": \"string\", \"description\": \"Specifies the title that you will see on the VSD\"}}, \"resources\": {\"NeutronML2Base\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/post.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Post-deploy configuration steps via puppet for all roles, as defined in ../roles_data.yaml\
0.355 | 3311: \", \"parameters\": {\"DockerPuppetDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debug logging with docker-puppet.py\"}, \"role_data\": {\"type\": \"json\", \"description\": \"Mapping of Role name e.g Controller to the per-role data\"}, \"stack_name\": {\"type\": \"string\", \"description\": \"Name of the topmost stack\"}, \"servers\": {\"type\": \"json\", \"description\": \"Mapping of Role name e.g Controller to a list of servers\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"DeployIdentifier\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Setting this to a unique value will re-run any deployment tasks which perform configuration on a Heat stack-update.\
0.355 | 3311: \"}, \"DockerPuppetProcessCount\": {\"default\": 3, \"type\": \"number\", \"description\": \"Number of concurrent processes to use when running docker-puppet to generate config files.\"}, \"ctlplane_service_ips\": {\"type\": \"json\"}}, \"outputs\": {\"RoleConfig\": {\"description\": \"Mapping of config data for all roles\", \"value\": {\"upgrade_steps_tasks\": \"- include: Controller/upgrade_tasks.yaml\
0.355 | 3311: when: role_name == 'Controller'\
0.355 | 3311: \", \"update_steps_tasks\": \"- include: Controller/update_tasks.yaml\
0.355 | 3311: when: role_name == 'Controller'\
0.355 | 3311: \", \"update_steps_playbook\": \"- hosts: overcloud\
0.355 | 3311: serial: 1\
0.355 | 3311: tasks:\
0.355 | 3311: - include: update_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: - include: deploy_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: \", \"deploy_steps_playbook\": \"- hosts: overcloud\
0.355 | 3311: tasks:\
0.355 | 3311: - include: Controller/host_prep_tasks.yaml\
0.355 | 3311: when: role_name == 'Controller'\
0.355 | 3311: - include: deploy_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: \", \"upgrade_steps_playbook\": \"- hosts: overcloud\
0.355 | 3311: tasks:\
0.355 | 3311: - include: upgrade_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\
0.355 | 3311: - include: deploy_steps_tasks.yaml\
0.355 | 3311: with_sequence: start=0 end=5\
0.355 | 3311: loop_control:\
0.355 | 3311: loop_var: step\", \"deploy_steps_tasks\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/deploy-steps-tasks.yaml\"}}}}, \"conditions\": {\"WorkflowTasks_Step1_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step1\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step4_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step4\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step3_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step3\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step5_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step5\"]}, \"\"]}}, false]}, \"WorkflowTasks_Step2_Enabled\": {\"or\": [{\"not\": {\"equals\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\", \"step2\"]}, \"\"]}}, false]}}, \"resources\": {\"ControllerPostConfig\": {\"depends_on\": [\"ControllerExtraConfigPost\"], \"type\": \"OS::TripleO::Tasks::ControllerPostConfig\", \"properties\": {\"input_values\": {\"update_identifier\": {\"get_param\": \"DeployIdentifier\"}}, \"servers\": {\"get_param\": \"servers\"}}}, \"WorkflowTasks_Step5_Execution\": {\"depends_on\": \"WorkflowTasks_Step5\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step5\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step5\"}}}}, \"condition\": \"WorkflowTasks_Step5_Enabled\"}, \"WorkflowTasks_Step3_Execution\": {\"depends_on\": \"WorkflowTasks_Step3\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step3\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step3\"}}}}, \"condition\": \"WorkflowTasks_Step3_Enabled\"}, \"ControllerHostPrepDeployment\": {\"type\": \"OS::Heat::SoftwareDeploymentGroup\", \"properties\": {\"config\": {\"get_resource\": \"ControllerHostPrepConfig\"}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerPreConfig\": {\"depends_on\": \"ControllerHostPrepDeployment\", \"type\": \"OS::TripleO::Tasks::ControllerPreConfig\", \"properties\": {\"input_values\": {\"update_identifier\": {\"get_param\": \"DeployIdentifier\"}}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step5\": 
{\"depends_on\": [\"WorkflowTasks_Step5_Execution\", \"ControllerDeployment_Step4\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 5}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step5\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step1\": {\"depends_on\": [\"WorkflowTasks_Step1_Execution\", \"ControllerPreConfig\", \"ControllerArtifactsDeploy\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 1}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step1\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step2\": {\"depends_on\": [\"WorkflowTasks_Step2_Execution\", \"ControllerDeployment_Step1\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 2}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step2\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step3\": {\"depends_on\": [\"WorkflowTasks_Step3_Execution\", \"ControllerDeployment_Step2\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 3}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step3\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"ControllerDeployment_Step4\": {\"depends_on\": [\"WorkflowTasks_Step4_Execution\", \"ControllerDeployment_Step3\"], \"type\": \"OS::TripleO::DeploymentSteps\", \"properties\": {\"input_values\": {\"docker_puppet_debug\": {\"get_param\": \"DockerPuppetDebug\"}, \"update_identifier\": {\"get_param\": \"DeployIdentifier\"}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"docker_puppet_process_count\": {\"get_param\": \"DockerPuppetProcessCount\"}, \"role_name\": \"Controller\", \"step\": 4}, \"config\": {\"get_resource\": \"RoleConfig\"}, \"name\": \"ControllerDeployment_Step4\", \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"WorkflowTasks_Step5\": {\"depends_on\": [\"ControllerDeployment_Step4\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": 
{\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step5')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step5\"]]}}, \"condition\": \"WorkflowTasks_Step5_Enabled\"}, \"WorkflowTasks_Step1\": {\"depends_on\": [\"ControllerPreConfig\", \"ControllerArtifactsDeploy\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step1')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step1\"]]}}, \"condition\": \"WorkflowTasks_Step1_Enabled\"}, \"RoleConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"inputs\": [{\"name\": \"step\"}, {\"name\": \"role_name\"}, {\"name\": \"update_identifier\"}, {\"name\": \"bootstrap_server_id\"}, {\"name\": \"docker_puppet_debug\"}, {\"name\": \"docker_puppet_process_count\"}], \"config\": {\"str_replace\": {\"params\": {\"_TASKS\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/deploy-steps-tasks.yaml\"}}, \"template\": \"- hosts: localhost\
0.355 | 3311: connection: local\
0.355 | 3311: tasks:\
0.355 | 3311: _TASKS\
0.355 | 3311: \"}}, \"options\": {\"modulepath\": \"/usr/share/ansible-modules\"}, \"group\": \"ansible\"}}, \"WorkflowTasks_Step2_Execution\": {\"depends_on\": \"WorkflowTasks_Step2\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step2\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step2\"}}}}, \"condition\": \"WorkflowTasks_Step2_Enabled\"}, \"WorkflowTasks_Step4\": {\"depends_on\": [\"ControllerDeployment_Step3\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step4')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step4\"]]}}, \"condition\": \"WorkflowTasks_Step4_Enabled\"}, \"WorkflowTasks_Step1_Execution\": {\"depends_on\": \"WorkflowTasks_Step1\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step1\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step1\"}}}}, \"condition\": \"WorkflowTasks_Step1_Enabled\"}, \"WorkflowTasks_Step3\": {\"depends_on\": [\"ControllerDeployment_Step2\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step3')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": {\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step3\"]]}}, \"condition\": \"WorkflowTasks_Step3_Enabled\"}, \"ControllerArtifactsConfig\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/deploy-artifacts.yaml\"}, \"ControllerExtraConfigPost\": {\"depends_on\": [\"ControllerDeployment_Step5\"], \"type\": \"OS::TripleO::NodeExtraConfigPost\", \"properties\": {\"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"WorkflowTasks_Step2\": {\"depends_on\": [\"ControllerDeployment_Step1\"], \"type\": \"OS::Mistral::Workflow\", \"properties\": {\"tasks\": {\"yaql\": {\"expression\": \"$.data.where($ != '').select($.get('step2')).where($ != null).flatten()\", \"data\": [{\"get_param\": [\"role_data\", \"Controller\", \"workflow_tasks\"]}]}}, \"type\": \"direct\", \"name\": 
{\"list_join\": [\".\", [\"tripleo\", {\"get_param\": \"stack_name\"}, \"workflow_tasks\", \"step2\"]]}}, \"condition\": \"WorkflowTasks_Step2_Enabled\"}, \"ControllerHostPrepConfig\": {\"type\": \"OS::Heat::SoftwareConfig\", \"properties\": {\"group\": \"ansible\", \"config\": {\"str_replace\": {\"params\": {\"_PLAYBOOK\": [{\"connection\": \"local\", \"tasks\": {\"list_concat\": [{\"get_param\": [\"role_data\", \"Controller\", \"host_prep_tasks\"]}, [{\"name\": \"Create /var/lib/tripleo-config directory\", \"file\": \"path=/var/lib/tripleo-config state=directory\"}, {\"copy\": \"content=\\\"{{puppet_step_config}}\\\" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes mode=0600\", \"name\": \"Write the puppet step_config manifest\"}, {\"name\": \"Create /var/lib/docker-puppet\", \"file\": \"path=/var/lib/docker-puppet state=directory\"}, {\"copy\": \"content=\\\"{{puppet_config | to_json}}\\\" dest=/var/lib/docker-puppet/docker-puppet.json force=yes mode=0600\", \"name\": \"Write docker-puppet-tasks json files\"}, {\"copy\": \"content=\\\"{{docker_puppet_script}}\\\" dest=/var/lib/docker-puppet/docker-puppet.py force=yes mode=0600\", \"name\": \"Write docker-puppet.py\"}, {\"copy\": \"content=\\\"{{docker_startup_configs | to_json}}\\\" dest=/var/lib/docker-container-startup-configs.json force=yes mode=0600\", \"name\": \"Write docker-container-startup-configs\"}, {\"with_dict\": \"{{docker_startup_configs}}\", \"copy\": \"content=\\\"{{item.value|to_json}}\\\" dest=\\\"/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json\\\" force=yes mode=0600\", \"name\": \"Write per-step docker-container-startup-configs\"}, {\"name\": \"Create /var/lib/kolla/config_files directory\", \"file\": \"path=/var/lib/kolla/config_files state=directory\"}, {\"with_dict\": \"{{kolla_config}}\", \"copy\": \"content=\\\"{{item.value|to_json}}\\\" dest=\\\"{{item.key}}\\\" force=yes mode=0600\", \"name\": \"Write kolla config json files\"}, {\"with_fileglob\": [\"/var/lib/docker-puppet/docker-puppet-tasks*.json\"], \"when\": \"deploy_server_id == bootstrap_server_id\", \"name\": \"Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files\", \"file\": {\"path\": \"{{item}}\", \"state\": \"absent\"}}, {\"with_dict\": \"{{docker_puppet_tasks}}\", \"copy\": \"content=\\\"{{item.value|to_json}}\\\" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace(\\\"step_\\\", \\\"\\\")}}.json force=yes mode=0600\", \"when\": \"deploy_server_id == bootstrap_server_id\", \"name\": \"Write docker-puppet-tasks json files\"}]]}, \"hosts\": \"localhost\", \"vars\": {\"kolla_config\": {\"get_param\": [\"role_data\", \"Controller\", \"kolla_config\"]}, \"docker_puppet_tasks\": {\"get_param\": [\"role_data\", \"Controller\", \"docker_puppet_tasks\"]}, \"bootstrap_server_id\": {\"get_param\": [\"servers\", \"Controller\", \"0\"]}, \"puppet_step_config\": {\"get_param\": [\"role_data\", \"Controller\", \"step_config\"]}, \"docker_puppet_script\": {\"get_file\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/docker/docker-puppet.py\"}, \"docker_startup_configs\": {\"get_param\": [\"role_data\", \"Controller\", \"docker_config\"]}, \"puppet_config\": {\"get_param\": [\"role_data\", \"Controller\", \"puppet_config\"]}}}]}, \"template\": \"_PLAYBOOK\"}}, \"options\": {\"modulepath\": \"/usr/share/ansible-modules\"}}}, \"ControllerArtifactsDeploy\": {\"type\": \"OS::Heat::StructuredDeploymentGroup\", \"properties\": {\"config\": {\"get_resource\": 
\"ControllerArtifactsConfig\"}, \"servers\": {\"get_param\": [\"servers\", \"Controller\"]}}}, \"WorkflowTasks_Step4_Execution\": {\"depends_on\": \"WorkflowTasks_Step4\", \"type\": \"OS::Mistral::ExternalResource\", \"properties\": {\"always_update\": true, \"actions\": {\"CREATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step4\"}}, \"UPDATE\": {\"params\": {\"evaluate_env\": false, \"env\": {\"role_merged_configs\": {\"Controller\": {\"get_param\": [\"role_data\", \"Controller\", \"merged_config_settings\"]}}, \"service_ips\": {\"get_param\": \"ctlplane_service_ips\"}}}, \"workflow\": {\"get_resource\": \"WorkflowTasks_Step4\"}}}}, \"condition\": \"WorkflowTasks_Step4_Enabled\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/ceilometer-collector-disabled.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the disabled Ceilometer Collector role.\", \"value\": {\"service_name\": \"ceilometer_collector_disabled\", \"upgrade_tasks\": [{\"name\": \"Stop and disable ceilometer_collector service on upgrade\", \"service\": \"name=openstack-ceilometer-collector state=stopped enabled=no\", \"tags\": \"step1\"}]}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Ceilometer Collector service, disabled since pike\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-api.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Sahara API role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"SaharaBase\", \"role_data\", \"config_settings\"]}, {\"sahara::host\": {\"get_param\": [\"ServiceNetMap\", \"SaharaApiNetwork\"]}, \"sahara::port\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"port\"]}, \"tripleo.sahara_api.firewall_rules\": {\"132 sahara\": {\"dport\": [8386, 13386]}}, \"sahara::policy::policies\": {\"get_param\": \"SaharaApiPolicies\"}, \"sahara::service::api::api_workers\": {\"get_param\": \"SaharaWorkers\"}}]}, \"upgrade_tasks\": [{\"name\": \"Stop sahara_api service\", \"service\": \"name=openstack-sahara-api state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"sahara\"], \"service_name\": \"sahara_api\", \"step_config\": \"include ::tripleo::profile::base::sahara::api\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionSaharaApi\"}, \"service_config_settings\": {\"keystone\": {\"sahara::keystone::auth::tenant\": \"service\", \"sahara::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"sahara::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"SaharaAdmin\", \"uri\"]}, \"sahara::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"SaharaPublic\", \"uri\"]}, \"sahara::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"SaharaInternal\", \"uri\"]}, \"sahara::keystone::auth::password\": {\"get_param\": \"SaharaPassword\"}}, \"mysql\": {\"sahara::db::mysql::user\": \"sahara\", \"sahara::db::mysql::password\": {\"get_param\": \"SaharaPassword\"}, \"sahara::db::mysql::dbname\": \"sahara\", \"sahara::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"sahara::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}}}, \"logging_source\": {\"get_param\": \"SaharaApiLoggingSource\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Sahara API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"SaharaApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Sahara API.\
0.355 | 3311: e.g. { sahara-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"SaharaWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"The number of workers for the sahara-api.\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"SaharaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the sahara service account, used by sahara-api.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"SaharaApiLoggingSource\": {\"default\": {\"path\": \"/var/log/sahara/sahara-api.log\", \"tag\": \"openstack.sahara.api\"}, \"type\": \"json\"}, \"MonitoringSubscriptionSaharaApi\": {\"default\": \"overcloud-sahara-api\", \"type\": \"string\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"SaharaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/time/ntp.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role ntp using composable services.\", \"value\": {\"service_name\": \"ntp\", \"step_config\": \"include ::tripleo::profile::base::time::ntp\", \"config_settings\": {\"ntp::servers\": {\"get_param\": \"NtpServer\"}, \"tripleo.ntp.firewall_rules\": {\"105 ntp\": {\"dport\": 123, \"proto\": \"udp\"}}}}}}, \"heat_template_version\": \"pike\", \"description\": \"NTP service deployment using puppet, this YAML file creates the interface between the HOT template and the puppet manifest that actually installs and configure NTP.\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NtpServer\": {\"default\": [\"pool.ntp.org\"], \"type\": \"comma_delimited_list\", \"description\": \"NTP servers list. Defaulted to pool.ntp.org in order to have a sane default for Pacemaker deployments when not configuring this parameter by default.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Pacemaker role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionPacemaker\"}, \"config_settings\": {\"tripleo::fencing::config\": {\"get_param\": \"FencingConfig\"}, \"corosync_ipv6\": {\"get_param\": \"CorosyncIPv6\"}, \"tripleo.pacemaker.firewall_rules\": {\"131 pacemaker udp\": {\"dport\": 5405, \"proto\": \"udp\"}, \"130 pacemaker tcp\": {\"dport\": [2224, 3121, 21064], \"proto\": \"tcp\"}}, \"tripleo::profile::base::pacemaker::remote_authkey\": {\"get_param\": \"PacemakerRemoteAuthkey\"}, \"hacluster_pwd\": {\"yaql\": {\"expression\": \"$.data.passwords.where($ != '').first()\", \"data\": {\"passwords\": [{\"get_param\": \"PcsdPassword\"}, {\"get_param\": [\"DefaultPasswords\", \"pcsd_password\"]}]}}}, \"pacemaker::corosync::cluster_name\": \"tripleo_cluster\", \"pacemaker::resource_defaults::defaults\": {\"resource-stickiness\": {\"value\": \"INFINITY\"}}, \"pacemaker::corosync::settle_tries\": {\"get_param\": \"CorosyncSettleTries\"}, \"enable_fencing\": {\"get_param\": \"EnableFencing\"}, \"corosync_token_timeout\": 10000, \"pacemaker::corosync::manage_fw\": false}, \"upgrade_tasks\": [{\"pacemaker_cluster\": \"state=online check_and_fail=true\", \"poll\": 4, \"name\": \"Check pacemaker cluster running before upgrade\", \"tags\": \"step0,validation\", \"async\": 30}, {\"pacemaker_cluster\": \"state=offline\", \"name\": \"Stop pacemaker cluster\", \"tags\": \"step3\"}, {\"pacemaker_cluster\": \"state=online\", \"name\": \"Start pacemaker cluster\", \"tags\": \"step4\"}], \"logging_groups\": [\"haclient\"], \"service_name\": \"pacemaker\", \"logging_source\": {\"get_param\": \"PacemakerLoggingSource\"}, \"step_config\": \"include ::tripleo::profile::base::pacemaker\
0.355 | 3311: \"}}}, \"heat_template_version\": \"pike\", \"description\": \"Pacemaker service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"EnableFencing\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable fencing in Pacemaker or not.\"}, \"FencingConfig\": {\"default\": {}, \"type\": \"json\", \"description\": \"Pacemaker fencing configuration. The JSON should have\
0.355 | 3311: the following structure:\
0.355 | 3311: {\
0.355 | 3311: \\\"devices\\\": [\
0.355 | 3311: {\
0.355 | 3311: \\\"agent\\\": \\\"AGENT_NAME\\\",\
0.355 | 3311: \\\"host_mac\\\": \\\"HOST_MAC_ADDRESS\\\",\
0.355 | 3311: \\\"params\\\": {\\\"PARAM_NAME\\\": \\\"PARAM_VALUE\\\"}\
0.355 | 3311: }\
0.355 | 3311: ]\
0.355 | 3311: }\
0.355 | 3311: For instance:\
0.355 | 3311: {\
0.355 | 3311: \\\"devices\\\": [\
0.355 | 3311: {\
0.355 | 3311: \\\"agent\\\": \\\"fence_xvm\\\",\
0.355 | 3311: \\\"host_mac\\\": \\\"52:54:00:aa:bb:cc\\\",\
0.355 | 3311: \\\"params\\\": {\
0.355 | 3311: \\\"multicast_address\\\": \\\"225.0.0.12\\\",\
0.355 | 3311: \\\"port\\\": \\\"baremetal_0\\\",\
0.355 | 3311: \\\"manage_fw\\\": true,\
0.355 | 3311: \\\"manage_key_file\\\": true,\
0.355 | 3311: \\\"key_file\\\": \\\"/etc/fence_xvm.key\\\",\
0.355 | 3311: \\\"key_file_password\\\": \\\"abcdef\\\"\
0.355 | 3311: }\
0.355 | 3311: }\
0.355 | 3311: ]\
0.355 | 3311: }\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"MonitoringSubscriptionPacemaker\": {\"default\": \"overcloud-pacemaker\", \"type\": \"string\"}, \"CorosyncSettleTries\": {\"default\": 360, \"type\": \"number\", \"description\": \"Number of tries for cluster settling. This has the same default as the pacemaker puppet module. Override to a smaller value when in need to replace a controller node.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"PcsdPassword\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\", \"description\": \"The password for the 'pcsd' user for pacemaker.\"}, \"PacemakerRemoteAuthkey\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\", \"description\": \"The authkey for the pacemaker remote service.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"EnableLoadBalancer\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to deploy a LoadBalancer on the Controller\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"PacemakerLoggingSource\": {\"default\": {\"path\": \"/var/log/pacemaker.log,/var/log/cluster/corosync.log\", \"tag\": \"system.pacemaker\", \"format\": \"/^(?<time>[^ ]*\\\\s*[^ ]* [^ ]*) \\\\[(?<pid>[^ ]*)\\\\] (?<host>[^ ]*) (?<message>.*)$/\"}, \"type\": \"json\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"CorosyncIPv6\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable IPv6 in Corosync\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-engine.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Heat Engine role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"HeatBase\", \"role_data\", \"config_settings\"]}, {\"heat::engine::heat_waitcondition_server_url\": {\"make_url\": {\"path\": \"/v1/waitcondition\", \"host\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"host\"]}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"port\"]}}}, \"heat::keystone_ec2_uri\": {\"list_join\": [\"\", [{\"get_param\": [\"EndpointMap\", \"KeystoneV3Internal\", \"uri\"]}, \"/ec2tokens\"]]}, \"heat::engine::max_nested_stack_depth\": 6, \"tripleo::profile::base::heat::manage_db_purge\": {\"get_param\": \"HeatEnableDBPurge\"}, \"heat::database_connection\": {\"make_url\": {\"username\": \"heat\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"HeatPassword\"}, \"path\": \"/heat\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"heat::engine::auth_encryption_key\": {\"yaql\": {\"expression\": \"$.data.passwords.where($ != '').first()\", \"data\": {\"passwords\": [{\"get_param\": \"HeatAuthEncryptionKey\"}, 
{\"get_param\": [\"DefaultPasswords\", \"heat_auth_encryption_key\"]}]}}}, \"heat::engine::heat_metadata_server_url\": {\"make_url\": {\"host\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"host\"]}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"protocol\"]}, \"port\": {\"get_param\": [\"EndpointMap\", \"HeatCfnPublic\", \"port\"]}}}, \"heat::engine::max_resources_per_stack\": {\"get_param\": \"HeatMaxResourcesPerStack\"}, \"heat::engine::configure_delegated_roles\": false, \"heat::engine::num_engine_workers\": {\"get_param\": \"HeatWorkers\"}, \"heat::engine::convergence_engine\": {\"get_param\": \"HeatConvergenceEngine\"}, \"heat::engine::trusts_delegated_roles\": [], \"heat::keystone::domain::domain_password\": {\"get_param\": \"HeatStackDomainAdminPassword\"}}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"heat_engine_enabled\", \"command\": \"systemctl is-enabled openstack-heat-engine\", \"name\": \"Check if heat_engine is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"heat_engine_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-heat-engine is running\", \"tags\": \"step0,validation\"}, {\"when\": \"heat_engine_enabled.rc == 0\", \"name\": \"Stop heat_engine service\", \"service\": \"name=openstack-heat-engine state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"heat\"], \"service_name\": \"heat_engine\", \"step_config\": \"include ::tripleo::profile::base::heat::engine\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionHeatEngine\"}, \"service_config_settings\": {\"keystone\": {\"tripleo::profile::base::keystone::heat_admin_password\": {\"get_param\": \"HeatStackDomainAdminPassword\"}}, \"mysql\": {\"heat::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"heat::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"heat::db::mysql::user\": \"heat\", \"heat::db::mysql::password\": {\"get_param\": \"HeatPassword\"}, \"heat::db::mysql::dbname\": \"heat\"}}, \"logging_source\": {\"get_param\": \"HeatEngineLoggingSource\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"Openstack Heat Engine service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HeatWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Heat service.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"HeatAuthEncryptionKey\": {\"default\": \"\", \"hidden\": true, \"type\": \"string\", \"description\": \"Auth encryption key for heat-engine\"}, \"HeatMaxResourcesPerStack\": {\"default\": 1000, \"type\": \"number\", \"description\": \"Maximum resources allowed per top-level stack. -1 stands for unlimited.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"HeatEngineLoggingSource\": {\"default\": {\"path\": \"/var/log/heat/heat-engine.log\", \"tag\": \"openstack.heat.engine\"}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"HeatConvergenceEngine\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Enables the heat engine with the convergence architecture.\"}, \"HeatEnableDBPurge\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to create cron job for purging soft deleted rows in the Heat database.\
0.355 | 3311: \"}, \"HeatStackDomainAdminPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Password for heat_stack_domain_admin user.\"}, \"HeatPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the Heat service and db account, used by the Heat services.\"}, \"MonitoringSubscriptionHeatEngine\": {\"default\": \"overcloud-heat-engine\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"HeatBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-metadata.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron Metadata agent configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronWorkers\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Sets the number of worker processes for the neutron metadata agent. The\
default value results in the configuration being left unset and a\
system-dependent default will be chosen (usually the number of\
processors). Please note that this can result in a large number of\
processes and memory consumption on systems with a large core count. On\
such systems it is recommended that a non-default value be selected that\
matches the load requirements.\
0.355 | 3311: \"}, \"MonitoringSubscriptionNeutronMetadata\": {\"default\": \"overcloud-neutron-metadata\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NeutronPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the neutron service and db account, used by neutron agents.\"}, \"NeutronMetadataAgentLoggingSource\": {\"default\": {\"path\": \"/var/log/neutron/metadata-agent.log\", \"tag\": \"openstack.neutron.agent.metadata\"}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"NeutronMetadataProxySharedSecret\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Shared secret to prevent spoofing\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron Metadata agent service.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNeutronMetadata\"}, \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"neutron::agents::metadata::metadata_host\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaMetadataNetwork\"]}}, \"template\": \"%{hiera('cloud_name_$NETWORK')}\"}}, \"neutron::agents::metadata::shared_secret\": {\"get_param\": \"NeutronMetadataProxySharedSecret\"}, \"neutron::agents::metadata::metadata_ip\": \"%{hiera('nova_metadata_vip')}\", \"neutron::agents::metadata::metadata_protocol\": {\"if\": [\"internal_tls_enabled\", \"https\", \"http\"]}, \"neutron::agents::metadata::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"neutron::agents::metadata::auth_password\": {\"get_param\": \"NeutronPassword\"}, \"neutron::agents::metadata::auth_tenant\": \"service\"}, {\"if\": [\"neutron_workers_unset\", {}, {\"neutron::agents::metadata::metadata_workers\": {\"get_param\": \"NeutronWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"neutron_metadata_agent_enabled\", \"command\": \"systemctl is-enabled neutron-metadata-agent\", \"name\": \"Check if neutron_metadata_agent is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'neutron-metadata-agent' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"neutron_metadata_agent_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service neutron-metadata-agent is running\", \"tags\": \"step0,validation\"}, {\"when\": \"neutron_metadata_agent_enabled.rc == 0\", \"name\": \"Stop neutron_metadata service\", \"service\": \"name=neutron-metadata-agent state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"neutron\"], \"service_name\": \"neutron_metadata\", \"logging_source\": {\"get_param\": \"NeutronMetadataAgentLoggingSource\"}, \"step_config\": \"include tripleo::profile::base::neutron::metadata\
0.355 | 3311: \"}}}, \"conditions\": {\"neutron_workers_unset\": {\"equals\": [{\"get_param\": \"NeutronWorkers\"}, \"\"]}, \"internal_tls_enabled\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-evaluator.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Aodh Evaluator service.\", \"value\": {\"service_name\": \"aodh_evaluator\", \"step_config\": \"include tripleo::profile::base::aodh::evaluator\
0.355 | 3311: \", \"config_settings\": {\"get_attr\": [\"AodhBase\", \"role_data\", \"config_settings\"]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"aodh_evaluator_enabled\", \"command\": \"systemctl is-enabled openstack-aodh-evaluator\", \"name\": \"Check if aodh_evaluator is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-aodh-evaluator' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"aodh_evaluator_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-aodh-evaluator is running\", \"tags\": \"step0,validation\"}, {\"when\": \"aodh_evaluator_enabled.rc == 0\", \"name\": \"Stop aodh_evaluator service\", \"service\": \"name=openstack-aodh-evaluator state=stopped\", \"tags\": \"step1\"}], \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionAodhEvaluator\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Aodh Evaluator service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"MonitoringSubscriptionAodhEvaluator\": {\"default\": \"overcloud-ceilometer-aodh-evaluator\", \"type\": \"string\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"resources\": {\"AodhBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-plumgrid.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron Plumgrid plugin\", \"value\": {\"service_name\": \"neutron_plugin_plumgrid\", \"step_config\": \"include tripleo::profile::base::neutron::plugins::plumgrid\", \"config_settings\": {\"neutron::plugins::plumgrid::metadata_proxy_shared_secret\": {\"get_param\": \"NeutronMetadataProxySharedSecret\"}, \"neutron::plugins::plumgrid::plumlib_package_ensure\": {\"get_param\": \"PLUMgridPlumlibVersion\"}, \"neutron::plugins::plumgrid::director_server_port\": {\"get_param\": \"PLUMgridDirectorServerPort\"}, \"neutron::plugins::plumgrid::admin_password\": {\"get_param\": \"AdminPassword\"}, \"neutron::plugins::plumgrid::identity_version\": {\"get_param\": \"PLUMgridIdentityVersion\"}, \"neutron::plugins::plumgrid::connector_type\": {\"get_param\": \"PLUMgridConnectorType\"}, \"neutron::plugins::plumgrid::nova_metadata_ip\": {\"get_param\": \"PLUMgridNovaMetadataIP\"}, \"neutron::plugins::plumgrid::l2gateway_vendor\": {\"get_param\": \"PLUMgridL2GatewayVendor\"}, \"neutron::plugins::plumgrid::password\": {\"get_param\": \"PLUMgridPassword\"}, \"neutron::plugins::plumgrid::controller_priv_host\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"host\"]}, \"neutron::plugins::plumgrid::username\": {\"get_param\": \"PLUMgridUsername\"}, \"neutron::plugins::plumgrid::package_ensure\": {\"get_param\": \"PLUMgridNeutronPluginVersion\"}, \"neutron::plugins::plumgrid::l2gateway_sw_username\": {\"get_param\": \"PLUMgridL2GatewayUsername\"}, \"neutron::plugins::plumgrid::l2gateway_sw_password\": {\"get_param\": \"PLUMgridL2GatewayPassword\"}, \"neutron::plugins::plumgrid::director_server\": {\"get_param\": \"PLUMgridDirectorServer\"}, \"neutron::plugins::plumgrid::connection\": {\"make_url\": {\"username\": \"neutron\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"NeutronPassword\"}, \"path\": 
\"/ovs_neutron\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"neutron::plugins::plumgrid::nova_metadata_port\": {\"get_param\": \"PLUMgridNovaMetadataPort\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron Plumgrid plugin\
0.355 | 3311: \", \"parameters\": {\"PLUMgridConnectorType\": {\"default\": \"distributed\", \"type\": \"string\", \"description\": \"Neutron Network Connector Type\"}, \"NeutronPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the neutron service and db account, used by neutron agents.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"PLUMgridIdentityVersion\": {\"default\": \"v2.0\", \"type\": \"string\", \"description\": \"Keystone Identity version\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"PLUMgridPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Password for PLUMgrid platform\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"PLUMgridL2GatewayPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Password for L2 Gateway Switch\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"PLUMgridNovaMetadataPort\": {\"default\": 8775, \"type\": \"string\", \"description\": \"Port of Nova Metadata\"}, \"AdminPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the keystone admin account, used for monitoring, querying neutron etc.\"}, \"PLUMgridUsername\": {\"type\": \"string\", \"description\": \"Username for PLUMgrid platform\"}, \"PLUMgridNeutronPluginVersion\": {\"default\": \"present\", \"type\": \"string\", \"description\": \"PLUMgrid Neutron Plugin version\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"PLUMgridL2GatewayUsername\": {\"default\": \"username\", \"type\": \"string\", \"description\": \"Username for L2 Gateway Switch\"}, \"PLUMgridDirectorServer\": {\"default\": \"127.0.0.1\", \"type\": \"string\", \"description\": \"IP address of the PLUMgrid Director Server\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NeutronMetadataProxySharedSecret\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Shared secret to prevent spoofing\"}, \"PLUMgridNovaMetadataIP\": {\"default\": \"169.254.169.254\", \"type\": \"string\", \"description\": \"IP address of Nova Metadata\"}, \"PLUMgridDirectorServerPort\": {\"default\": 443, \"type\": \"string\", \"description\": \"Port of the PLUMgrid Director Server\"}, \"PLUMgridPlumlibVersion\": {\"default\": \"present\", \"type\": \"string\", \"description\": \"PLUMgrid Plumlib version\"}, \"PLUMgridL2GatewayVendor\": {\"default\": \"vendor\", \"type\": \"string\", \"description\": \"Vendor for L2 Gateway Switch\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-api.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Openstack Heat API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HeatWorkers\": {\"default\": 0, \"type\": \"number\", \"description\": \"Number of workers for Heat service.\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"HeatApiLoggingSource\": {\"default\": {\"path\": \"/var/log/heat/heat-api.log\", \"tag\": \"openstack.heat.api\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MonitoringSubscriptionHeatApi\": {\"default\": \"overcloud-heat-api\", \"type\": \"string\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"HeatPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the Heat service and db account, used by the Heat services.\"}, \"HeatApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Heat API.\
e.g. { heat-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Heat API role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"HeatBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"config_settings\"]}, {\"heat::api::service_name\": \"httpd\", \"heat::wsgi::apache_api::servername\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"heat::wsgi::apache_api::ssl\": {\"get_param\": \"EnableInternalTLS\"}, \"tripleo.heat_api.firewall_rules\": {\"125 heat_api\": {\"dport\": [8004, 13004]}}, \"heat::policy::policies\": {\"get_param\": \"HeatApiPolicies\"}, \"heat::api::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}, \"heat::wsgi::apache_api::bind_host\": {\"get_param\": [\"ServiceNetMap\", \"HeatApiNetwork\"]}}, {\"if\": [\"heat_workers_zero\", {}, {\"heat::wsgi::apache_api::workers\": {\"get_param\": \"HeatWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"heat_api_enabled\", \"command\": \"systemctl is-enabled openstack-heat-api\", \"name\": \"Check is heat_api is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"heat_api_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-heat-api is running\", \"tags\": \"step0,validation\"}, {\"ignore_errors\": true, \"shell\": \"httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi\", \"register\": \"heat_api_apache\", \"name\": \"check for heat_api running under apache (post upgrade)\", \"tags\": \"step1\"}, {\"when\": \"heat_api_apache.rc == 0\", \"name\": \"Stop heat_api service (running under httpd)\", \"service\": \"name=httpd state=stopped\", \"tags\": \"step1\"}, {\"when\": \"heat_api_enabled.rc == 0\", \"name\": \"Stop and disable heat_api service (pre-upgrade not under httpd)\", \"service\": \"name=openstack-heat-api state=stopped enabled=no\", \"tags\": \"step1\"}], \"logging_groups\": [\"heat\"], \"service_name\": \"heat_api\", \"step_config\": \"include ::tripleo::profile::base::heat::api\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionHeatApi\"}, \"service_config_settings\": {\"keystone\": {\"map_merge\": [{\"get_attr\": [\"HeatBase\", \"role_data\", \"service_config_settings\", \"keystone\"]}, {\"heat::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"HeatAdmin\", \"uri\"]}, \"heat::keystone::auth::password\": {\"get_param\": \"HeatPassword\"}, \"heat::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"heat::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"HeatPublic\", \"uri\"]}, \"heat::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"HeatInternal\", \"uri\"]}, \"heat::keystone::auth::tenant\": \"service\"}]}}, \"metadata_settings\": {\"get_attr\": [\"ApacheServiceBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"HeatApiLoggingSource\"}}}}, \"conditions\": {\"heat_workers_zero\": {\"equals\": [{\"get_param\": \"HeatWorkers\"}, 0]}}, \"resources\": {\"ApacheServiceBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"HeatBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-api.yaml": "{\"parameter_groups\": [{\"description\": \"The following parameters are deprecated and will be removed. They should not\
be relied on for new deployments. If you have concerns regarding deprecated\
parameters, please contact the TripleO development team on IRC or the\
OpenStack mailing list.\
0.355 | 3311: \", \"parameters\": [\"NeutronL3HA\"], \"label\": \"deprecated\"}], \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron Server configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"NeutronEnableDVR\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Enable Neutron DVR.\"}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"NeutronAllowL3AgentFailover\": {\"default\": \"True\", \"type\": \"string\", \"description\": \"Allow automatic l3-agent failover\"}, \"NeutronWorkers\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Sets the number of API and RPC workers for the Neutron service.\
The default value results in the configuration being left unset\
and a system-dependent default will be chosen (usually the number\
of processors). Please note that this can result in a large number\
of processes and memory consumption on systems with a large core\
count. On such systems it is recommended that a non-default value\
be selected that matches the load requirements.\
0.355 | 3311: \"}, \"NeutronPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the neutron service and db account, used by neutron agents.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"NeutronApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Neutron API.\
e.g. { neutron-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NovaPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the nova service and db account\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronL3HA\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Whether to enable HA for virtual routers. When not set, L3 HA will be\
automatically enabled if the number of nodes hosting controller\
configurations and DVR is disabled. Valid values are 'true' or 'false'\
This parameter is being deprecated in Newton and is scheduled to be\
removed in Ocata. Future releases will enable L3 HA by default if it is\
appropriate for the deployment type. Alternate mechanisms will be\
available to override.\
0.355 | 3311: \"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"MonitoringSubscriptionNeutronServer\": {\"default\": \"overcloud-neutron-server\", \"type\": \"string\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"NeutronApiLoggingSource\": {\"default\": {\"path\": \"/var/log/neutron/server.log\", \"tag\": \"openstack.neutron.api\"}, \"type\": \"json\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron Server agent service.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"config_settings\"]}, {\"neutron::server::enable_dvr\": {\"get_param\": \"NeutronEnableDVR\"}, \"neutron::server::notifications::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"tripleo::profile::base::neutron::server::tls_proxy_bind_ip\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}, \"tripleo.neutron_api.firewall_rules\": {\"114 neutron api\": {\"dport\": [9696, 13696]}}, \"neutron::keystone::authtoken::user_domain_name\": \"Default\", \"neutron::server::notifications::tenant_name\": \"service\", \"neutron::server::router_distributed\": {\"get_param\": \"NeutronEnableDVR\"}, \"neutron::keystone::authtoken::project_name\": \"service\", \"neutron::policy::policies\": {\"get_param\": \"NeutronApiPolicies\"}, \"neutron::server::notifications::password\": {\"get_param\": \"NovaPassword\"}, \"tripleo::profile::base::neutron::server::tls_proxy_port\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"port\"]}, \"neutron::keystone::authtoken::project_domain_name\": \"Default\", \"neutron::server::allow_automatic_l3agent_failover\": {\"get_param\": \"NeutronAllowL3AgentFailover\"}, \"neutron::server::database_connection\": {\"make_url\": {\"username\": \"neutron\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"NeutronPassword\"}, \"path\": \"/ovs_neutron\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"neutron::bind_host\": {\"if\": [\"use_tls_proxy\", \"localhost\", {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}]}, \"tripleo::profile::base::neutron::server::l3_ha_override\": {\"get_param\": \"NeutronL3HA\"}, \"neutron::server::notifications::project_name\": \"service\", \"neutron::keystone::authtoken::password\": {\"get_param\": \"NeutronPassword\"}, \"tripleo::profile::base::neutron::server::tls_proxy_fqdn\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NeutronApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"neutron::keystone::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"neutron::server::enable_proxy_headers_parsing\": true, \"neutron::keystone::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"neutron::server::sync_db\": true}, {\"if\": [\"neutron_workers_unset\", {}, {\"neutron::server::rpc_workers\": 
{\"get_param\": \"NeutronWorkers\"}, \"neutron::server::api_workers\": {\"get_param\": \"NeutronWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"neutron_server_enabled\", \"command\": \"systemctl is-enabled neutron-server\", \"name\": \"Check if neutron_server is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'neutron-server' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"neutron_server_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service neutron-server is running\", \"tags\": \"step0,validation\"}, {\"when\": \"neutron_server_enabled.rc == 0\", \"name\": \"Stop neutron_api service\", \"service\": \"name=neutron-server state=stopped\", \"tags\": \"step1\"}], \"logging_groups\": [\"neutron\"], \"service_name\": \"neutron_api\", \"step_config\": \"include tripleo::profile::base::neutron::server\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNeutronServer\"}, \"service_config_settings\": {\"keystone\": {\"neutron::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"neutron::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"NeutronPublic\", \"uri\"]}, \"neutron::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"NeutronAdmin\", \"uri\"]}, \"neutron::keystone::auth::tenant\": \"service\", \"neutron::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"NeutronInternal\", \"uri\"]}, \"neutron::keystone::auth::password\": {\"get_param\": \"NeutronPassword\"}}, \"mysql\": {\"neutron::db::mysql::password\": {\"get_param\": \"NeutronPassword\"}, \"neutron::db::mysql::dbname\": \"ovs_neutron\", \"neutron::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"neutron::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"neutron::db::mysql::user\": \"neutron\"}}, \"metadata_settings\": {\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"metadata_settings\"]}, \"logging_source\": {\"get_param\": \"NeutronApiLoggingSource\"}}}}, \"conditions\": {\"neutron_workers_unset\": {\"equals\": [{\"get_param\": \"NeutronWorkers\"}, \"\"]}, \"use_tls_proxy\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"TLSProxyBase\": {\"type\": \"OS::TripleO::Services::TLSProxyBase\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-nuage.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron Nuage plugin\", \"value\": {\"service_name\": \"neutron_plugin_nuage\", \"step_config\": \"include tripleo::profile::base::neutron::plugins::nuage\", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NeutronBase\", \"role_data\", \"config_settings\"]}, {\"neutron::plugins::nuage::nuage_vsd_ip\": {\"get_param\": \"NeutronNuageVSDIp\"}, \"neutron::plugins::nuage::nuage_vsd_username\": {\"get_param\": \"NeutronNuageVSDUsername\"}, \"neutron::plugins::nuage::nuage_base_uri_version\": {\"get_param\": \"NeutronNuageBaseURIVersion\"}, \"neutron::plugins::nuage::nuage_net_partition_name\": {\"get_param\": \"NeutronNuageNetPartitionName\"}, \"nova::api::use_forwarded_for\": {\"get_param\": \"UseForwardedFor\"}, \"neutron::plugins::nuage::nuage_cms_id\": {\"get_param\": \"NeutronNuageCMSId\"}, \"neutron::plugins::nuage::nuage_vsd_password\": {\"get_param\": \"NeutronNuageVSDPassword\"}, \"neutron::plugins::nuage::nuage_vsd_organization\": {\"get_param\": 
\"NeutronNuageVSDOrganization\"}}]}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron Nuage plugin\
0.355 | 3311: \", \"parameters\": {\"NeutronNuageVSDIp\": {\"type\": \"string\", \"description\": \"IP address and port of the Virtual Services Directory\"}, \"NeutronNuageVSDPassword\": {\"type\": \"string\", \"description\": \"Password to be used to log into VSD\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NeutronNuageVSDUsername\": {\"type\": \"string\", \"description\": \"Username to be used to log into VSD\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NeutronNuageCMSId\": {\"type\": \"string\", \"description\": \"Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD\"}, \"NeutronNuageBaseURIVersion\": {\"default\": \"default_uri_version\", \"type\": \"string\", \"description\": \"URI version to be used based on the VSD release\"}, \"UseForwardedFor\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.\"}, \"NeutronNuageVSDOrganization\": {\"default\": \"organization\", \"type\": \"string\", \"description\": \"Organization parameter required to log into VSD\"}, \"NeutronNuageNetPartitionName\": {\"default\": \"default_name\", \"type\": \"string\", \"description\": \"Specifies the title that you will see on the VSD\"}}, \"resources\": {\"NeutronBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-libvirt.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"Libvirt service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"MigrationSshPort\": {\"default\": 22, \"type\": \"number\", \"description\": \"Target port for migration over ssh\"}, \"LibvirtCACert\": {\"default\": \"\", \"type\": \"string\", \"description\": \"This specifies the CA certificate to use for TLS in libvirt. This file will be symlinked to the default CA path in libvirt, which is /etc/pki/CA/cacert.pem. Note that due to limitations GNU TLS, which is the TLS backend for libvirt, the file must be less than 65K (so we can't use the system's CA bundle). This parameter should be used if the default (which comes from the InternalTLSCAFile parameter) is not desired. The current default reflects TripleO's default CA, which is FreeIPA. It will only be used if internal TLS is enabled.\"}, \"LibvirtEnabledPerfEvents\": {\"default\": [], \"type\": \"comma_delimited_list\", \"description\": \"This is a performance event list which could be used as monitor. For example - ``enabled_perf_events = cmt, mbml, mbmt`` The supported events list can be found in https://libvirt.org/html/libvirt-libvirt-domain.html , which you may need to search key words ``VIR_PERF_PARAM_*``\"}, \"MonitoringSubscriptionNovaLibvirt\": {\"default\": \"overcloud-nova-libvirt\", \"type\": \"string\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"InternalTLSCAFile\": {\"default\": \"/etc/ipa/ca.crt\", \"type\": \"string\", \"description\": \"Specifies the default CA cert to use if TLS is used for services in the internal network.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"CephClientKey\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The Ceph client key. Can be created with ceph-authtool --gen-print-key.\"}, \"CinderEnableRbdBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the Rbd backend for Cinder\"}, \"UseTLSTransportForLiveMigration\": {\"default\": true, \"type\": \"boolean\", \"description\": \"If set to true and if EnableInternalTLS is enabled, it will set the libvirt URI's transport to tls and configure the relevant keys for libvirt.\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"NovaComputeLibvirtType\": {\"default\": \"kvm\", \"type\": \"string\"}, \"MigrationSshKey\": {\"default\": {\"public_key\": \"\", \"private_key\": \"\"}, \"type\": \"json\", \"description\": \"SSH key for migration. Expects a dictionary with keys 'public_key' and 'private_key'. Values should be identical to SSH public/private key files.\
0.355 | 3311: \"}, \"CephClusterFSID\": {\"type\": \"string\", \"description\": \"The Ceph cluster FSID. Must be a UUID.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"CephClientUserName\": {\"default\": \"openstack\", \"type\": \"string\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Libvirt service.\", \"value\": {\"metadata_settings\": {\"if\": [\"use_tls_for_live_migration\", [{\"type\": \"node\", \"network\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}, \"service\": \"libvirt\"}], null]}, \"service_name\": \"nova_libvirt\", \"step_config\": \"include tripleo::profile::base::nova::libvirt\
0.355 | 3311: \", \"config_settings\": {\"map_merge\": [{\"get_attr\": [\"NovaBase\", \"role_data\", \"config_settings\"]}, {\"tripleo.nova_libvirt.firewall_rules\": {\"200 nova_libvirt\": {\"dport\": [16514, \"49152-49215\", \"5900-6923\"]}}, \"nova::compute::rbd::libvirt_rbd_secret_uuid\": {\"get_param\": \"CephClusterFSID\"}, \"nova::compute::libvirt::libvirt_virt_type\": {\"get_param\": \"NovaComputeLibvirtType\"}, \"nova::compute::rbd::libvirt_rbd_secret_key\": {\"get_param\": \"CephClientKey\"}, \"nova::compute::rbd::libvirt_rbd_user\": {\"get_param\": \"CephClientUserName\"}, \"nova::compute::libvirt::services::libvirt_virt_type\": {\"get_param\": \"NovaComputeLibvirtType\"}, \"nova::compute::libvirt::qemu::max_files\": 32768, \"tripleo::profile::base::nova::migration::client::ssh_private_key\": {\"get_param\": [\"MigrationSshKey\", \"private_key\"]}, \"nova::compute::rbd::rbd_keyring\": {\"list_join\": [\".\", [\"client\", {\"get_param\": \"CephClientUserName\"}]]}, \"nova::compute::libvirt::manage_libvirt_services\": false, \"nova::compute::libvirt::libvirt_enabled_perf_events\": {\"get_param\": \"LibvirtEnabledPerfEvents\"}, \"nova::compute::libvirt::migration_support\": false, \"nova::compute::libvirt::vncserver_listen\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}, \"rbd_persistent_storage\": {\"get_param\": \"CinderEnableRbdBackend\"}, \"nova::compute::libvirt::qemu::max_processes\": 131072, \"nova::compute::libvirt::qemu::configure_qemu\": true, \"tripleo::profile::base::nova::migration::client::ssh_port\": {\"get_param\": \"MigrationSshPort\"}, \"tripleo::profile::base::nova::migration::client::libvirt_enabled\": true}, {\"if\": [\"use_tls_for_live_migration\", {\"tripleo::profile::base::nova::migration::client::libvirt_tls\": true, \"tripleo::certmonger::libvirt_dirs::certificate_dir\": \"/etc/pki/libvirt\", \"nova::migration::libvirt::live_migration_inbound_addr\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"tripleo::certmonger::ca::libvirt::origin_ca_pem\": {\"if\": [\"libvirt_specific_ca_unset\", {\"get_param\": \"InternalTLSCAFile\"}, {\"get_param\": \"LibvirtCACert\"}]}, \"libvirt_certificates_specs\": {\"libvirt-server-cert\": {\"service_certificate\": \"/etc/pki/libvirt/servercert.pem\", \"hostname\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}}, \"template\": \"%{hiera('fqdn_NETWORK')}\"}}, \"service_key\": \"/etc/pki/libvirt/private/serverkey.pem\", \"principal\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}}, \"template\": \"libvirt/%{hiera('fqdn_NETWORK')}\"}}}, \"libvirt-client-cert\": {\"service_certificate\": \"/etc/pki/libvirt/clientcert.pem\", \"hostname\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}}, \"template\": \"%{hiera('fqdn_NETWORK')}\"}}, \"service_key\": \"/etc/pki/libvirt/private/clientkey.pem\", \"principal\": {\"str_replace\": {\"params\": {\"NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}}, \"template\": \"libvirt/%{hiera('fqdn_NETWORK')}\"}}}}, \"tripleo::certmonger::libvirt_dirs::key_dir\": \"/etc/pki/libvirt/private\", \"generate_service_certificates\": true, \"nova::migration::libvirt::listen_address\": {\"get_param\": [\"ServiceNetMap\", \"NovaLibvirtNetwork\"]}}, {}]}]}, \"monitoring_subscription\": 
{\"get_param\": \"MonitoringSubscriptionNovaLibvirt\"}}}}, \"conditions\": {\"libvirt_specific_ca_unset\": {\"equals\": [{\"get_param\": \"LibvirtCACert\"}, \"\"]}, \"use_tls_for_live_migration\": {\"and\": [{\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}, {\"equals\": [{\"get_param\": \"UseTLSTransportForLiveMigration\"}, true]}]}}, \"resources\": {\"NovaBase\": {\"type\": \"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-base.yaml\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/kernel.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Kernel modules\", \"value\": {\"service_name\": \"kernel\", \"step_config\": \"include ::tripleo::profile::base::kernel\", \"config_settings\": {\"sysctl_settings\": {\"net.ipv6.conf.all.autoconf\": {\"value\": 0}, \"net.ipv4.tcp_keepalive_probes\": {\"value\": 5}, \"kernel.pid_max\": {\"value\": {\"get_param\": \"KernelPidMax\"}}, \"net.ipv4.conf.all.log_martians\": {\"value\": 1}, \"net.ipv4.conf.all.arp_accept\": {\"value\": 1}, \"net.ipv4.conf.all.secure_redirects\": {\"value\": 0}, \"fs.suid_dumpable\": {\"value\": 0}, \"net.ipv4.conf.default.accept_redirects\": {\"value\": 0}, \"net.ipv6.conf.default.autoconf\": {\"value\": 0}, \"net.ipv6.conf.default.accept_redirects\": {\"value\": 0}, \"net.ipv6.conf.all.accept_ra\": {\"value\": 0}, \"net.ipv4.conf.default.secure_redirects\": {\"value\": 0}, \"net.ipv4.conf.default.send_redirects\": {\"value\": 0}, \"net.ipv4.tcp_keepalive_time\": {\"value\": 5}, \"net.nf_conntrack_max\": {\"value\": 500000}, \"net.ipv6.conf.default.accept_ra\": {\"value\": 0}, \"kernel.dmesg_restrict\": {\"value\": 1}, \"net.ipv6.conf.all.accept_redirects\": {\"value\": 0}, \"net.ipv4.conf.default.log_martians\": {\"value\": 1}, \"net.ipv4.conf.all.send_redirects\": {\"value\": 0}, \"net.ipv6.conf.all.disable_ipv6\": {\"value\": {\"get_param\": \"KernelDisableIPv6\"}}, \"net.ipv4.tcp_keepalive_intvl\": {\"value\": 1}, \"net.ipv4.neigh.default.gc_thresh1\": {\"value\": {\"get_param\": \"NeighbourGcThreshold1\"}}, \"net.ipv4.neigh.default.gc_thresh2\": {\"value\": {\"get_param\": \"NeighbourGcThreshold2\"}}, \"net.ipv4.neigh.default.gc_thresh3\": {\"value\": {\"get_param\": \"NeighbourGcThreshold3\"}}, \"net.core.netdev_max_backlog\": {\"value\": 10000}, \"net.ipv6.conf.default.disable_ipv6\": {\"value\": {\"get_param\": \"KernelDisableIPv6\"}}, \"net.netfilter.nf_conntrack_max\": {\"value\": 500000}}, \"kernel_modules\": {\"nf_conntrack\": {}, \"nf_conntrack_proto_sctp\": {}}}}}}, \"heat_template_version\": \"pike\", \"description\": \"Load kernel modules with kmod and configure kernel options with sysctl.\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"KernelPidMax\": {\"default\": 1048576, \"type\": \"number\", \"description\": \"Configures sysctl kernel.pid_max key\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"KernelDisableIPv6\": {\"default\": 0, \"type\": \"number\", \"description\": \"Configures sysctl net.ipv6.{default/all}.disable_ipv6 keys\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"NeighbourGcThreshold1\": {\"default\": 1024, \"type\": \"number\", \"description\": \"Configures sysctl net.ipv4.neigh.default.gc_thresh1 value. This is the minimum number of entries to keep in the ARP cache. The garbage collector will not run if there are fewer than this number of entries in the cache.\"}, \"NeighbourGcThreshold3\": {\"default\": 4096, \"type\": \"number\", \"description\": \"Configures sysctl net.ipv4.neigh.default.gc_thresh3 value. This is the hard maximum number of entries to keep in the ARP cache. The garbage collector will always run if there are more than this number of entries in the cache.\"}, \"NeighbourGcThreshold2\": {\"default\": 2048, \"type\": \"number\", \"description\": \"Configures sysctl net.ipv4.neigh.default.gc_thresh2 value. This is the soft maximum number of entries to keep in the ARP cache. The garbage collector will allow the number of entries to exceed this for 5 seconds before collection will be performed.\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/mysql-client.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role for setting mysql client parameters\", \"value\": {\"service_name\": \"mysql_client\", \"step_config\": \"include ::tripleo::profile::base::database::mysql::client\", \"config_settings\": {\"tripleo::profile::base::database::mysql::client::mysql_client_bind_address\": {\"get_param\": [\"ServiceNetMap\", \"MysqlNetwork\"]}, \"tripleo::profile::base::database::mysql::client::ssl_ca\": {\"get_param\": \"InternalTLSCAFile\"}, \"tripleo::profile::base::database::mysql::client::enable_ssl\": {\"get_param\": \"EnableInternalTLS\"}}}}}, \"heat_template_version\": \"pike\", \"description\": \"Mysql client settings\
0.355 | 3311: \", \"parameters\": {\"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"InternalTLSCAFile\": {\"default\": \"/etc/ipa/ca.crt\", \"type\": \"string\", \"description\": \"Specifies the default CA cert to use if TLS is used for services in the internal network.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/glance-api.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"OpenStack Glance API service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"GlanceRbdPoolName\": {\"default\": \"images\", \"type\": \"string\"}, \"GlanceNotifierStrategy\": {\"default\": \"noop\", \"type\": \"string\", \"description\": \"Strategy to use for Glance notification queue\"}, \"GlanceWorkers\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Number of API worker processes for Glance. If left unset (empty string), the\
default value will result in the configuration being left unset and a\
system-dependent default value will be chosen (e.g.: number of\
processors). Please note that this will create a large number of\
processes on systems with a large number of CPUs resulting in excess\
memory consumption. It is recommended that a suitable non-default value\
be selected on such systems.\
0.355 | 3311: \"}, \"NotificationDriver\": {\"default\": \"messagingv2\", \"type\": \"string\", \"description\": \"Driver or drivers to handle sending notifications.\", \"constraints\": [{\"allowed_values\": [\"messagingv2\", \"noop\"]}]}, \"KeystoneRegion\": {\"default\": \"regionOne\", \"type\": \"string\", \"description\": \"Keystone region for endpoint\"}, \"GlanceBackend\": {\"default\": \"swift\", \"type\": \"string\", \"description\": \"The short name of the Glance backend to use. Should be one of swift, rbd, or file\", \"constraints\": [{\"allowed_values\": [\"swift\", \"file\", \"rbd\"]}]}, \"RabbitClientUseSSL\": {\"default\": false, \"type\": \"string\", \"description\": \"Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.\
0.355 | 3311: \"}, \"GlanceApiPolicies\": {\"default\": {}, \"type\": \"json\", \"description\": \"A hash of policies to configure for Glance API.\
e.g. { glance-context_is_admin: { key: context_is_admin, value: 'role:admin' } }\
0.355 | 3311: \"}, \"RabbitPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for RabbitMQ\"}, \"GlanceNfsEnabled\": {\"default\": false, \"type\": \"boolean\", \"description\": \"When using GlanceBackend 'file', mount NFS share for image storage.\
0.355 | 3311: \"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"GlanceNfsOptions\": {\"default\": \"intr,context=system_u:object_r:glance_var_lib_t:s0\", \"type\": \"string\", \"description\": \"NFS mount options for image storage (when GlanceNfsEnabled is true)\
0.355 | 3311: \"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"RabbitUserName\": {\"default\": \"guest\", \"type\": \"string\", \"description\": \"The username for RabbitMQ\"}, \"NovaEnableRbdBackend\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether to enable or not the Rbd backend for Nova\"}, \"Debug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging on all services.\"}, \"GlancePassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the glance service and db account, used by the glance services.\"}, \"CephClientUserName\": {\"default\": \"openstack\", \"type\": \"string\"}, \"GlanceLogFile\": {\"default\": \"\", \"type\": \"string\", \"description\": \"The filepath of the file to use for logging messages from Glance.\"}, \"MonitoringSubscriptionGlanceApi\": {\"default\": \"overcloud-glance-api\", \"type\": \"string\"}, \"GlanceNfsShare\": {\"default\": \"\", \"type\": \"string\", \"description\": \"NFS share to mount for image storage (when GlanceNfsEnabled is true)\
0.355 | 3311: \"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"RabbitClientPort\": {\"default\": 5672, \"type\": \"number\", \"description\": \"Set rabbit subscriber port, change this if using SSL\"}, \"GlanceDebug\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Set to True to enable debugging Glance service.\"}, \"GlanceApiLoggingSource\": {\"default\": {\"path\": \"/var/log/glance/api.log\", \"tag\": \"openstack.glance.api\"}, \"type\": \"json\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"EnableInternalTLS\": {\"default\": false, \"type\": \"boolean\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the Glance API role.\", \"value\": {\"config_settings\": {\"map_merge\": [{\"get_attr\": [\"TLSProxyBase\", \"role_data\", \"config_settings\"]}, {\"glance::api::bind_host\": {\"if\": [\"use_tls_proxy\", \"localhost\", {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}]}, \"glance_backend\": {\"get_param\": \"GlanceBackend\"}, \"glance::keystone::authtoken::project_domain_name\": \"Default\", \"glance::api::enable_v1_api\": false, \"tripleo::glance::nfs_mount::share\": {\"get_param\": \"GlanceNfsShare\"}, \"glance::api::bind_port\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"port\"]}, \"glance::api::enable_proxy_headers_parsing\": true, \"glance::backend::swift::swift_store_auth_address\": {\"get_param\": [\"EndpointMap\", \"KeystoneV3Internal\", \"uri\"]}, \"glance::policy::policies\": {\"get_param\": \"GlanceApiPolicies\"}, \"glance_log_file\": {\"get_param\": \"GlanceLogFile\"}, \"glance::api::show_image_direct_url\": true, \"glance::api::authtoken::project_name\": \"service\", \"tripleo::profile::base::glance::api::glance_nfs_enabled\": {\"get_param\": \"GlanceNfsEnabled\"}, \"glance::keystone::authtoken::user_domain_name\": \"Default\", \"glance::api::os_region_name\": {\"get_param\": \"KeystoneRegion\"}, \"tripleo::profile::base::glance::api::tls_proxy_bind_ip\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}, \"glance::api::enable_v2_api\": true, \"glance::notify::rabbitmq::rabbit_use_ssl\": {\"get_param\": \"RabbitClientUseSSL\"}, \"tripleo::profile::base::glance::api::tls_proxy_fqdn\": {\"str_replace\": {\"params\": {\"$NETWORK\": {\"get_param\": [\"ServiceNetMap\", \"GlanceApiNetwork\"]}}, \"template\": \"%{hiera('fqdn_$NETWORK')}\"}}, \"glance::notify::rabbitmq::rabbit_password\": {\"get_param\": \"RabbitPassword\"}, \"tripleo::profile::base::glance::api::tls_proxy_port\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"port\"]}, \"glance::api::database_connection\": {\"make_url\": {\"username\": \"glance\", \"host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host\"]}, \"password\": {\"get_param\": \"GlancePassword\"}, \"path\": \"/glance\", \"query\": {\"read_default_group\": \"tripleo\", \"read_default_file\": \"/etc/my.cnf.d/tripleo.cnf\"}, \"scheme\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"protocol\"]}}}, \"glance::api::authtoken::password\": {\"get_param\": \"GlancePassword\"}, \"glance::notify::rabbitmq::notification_driver\": 
{\"get_param\": \"NotificationDriver\"}, \"tripleo::glance::nfs_mount::options\": {\"get_param\": \"GlanceNfsOptions\"}, \"glance::backend::swift::swift_store_auth_version\": 3, \"tripleo.glance_api.firewall_rules\": {\"112 glance_api\": {\"dport\": [9292, 13292]}}, \"glance_notifier_strategy\": {\"get_param\": \"GlanceNotifierStrategy\"}, \"glance::backend::swift::swift_store_create_container_on_put\": true, \"glance::notify::rabbitmq::rabbit_userid\": {\"get_param\": \"RabbitUserName\"}, \"glance::api::debug\": {\"if\": [\"service_debug_unset\", {\"get_param\": \"Debug\"}, {\"get_param\": \"GlanceDebug\"}]}, \"glance::backend::swift::swift_store_user\": \"service:glance\", \"glance::backend::rbd::rbd_store_pool\": {\"get_param\": \"GlanceRbdPoolName\"}, \"glance::api::authtoken::auth_url\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"glance::api::pipeline\": \"keystone\", \"glance::api::authtoken::auth_uri\": {\"get_param\": [\"EndpointMap\", \"KeystoneInternal\", \"uri_no_suffix\"]}, \"glance::notify::rabbitmq::rabbit_port\": {\"get_param\": \"RabbitClientPort\"}, \"glance::backend::swift::swift_store_key\": {\"get_param\": \"GlancePassword\"}, \"glance::backend::rbd::rbd_store_user\": {\"get_param\": \"CephClientUserName\"}, \"glance::api::show_multiple_locations\": {\"if\": [\"glance_multiple_locations\", true, false]}}, {\"if\": [\"glance_workers_unset\", {}, {\"glance::api::workers\": {\"get_param\": \"GlanceWorkers\"}}]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"glance_api_enabled\", \"command\": \"systemctl is-enabled openstack-glance-api\", \"name\": \"Check if glance_api is deployed\", \"tags\": \"common\"}, {\"ignore_errors\": true, \"register\": \"glance_registry_enabled\", \"command\": \"systemctl is-enabled openstack-glance-registry\", \"name\": \"Check if glance_registry is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'openstack-glance-api' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"glance_api_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service openstack-glance-api is running\", \"tags\": \"step0,validation\"}, {\"when\": \"glance_api_enabled.rc == 0\", \"name\": \"Stop glance_api service\", \"service\": \"name=openstack-glance-api state=stopped\", \"tags\": \"step1\"}, {\"when\": \"glance_registry_enabled.rc == 0\", \"name\": \"Stop and disable glance registry (removed for Ocata)\", \"service\": \"name=openstack-glance-registry state=stopped enabled=no\", \"tags\": \"step1\"}], \"logging_groups\": [\"glance\"], \"service_name\": \"glance_api\", \"step_config\": \"include ::tripleo::profile::base::glance::api\
0.355 | 3311: \", \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionGlanceApi\"}, \"service_config_settings\": {\"keystone\": {\"glance::keystone::auth::tenant\": \"service\", \"glance::keystone::auth::admin_url\": {\"get_param\": [\"EndpointMap\", \"GlanceAdmin\", \"uri\"]}, \"glance::keystone::auth::region\": {\"get_param\": \"KeystoneRegion\"}, \"glance::keystone::auth::public_url\": {\"get_param\": [\"EndpointMap\", \"GlancePublic\", \"uri\"]}, \"glance::keystone::auth::password\": {\"get_param\": \"GlancePassword\"}, \"glance::keystone::auth::internal_url\": {\"get_param\": [\"EndpointMap\", \"GlanceInternal\", \"uri\"]}}, \"mysql\": {\"glance::db::mysql::password\": {\"get_param\": \"GlancePassword\"}, \"glance::db::mysql::host\": {\"get_param\": [\"EndpointMap\", \"MysqlInternal\", \"host_nobrackets\"]}, \"glance::db::mysql::dbname\": \"glance\", \"glance::db::mysql::allowed_hosts\": [\"%\", \"%{hiera('mysql_bind_host')}\"], \"glance::db::mysql::user\": \"glance\"}}, \"logging_source\": {\"get_param\": \"GlanceApiLoggingSource\"}}}}, \"conditions\": {\"glance_workers_unset\": {\"equals\": [{\"get_param\": \"GlanceWorkers\"}, \"\"]}, \"use_tls_proxy\": {\"equals\": [{\"get_param\": \"EnableInternalTLS\"}, true]}, \"service_debug_unset\": {\"equals\": [{\"get_param\": \"GlanceDebug\"}, \"\"]}, \"glance_multiple_locations\": {\"and\": [{\"equals\": [{\"get_param\": \"GlanceBackend\"}, \"rbd\"]}, {\"equals\": [{\"get_param\": \"NovaEnableRbdBackend\"}, true]}]}}, \"resources\": {\"TLSProxyBase\": {\"type\": \"OS::TripleO::Services::TLSProxyBase\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"EnableInternalTLS\": {\"get_param\": \"EnableInternalTLS\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/haproxy.yaml": "{\"heat_template_version\": \"pike\", \"description\": \"HAproxy service configured with Puppet\
0.355 | 3311: \", \"parameters\": {\"InternalTLSCAFile\": {\"default\": \"/etc/ipa/ca.crt\", \"type\": \"string\", \"description\": \"Specifies the default CA cert to use if TLS is used for services in the internal network.\"}, \"HAProxySyslogAddress\": {\"default\": \"/dev/log\", \"type\": \"string\", \"description\": \"Syslog address where HAproxy will send its log\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"RedisPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the redis service account.\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"HAProxyStatsUser\": {\"default\": \"admin\", \"type\": \"string\", \"description\": \"User for HAProxy stats endpoint\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"HAProxyStatsPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"Password for HAProxy stats endpoint\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}, \"InternalTLSCRLPEMFile\": {\"default\": \"/etc/pki/CA/crl/overcloud-crl.pem\", \"type\": \"string\", \"description\": \"Specifies the default CRL PEM file to use for revocation if TLS is used for services in the internal network.\"}, \"SSLCertificate\": {\"default\": \"\", \"type\": \"string\", \"description\": \"The content of the SSL certificate (without Key) in PEM format.\
0.355 | 3311: \"}, \"MonitoringSubscriptionHaproxy\": {\"default\": \"overcloud-haproxy\", \"type\": \"string\"}, \"HAProxyStatsEnabled\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether or not to enable the HAProxy stats interface.\"}, \"DeployedSSLCertificatePath\": {\"default\": \"/etc/pki/tls/private/overcloud_endpoint.pem\", \"type\": \"string\", \"description\": \"The filepath of the certificate as it will be stored in the controller.\
0.355 | 3311: \"}, \"EnableLoadBalancer\": {\"default\": true, \"type\": \"boolean\", \"description\": \"Whether to deploy a LoadBalancer, set to false when an external load balancer is used.\"}}, \"outputs\": {\"role_data\": {\"description\": \"Role data for the HAproxy role.\", \"value\": {\"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionHaproxy\"}, \"config_settings\": {\"map_merge\": [{\"tripleo.haproxy.firewall_rules\": {\"107 haproxy stats\": {\"dport\": 1993}}, \"tripleo::haproxy::haproxy_stats_password\": {\"get_param\": \"HAProxyStatsPassword\"}, \"tripleo::haproxy::redis_password\": {\"get_param\": \"RedisPassword\"}, \"enable_load_balancer\": {\"get_param\": \"EnableLoadBalancer\"}, \"tripleo::haproxy::ca_bundle\": {\"get_param\": \"InternalTLSCAFile\"}, \"tripleo::haproxy::crl_file\": {\"get_param\": \"InternalTLSCRLPEMFile\"}, \"tripleo::haproxy::haproxy_stats\": {\"get_param\": \"HAProxyStatsEnabled\"}, \"tripleo::profile::base::haproxy::certificates_specs\": {\"map_merge\": [{\"get_attr\": [\"HAProxyPublicTLS\", \"role_data\", \"certificates_specs\"]}, {\"get_attr\": [\"HAProxyInternalTLS\", \"role_data\", \"certificates_specs\"]}]}, \"tripleo::haproxy::haproxy_stats_user\": {\"get_param\": \"HAProxyStatsUser\"}, \"tripleo::haproxy::haproxy_log_address\": {\"get_param\": \"HAProxySyslogAddress\"}}, {\"if\": [\"public_tls_enabled\", {\"tripleo::haproxy::service_certificate\": {\"get_param\": \"DeployedSSLCertificatePath\"}}, {}]}, {\"get_attr\": [\"HAProxyPublicTLS\", \"role_data\", \"config_settings\"]}, {\"get_attr\": [\"HAProxyInternalTLS\", \"role_data\", \"config_settings\"]}]}, \"upgrade_tasks\": [{\"ignore_errors\": true, \"register\": \"haproxy_enabled\", \"command\": \"systemctl is-enabled haproxy\", \"name\": \"Check if haproxy is deployed\", \"tags\": \"common\"}, {\"shell\": \"/usr/bin/systemctl show 'haproxy' --property ActiveState | grep '\\\\bactive\\\\b'\", \"when\": \"haproxy_enabled.rc == 0\", \"name\": \"PreUpgrade step0,validation: Check service haproxy is running\", \"tags\": \"step0,validation\"}, {\"when\": \"haproxy_enabled.rc == 0\", \"name\": \"Stop haproxy service\", \"service\": \"name=haproxy state=stopped\", \"tags\": \"step2\"}, {\"when\": \"haproxy_enabled.rc == 0\", \"name\": \"Start haproxy service\", \"service\": \"name=haproxy state=started\", \"tags\": \"step4\"}], \"metadata_settings\": {\"list_concat\": [{\"get_attr\": [\"HAProxyPublicTLS\", \"role_data\", \"metadata_settings\"]}, {\"get_attr\": [\"HAProxyInternalTLS\", \"role_data\", \"metadata_settings\"]}]}, \"service_name\": \"haproxy\", \"step_config\": \"include ::tripleo::profile::base::haproxy\
0.355 | 3311: \"}}}, \"conditions\": {\"public_tls_enabled\": {\"not\": {\"equals\": [{\"get_param\": \"SSLCertificate\"}, \"\"]}}}, \"resources\": {\"HAProxyInternalTLS\": {\"type\": \"OS::TripleO::Services::HAProxyInternalTLS\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}, \"HAProxyPublicTLS\": {\"type\": \"OS::TripleO::Services::HAProxyPublicTLS\", \"properties\": {\"ServiceData\": {\"get_param\": \"ServiceData\"}, \"DefaultPasswords\": {\"get_param\": \"DefaultPasswords\"}, \"EndpointMap\": {\"get_param\": \"EndpointMap\"}, \"RoleName\": {\"get_param\": \"RoleName\"}, \"ServiceNetMap\": {\"get_param\": \"ServiceNetMap\"}, \"RoleParameters\": {\"get_param\": \"RoleParameters\"}}}}}", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/deployed-server/deployed-server-bootstrap-centos.sh": "#!/bin/bash
0.355 | 3311:
0.355 | 3311: set -eux
0.355 | 3311:
0.355 | 3311: yum install -y \\
0.355 | 3311: jq \\
0.355 | 3311: python-ipaddr \\
0.355 | 3311: openstack-puppet-modules \\
0.355 | 3311: os-net-config \\
0.355 | 3311: openvswitch \\
0.355 | 3311: python-heat-agent* \\
0.355 | 3311: openstack-selinux
0.355 | 3311:
0.355 | 3311: ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
0.355 | 3311:
0.355 | 3311: setenforce 0
0.355 | 3311: sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
0.355 | 3311:
0.355 | 3311: echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
0.355 | 3311: echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
0.355 | 3311: ", "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-midonet.yaml": "{\"outputs\": {\"role_data\": {\"description\": \"Role data for the Neutron Midonet plugin and services\", \"value\": {\"service_name\": \"neutron_midonet\", \"step_config\": \"include tripleo::profile::base::neutron::plugins::midonet\", \"config_settings\": {\"tripleo::profile::base::neutron::midonet::zk_on_controller\": {\"get_param\": \"EnableZookeeperOnController\"}, \"neutron::service_plugins\": [], \"tripleo::profile::base::neutron::midonet::neutron_auth_tenant\": \"service\", \"enable_cassandra_on_controller\": {\"get_param\": \"EnableCassandraOnController\"}, \"tripleo::profile::base::neutron::midonet::neutron_auth_password\": {\"get_param\": \"NeutronPassword\"}, \"tripleo::profile::base::neutron::midonet::keystone_admin_token\": {\"get_param\": \"AdminToken\"}, \"tripleo::profile::base::neutron::midonet::admin_password\": {\"get_param\": \"AdminPassword\"}}, \"monitoring_subscription\": {\"get_param\": \"MonitoringSubscriptionNeutronMidonet\"}}}}, \"heat_template_version\": \"pike\", \"description\": \"OpenStack Neutron Midonet plugin and services\
0.355 | 3311: \", \"parameters\": {\"MonitoringSubscriptionNeutronMidonet\": {\"default\": \"overcloud-neutron-midonet\", \"type\": \"string\"}, \"DefaultPasswords\": {\"default\": {}, \"type\": \"json\"}, \"EnableCassandraOnController\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether enable Cassandra cluster on Controller\", \"label\": \"Enable Cassandra On Controller\"}, \"AdminPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the keystone admin account, used for monitoring, querying neutron etc.\"}, \"ServiceNetMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults.\"}, \"NeutronPassword\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The password for the neutron service and db account, used by neutron agents.\"}, \"ServiceData\": {\"default\": {}, \"type\": \"json\", \"description\": \"Dictionary packing service data\"}, \"EndpointMap\": {\"default\": {}, \"type\": \"json\", \"description\": \"Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.\"}, \"EnableZookeeperOnController\": {\"default\": false, \"type\": \"boolean\", \"description\": \"Whether enable Zookeeper cluster on Controller\", \"label\": \"Enable Zookeeper On Controller\"}, \"RoleName\": {\"default\": \"\", \"type\": \"string\", \"description\": \"Role name on which the service is applied\"}, \"AdminToken\": {\"hidden\": true, \"type\": \"string\", \"description\": \"The keystone auth secret and db password.\"}, \"RoleParameters\": {\"default\": {}, \"type\": \"json\", \"description\": \"Parameters specific to the role\"}}}"}, "environment": {"parameter_defaults": {"KeystoneFernetKey1": "mgifMn33EUknY7Mp1MVf-XaugCgtrYFRzaCV1wcQHL0=", "KeystoneFernetKey0": "81Dp-iNNBv9-WoJEW4pv2RVUSRyNpG2O5Y9XKI896pw=", "HAProxyStatsPassword": "pUM4mMBtauMzgrm4qJAEWfFW4", "GlanceWorkers": 1, "NotificationDriver": "noop", "HeatPassword": "WvkTkH8A8hYvMmkDptPP3JFfR", "CephManilaClientKey": "AQD8z7pZAAAAABAA0ZoKNZfY5lMalTgG9ghJlw==", "CongressPassword": "zV28YATHwt9uYqa7gt4zMzsxc", "NeutronPassword": "CfwCTwva3WUsK4d4yZzUvYn8P", "DatabaseSyncTimeout": 900, "EnablePackageInstall": true, "SnmpdReadonlyUserPassword": "61adadedbbb7d2aec93d23785b0adca1b5febe07", "GlancePassword": "72RVTQDG4fyfewQDxEc2penCK", "ManilaPassword": "BN3GKrgD2bfbfQEVcMaxjdeMc", "CloudNameInternal": "overcloud.internalapi.localdomain", "ControllerExtraConfig": {"heat::rpc_response_timeout": 600, "heat::api_cloudwatch::enabled": false, "nova::network::neutron::neutron_url_timeout": "60", "heat::api_cfn::enabled": false, "nova::compute::libvirt::services::libvirt_virt_type": "qemu", "nova::compute::libvirt::libvirt_virt_type": "qemu"}, "EtcdInitialClusterToken": "hpdYtXMNMJCXbszuHKaM2Buxs", "IronicPassword": "dG4cMnMwRJjKj7xcwa3HzsXQG", "CloudDomain": "localdomain", "ApacheServerLimit": 100, "SaharaWorkers": 1, "ZaqarPassword": "t6MzA37H6V3H9MY2gEtWhY2pB", "NovaComputeExtraConfig": {"nova::compute::libvirt::libvirt_cpu_mode": "none"}, "SwiftHashSuffix": "RNRakYZM6kFX2nKsV8EGf47Ra", "OctaviaHeartbeatKey": "TvTECtVaZRZUyTRMYeve6dF4g", "ComputeCount": 0, "KeystoneTokenProvider": "fernet", "SwiftWorkers": 1, "CephAdminKey": "AQD8z7pZAAAAABAA14CS11bOmNubwI753QzMDQ==", "GnocchiPassword": "sHqYB4cF7evf6fe7HkXKNK2R6", "PacemakerRemoteAuthkey": 
"rzG9xbnaxvsEC6VkkaqRXGgTwDRuA9V4nRVDUCp4se46EV88Cs7sCtQQJQW6zErwztFGsDaKx9qUJzqGyaEeYrwKECXWrEXGaUf9Bm2jEhMn6cBMeQb6BhjjfmXMumAZ77hJT4QJ9rwXzfTGcdX2cHdRA8tsejWXKWWtbyj8j3bxf3PnZbXfCUV6kGNzVPfhkzVB46n83EVX7RYKuQDCMccXyZjmXdkt9v2enXUGnhzTeAk6RBCdBDbRRMTk6GJYKcz6P9gfkQHzCdjMnyayAnBz3CDmq3JGYsQhVqQpnuubuy7zGwFh9pdrxedREbykWUERK6RjzbW38zRGcwNn9f7RCEugFmKjJmqV4FcwcKafpGDhvBRruheJbh2mzP7Anp8sfFJVekM7vYnhakTy2ewH84tdQwnWKneruB8fPwCkBjrAHqf3Gtgss9pAcK6naEbj7Wnf6a8pJPCqBxBC2UvEAJTPJKwFaw6cnpAh6jVRG9Eq4aswhntMrXzxqvUyTpKzRguqYdmKmNqExmGR7jtD8FgkcC8KERkjw9DfGr6vUNdvmQMTHUzNQskwEQCRD4vEjgryXApdAyvX6ZQ83evUqHCzXqpgmY6ebc8mzrAG8DcEAkAHYtcHn4JHjV6dVPFHAccGMAsHHYvkvBFEW7864a6RrY6UHVwHJzBq24zpgcAXHB3cMthXHmX9JYvwnNG9czxeYfRdpAAZtTyTC98uGQ3GvPHhxZT2DX4pm6vasxnhdGgXAPjwTQ9jq9RbjQgsZXYCZkFMxhsAP9bBERqDCD63wq4DkpfWrDB9FaRfN7zPeuswk3s2UctGtzkKyjfwtvXtAJDBgXZYvpFGzza9BHRWXKMbUeq7W7VR64myeKDykj3WVRrBCcwrqPR6ZfnE2BhGEh9R4KP8UCDcBG8xv2UmyPVg7GrdfBgVpbbg6jHCjvEZNRfx9CRcnhjFrkWBprXdTxGe3wTjxpAu37Bcd2wFFfb9cqatZsbq9FPVMrFBpTBeN7rqwPkjnsqFQ9qUNDkksfphUFVhGetyn2rXaMZ99tRPM7NwkY8uh2bCfyM8zm8QEbRG7vpDXNjctGDPjGbTf9jW4rFzeKxrjETaMftnR7pFeBYDtftrRmZ9NPewdPMzNKtD4NNVPTzwAXumqCj2fg9mWEPDaGwb8eKgPAUrsqkAgsYksgvdYCgVMBzkw2erM9chg2Jxr2qCubcj7fwvtpg79Fxgaj2bdqDKt9DcCQvb8MA8bXuJQUR63YECBKdmnhfg4dD89db86buejrRXEyaQUMECwXfFzwTJj78ybascPxjUuTJnw2zY6aFtaXaG4bqq9J2BVkKjbvFu9z2f3aQgzUsTCZcPMyszVmAW8cseePGakTgf3D9DBTRfCM4agbXjXnNpAMv3ZmCk8rZnBxB9DvAzjxeUy4jJZFvN266TgZ3xt7h7xeKVBACqNUJ28TKbF6wvFVDcD64ZWMfmqdZspKDz88EZ44eYnqTKGnnwAPsJ3E3AGKs4gFkJjEYcyqRnvZetwqjmnj3EzW2yDpgsZMrUW4juxFkKa4bXAPJAyUDBTprm8qaezf8j9edqy46ZBF8Qv4Nq2nc2Drd6knufMDU7CW7BHa9W9zPx6UvsFpast8PVXWAEXB3n7ueVvEET4DVDKm32kr7BRkxfA9a7KjHWTjjne3Rsk2DjC2eeuUmGURRvtBdgbe2JTkmMe9t9CMYwWUz3gGBmt6rmw2UJ983z6xMtTvAgtb4TGPvpAeB4DYZvuqn6MjZzCcHapEg9GUXMzdudhnAJbm3WFTC2Gm694APVRAzGGHYVqPJUyCx2cFaDZkCw23TQAsjnVZWtP6HDQdphcat3DaJAUYcqhYYVVeEzFC83Xj7brcvGcz2dBWtAJu4MBxu9hjnMr4YtTH8hdXQpAAPf4marsTXgkbkyaXs3m3KCMv3ewMjUjrsmrhsb7Hf9gAtPj4tFy8NAW8UNYnbt7Rrc4cKRVAw7RbXndhTRtwvTfUvXZxNhcGpw2QrnVcM9KdsBWfAuAZ969rvpN9tYU23MvawVNAa4nkrETMX8hE4Aj3ENVVqaf7kfwu8UKA4bztKdgHJrtkAgEFAqAzdUgkcdYb9hQYxjEFDM34QZePsDFhUqpug3uB8kAVtUfa4Uj7hkVUxCDAdsbyUfjuktgVUDWWuTdrMfJZ3Au82Y8TjJ3yaMQ6aWEgzC6uxTkeC9Rhy9uyrryHQTUf3EC7qPm4DyTjz98AQMwzCaaVnPEcBCxR4FXjhDaFewt3naTF3DdG3vcUupawA3w6dGPFMf2AQkebQf7aVyheVvbTvRpGuHP2dZHcYxAdtQ8KqZfEvpZskjPhvJ3dWqUs2RbGVepB7FFvEHjXcrCUstAt4RJpkwXek7bHZUVPtsaebg2hTyKsfrM7zX2Kkp2f4BWHYsErVYNJYn3NPvfCTasWd6smRGuYfx99RyrXKn6rJ9bRe7khshT8rhXPkW3CeCTsDARnWszTm2mW9cV77ZBNuwhRDtKNBQMKTFpmXgE3BzuVxNPEhTZPBtHgVAPan9hqdQcnjGFeHM6EQMbKQEYtG7xFUnzaNprRgc9fzKegT4p4XkjgQTQPJsTqdATMYjygnNDfRatzN6DTJrJTDBRH9TfATMPWfwunqjCsjZyqkQ6UXPPkkHbdvy2jV6YkeBXqrVaJAzxdMM6tNMVtgAsUVveDzt9TbkK9y9UTqrWjabzcc6CCKzkgAkZvQv6dyvnNYuGWmEEwhxwAUurZCFqwYayzUsMy7KnCU9Dphv89sUeMNVXe4MYcAECA3HEqmMNQxMMyWemmgvcQJXaCNCdyhZzEveMxeZHhRHzR3HuPuW7ffCyEv8KGDG98hBEVXJuzvV6Bwp2hXcYHNuFXaUdWCsgHT4YhrkEKmrzsneJntgMUfhaTBCm78YR4deMxRZ49UX3NtbkNnNJgseXHYmN2X6qvMJV24CTUgmUVDch3pBaB7s2JKdRW77tspvJ7eNAN3qHah9xQKBtXrdp8JC8VQ6kJsxgZ6arKZpuX3EpMgEMp7mP8pXhkXgsDFzpdRY62wRDbmJCa9nCypFe8fFBfqqeN6hRfC6eqT2hjM2kEy4gRcUK62yU2K9kZYVpDC2sWvUvKRgXDyNB9XAvWkwYXraV9dchkBZ3yk6AbUwaKkB7RGU63seYcpDqte4PXgEAbbXfrq9XKu4AkbN6D8EdrydHA3c9FAK6F4FkXCwT44ajbkF2wnPkfDTEWqneARhmbwED7zxQCQz7EGNw6evE9kwetPsDkjWsrAkGzK4pBZgGPKKrTDGspdEM77My3dsHwTQHJZpMPmYW8uaUfETkNNQPRWkxd2WZHmU2AdTcCCdVexVzgtFkdABn2dpgKtarKg7YXVKFwx724ar8g9yMUQPNEKp2zmHG399CzbDqGEhjWEH4VpAunHz4KR82FAMB77vwNpdTGxTBQdPDdWhrHAk4g2gv7nyX4sqhQy9xm4tw2bE2ansp7xYBhTFArNPtknnT8ckz29DVNkZYUwb2n6YH9kbmecxnCMkuB
uCf8JvKMB2FHjQBpY673r6BWxdda7ufta4Te3XUdjhg2hU7RbZGCyaRd2fHabgXCyzBKUqzsukN8RdFzZcx3dcbHKzMbGyp7MPHUeEfgeTCHGRuFb38gKervYw7zJnxgB8x4m2FDemcyyapHqq7vTfD342KjqEAxj2bYW2yrygggud3rXTKQDzKcrREyacXFnKu7e3K7KsUXTmvWejyvHVusRTZZg98YqAEpYwuYgaqetuBH2PpbWP8Kpzvem7AVnDBzzTue4nk9UMRRmAr8GMGNsEPwQBkp2AVYgnR26CzKmmJBAsUHMK9MV7Z97M9bN33XcdHgwkmNPJptmMvqeNWFPeMXyHWkGwwXrzE68eUDYjAjKqpsERDyR7GeRxZfXrwgsvs92z3fAzX4u9jaxw7RrffPadX2H3PAHDNGbvd72E2yy29wuT8asBNjWbTEkUsrsrA77sfwFPgpAA3HMPXpxX4W27GgNDyEsRvW9dnABNF6dfrFsrTkhJ7BmpA87dTttejeyNwBj3YbpRHpmramDEqZsQ", "ApacheMaxRequestWorkers": 100, "SaharaPassword": "fraZ6CH6HPUaxJG6VBRRBTQFV", "Ec2ApiPassword": "tWkBDFvsqguKx8YbqbMp9gqzh", "SoftwareConfigTransport": "POLL_TEMP_URL", "CeilometerPassword": "dgz9dTgjBtdDwbcRPbRW2M4qP", "NovajoinPassword": "uvZm8XjhEBRubszZ8fPawTDFR", "CinderPassword": "B2DTzp2A6tfKWccvMfdjygTwa", "CloudNameStorage": "overcloud.storage.localdomain", "HeatWorkers": 1, "KeystoneFernetKeys": {"/etc/keystone/fernet-keys/0": {"content": "TJcfX9uVjVupqvW2CChdO5Kg5sMtVS_wmq3QY9sxqS4="}, "/etc/keystone/fernet-keys/1": {"content": "DbgrrwKJNjYlLqe-Eul_4zJeAznXjy31wCViVGiQ_ps="}}, "CeilometerWorkers": 1, "CloudName": "overcloud.localdomain", "CloudNameCtlplane": "overcloud.ctlplane.localdomain", "GlanceBackend": "file", "CephClusterFSID": "e4927ed2-997d-11e7-a8e0-fa163eb88bc3", "MysqlClustercheckPassword": "jA3a8xBHhHPktvveMFwmK7gxF", "NeutronWorkers": 1, "StackAction": "CREATE", "RabbitPassword": "DMpfjQzKFdTh26GpBb83VyJjs", "TrovePassword": "z3AbdtG7NF6ZdmgGKpMbGFpQC", "PankoPassword": "YTMBPGH3PsCsapV7CedMqRD3k", "CephClientKey": "AQD8z7pZAAAAABAA3ra0CIECKhLZFF5bO3H8sw==", "AodhPassword": "C6jgV3mXNJxPpazYj2Ymh74ds", "KeystoneCredential0": "eyJTsxlRlGZ-ZeN-BFDMs80x6L7McVfVVCe8uuONpKY=", "KeystoneCredential1": "0Df2F-KQ_eFhQH2b9-wqtzZRg7c5PRmdU-LCYKlf6mU=", "Debug": true, "HeatStackDomainAdminPassword": "2D7YCgrGdNetbUxkhQsxdT9mt", "SwiftCeilometerPipelineEnabled": false, "CloudNameStorageManagement": "overcloud.storagemgmt.localdomain", "NovaPassword": "Baq7zRVmDVdgsa7Pgre26EpEw", "AdminToken": "XHdpW9t7BBfCTWJ46JhRmzpa3", "GnocchiMetricdWorkers": 1, "RedisPassword": "u4eGJVPyhvcjBXqrnykhP2rG9", "MistralPassword": "8s4W76Ts4rXcUPQ2K7CHEXBeE", "NovaComputeLibvirtType": "qemu", "KeystoneWorkers": 1, "TackerPassword": "TKDa9GwFH3EnQUBz3gRGBhCBD", "MigrationSshKey": {"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzIIqcebaS/enP6w6ZtSSgStrsI79tQCJ6k8Afp6js1qQK0xy7W5Ek7J8L3Nq4z7qIKTnTsp6QLEjwNsHVhB9VDLQybZGIOM8I8riPjh4J2OPHu3ly2qmLsR33uafB/sh2NNN1FcUoSGjg2G26IVlklVpMfEsNRlsTCZP91RqithLPFmMd1Ee7dMe5gVr4960AxR8SZs7CsCEeh6KZ1cnBYNhwPywk15DAr8DFJqAh0SbPS2fS1AJuzL45e+aedDLo+mAZTVZH/TGn+p7mGK3M6W06eB6PQ1B6ImIalcZsn1Rr1p9e89HoDX4nvn5RqreymlZy2FFIGuj6jRfi9ZPh Generated by TripleO", "private_key": "-----BEGIN RSA PRIVATE KEY-----
0.355 | 3311: MIIEowIBAAKCAQEAsyCKnHm2kv3pz+sOmbUkoEra7CO/bUAiepPAH6eo7NakCtMc
0.355 | 3311: u1uRJOyfC9zauM+6iCk507KekCxI8DbB1YQfVQy0Mm2RiDjPCPK4j44eCdjjx7t5
0.355 | 3311: ctqpi7Ed97mnwf7IdjTTdRXFKEho4NhtuiFZZJVaTHxLDUZbEwmT/dUaorYSzxZj
0.355 | 3311: HdRHu3THuYFa+PetAMUfEmbOwrAhHoeimdXJwWDYcD8sJNeQwK/AxSagIdEmz0tn
0.355 | 3311: 0tQCbsy+OXvmnnQy6PpgGU1WR/0xp/qe5hitzOltOngej0NQeiJiGpXGbJ9Ua9af
0.355 | 3311: XvPR6A1+J75+Uaq3sppWcthRSBro+o0X4vWT4QIDAQABAoIBAFhGXCjazn/IA/bE
0.355 | 3311: BxD2tRMNcK/Yuhiz94ni0FaJQHVfHb575Xfrmy7QMV0ePOfA71L0xhuutfek61j+
0.355 | 3311: Ey4PC4XVWeRUHNk97cGiJBQS3HjL2wzuNd7FJbLC2VrgsmynC9E9HUxhKyU78fuE
0.355 | 3311: jSaiErS/6Z0xz6a+GlnaaYU5BQRT1PSOCLp8Xa3Rli2xqGvqzaZgOo9/MR7NglYT
0.355 | 3311: CC1LPTgMjgm+BEJxi5uYS17dCz+GM11OMqwKIYHt8P6bZewqGwIeEA6n/+15O53k
0.355 | 3311: s2rfMCYTWN8apaXyPpZ02p2yhRv81Ftgk/xhPPSafD65fj0l/0UtNn0wncelI76G
0.355 | 3311: ZN+44sECgYEA28qrcuBifPJEYLJ1Ed7wLTLVAFz/WDjwrm9qTdOV+PzKUUw5WIfW
0.355 | 3311: VFxex4yJX1uvsqlnHMEcQBgmO0zJIfa7X6Ey2MUAfBwtECVUZqi5mFzH7SbF42lG
0.355 | 3311: xQxNcEOiU/eOMhSLl4ueX26lMqtuqqyRZMWp/yoGucseqDua7Ore38kCgYEA0KLq
0.355 | 3311: LeE4PrNZgkrP9uhui9noaHVULloi/rUw9HtFFR+JpZFkKLlDnIfLuPfeLXPl93oh
0.355 | 3311: Iv/MNoH/HRrwIWPU9HFcZMGlF7NGRAdqHndjWx4ATAEvEwcrXKI85jcS4H71qIy5
0.355 | 3311: xptvTvcPyBZjTWHFGi3H+Wqbt02Hy2/1PXn1D1kCgYB47mM1Gsa1zqrWtXn6cjqd
0.355 | 3311: 89kEkSahiPrMT1sjJSbgT16WKqVm056W3hf8HZQ8FntuhwzH4mzDy0XF3PLRsCra
0.355 | 3311: bBxI7uPcZbhZqOBud6lebrZGAs5g2LzqkGFPQrdfEdd4MJu5r0VdZsiYP4DtgXOi
0.355 | 3311: CawGTD5VHbBpihos/vHQGQKBgQCMurdXRymXMYbY2fm5REL81nlrrGih827TfAiP
0.355 | 3311: gG6yttkz8wQqa/ztIrQKCje1VQ3zhk+/uedXhQw956O+Agbs82ItHGltuSFWVeIA
0.355 | 3311: tusv1ObCXbIRFa7sBwspBlEXWMv/ax0MmF/WVdQxVNiao6494E+ZL43lcMh+MwCq
0.355 | 3311: /Q6N0QKBgHyne57zVD41vq88rRTSDhFuvYJxGjHByxa5t6QGCCRv7sK/zP1j7kMl
0.355 | 3311: OebB++Zc+AiGbCzENsDjMQHV7nJBzRLeW6oI0rxdYv16tUr8zfg5Z8joo+CNGN6p
0.355 | 3311: b+5xNh4em7LwlULt2pwbAlohH8pAw2w3UAJobDBtNGRsN7SIfgig
0.355 | 3311: -----END RSA PRIVATE KEY-----
0.355 | 3311: "}, "CephMdsKey": "AQD8z7pZAAAAABAAr1/0c5kKa4lSLktRBip69A==", "ControllerServices": ["OS::TripleO::Services::Clustercheck", "OS::TripleO::Services::Docker", "OS::TripleO::Services::Kernel", "OS::TripleO::Services::Keystone", "OS::TripleO::Services::GlanceApi", "OS::TripleO::Services::HeatApi", "OS::TripleO::Services::HeatApiCfn", "OS::TripleO::Services::HeatApiCloudwatch", "OS::TripleO::Services::HeatEngine", "OS::TripleO::Services::MySQL", "OS::TripleO::Services::MySQLClient", "OS::TripleO::Services::NeutronDhcpAgent", "OS::TripleO::Services::NeutronL3Agent", "OS::TripleO::Services::NeutronMetadataAgent", "OS::TripleO::Services::NeutronServer", "OS::TripleO::Services::NeutronCorePlugin", "OS::TripleO::Services::NeutronOvsAgent", "OS::TripleO::Services::RabbitMQ", "OS::TripleO::Services::HAproxy", "OS::TripleO::Services::Keepalived", "OS::TripleO::Services::Memcached", "OS::TripleO::Services::Pacemaker", "OS::TripleO::Services::NovaConductor", "OS::TripleO::Services::NovaApi", "OS::TripleO::Services::NovaPlacement", "OS::TripleO::Services::NovaMetadata", "OS::TripleO::Services::NovaScheduler", "OS::TripleO::Services::Ntp", "OS::TripleO::Services::Snmp", "OS::TripleO::Services::Timezone", "OS::TripleO::Services::NovaCompute", "OS::TripleO::Services::NovaMigrationTarget", "OS::TripleO::Services::NovaLibvirt", "OS::TripleO::Services::SaharaApi", "OS::TripleO::Services::SaharaEngine", "OS::TripleO::Services::MistralApi", "OS::TripleO::Services::MistralEngine", "OS::TripleO::Services::MistralExecutor", "OS::TripleO::Services::TripleoPackages", "OS::TripleO::Services::TripleoFirewall", "OS::TripleO::Services::Sshd", "OS::TripleO::Services::Iscsid"], "AdminPassword": "CFbfJQQjEcBBnQEbqfrXJ7B6B", "CephMonKey": "AQD8z7pZAAAAABAAw4apQhXRJ2c81J8q5/mG4Q==", "CinderWorkers": 1, "SwiftPassword": "RMbWyb76MZBnRGmwmhRYecarB", "CeilometerMeteringSecret": "u9DAWjFQwjBWHTgXpUcQWWNZJ", "NeutronMetadataProxySharedSecret": "Zbkus6GFbmDnNQtfpT4rCepDz", "NovaWorkers": 1, "BarbicanPassword": "38qWTV4FGRxaK4txFdyEQCJKv", "OctaviaPassword": "QEehCbhA36nyhYhZAPFEA4r9G", "UpdateIdentifier": "", "CephRgwKey": "AQD8z7pZAAAAABAAAYUFut/MCfyRDbPhepJpaw=="}, "resource_registry": {"OS::TripleO::Services::Timezone": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/time/timezone.yaml", "OS::TripleO::Services::NeutronCorePluginML2OVN": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2-ovn.yaml", "OS::TripleO::Services::ExternalSwiftProxy": "OS::Heat::None", "OS::TripleO::Hosts::SoftwareConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/hosts-config.yaml", "OS::TripleO::Services::SensuClient": "OS::Heat::None", "OS::TripleO::Services::NeutronL2gwApi": "OS::Heat::None", "OS::TripleO::Services::CACerts": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ca-certs.yaml", "OS::TripleO::Network::Ports::NetIpListMap": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/net_ip_list_map.yaml", "OS::TripleO::Services::SwiftDispersion": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-dispersion.yaml", "OS::TripleO::Services::AodhApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-api.yaml", "OS::TripleO::Services::ManilaBackendVMAX": "OS::Heat::None", "OS::TripleO::DeployedServer::Bootstrap": 
"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/deployed-server/deployed-server-bootstrap-centos.yaml", "OS::TripleO::Services::CinderBackendVRTSHyperScale": "OS::Heat::None", "OS::TripleO::Services::Memcached": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/memcached.yaml", "OS::TripleO::Services::BlockStorageCinderVolume": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-volume.yaml", "OS::TripleO::Services::CephOSD": "OS::Heat::None", "OS::TripleO::Services::Pacemaker": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker.yaml", "OS::TripleO::ControllerConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/controller-config.yaml", "OS::TripleO::Services::CinderBackup": "OS::Heat::None", "OS::TripleO::Services::NeutronVppAgent": "OS::Heat::None", "OS::TripleO::Services::Apache": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/apache.yaml", "OS::TripleO::Services::UndercloudGnocchiApi": "OS::Heat::None", "OS::TripleO::Services::MistralExecutor": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-executor.yaml", "OS::TripleO::Services::CinderBackendDellSc": "OS::Heat::None", "OS::TripleO::Services::CeilometerExpirer": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/ceilometer-expirer-disabled.yaml", "OS::TripleO::Services": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/services.yaml", "OS::TripleO::Services::CinderBackendDellEMCUnity": "OS::Heat::None", "OS::TripleO::Services::PacemakerRemote": "OS::Heat::None", "OS::TripleO::Services::TLSProxyBase": "OS::Heat::None", "OS::TripleO::Services::Iscsid": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/iscsid.yaml", "OS::TripleO::ControllerDeployedServer::Net::SoftwareConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/net-config-static-bridge.yaml", "OS::TripleO::Services::ComputeNeutronL3Agent": "OS::Heat::None", "OS::TripleO::Services::Ec2Api": "OS::Heat::None", "OS::TripleO::Services::SaharaEngine": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-engine.yaml", "OS::TripleO::Ssh::KnownHostsConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/ssh/known_hosts_config.yaml", "OS::TripleO::Services::NeutronLinuxbridgeAgent": "OS::Heat::None", "OS::TripleO::AllNodes::SoftwareConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/all-nodes-config.yaml", "OS::TripleO::ControllerExtraConfigPre": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/extraconfig/pre_deploy/default.yaml", "OS::TripleO::Services::HAProxyInternalTLS": "OS::Heat::None", "OS::TripleO::Services::NovaMigrationTarget": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-migration-target.yaml", "OS::TripleO::Services::Tuned": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/tuned.yaml", "OS::TripleO::Services::NovaApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-api.yaml", 
"OS::TripleO::Services::NeutronML2FujitsuCfab": "OS::Heat::None", "OS::TripleO::Services::UndercloudCeilometerAgentIpmi": "OS::Heat::None", "OS::TripleO::Services::Congress": "OS::Heat::None", "OS::TripleO::DeployedServerEnvironment": "OS::Heat::None", "OS::TripleO::AllNodes::Validation": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/ci/common/all-nodes-validation-disabled.yaml", "OS::TripleO::Services::NeutronMetadataAgent": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-metadata.yaml", "OS::TripleO::Tasks::PackageUpdate": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/yum_update.yaml", "OS::TripleO::Services::ManilaApi": "OS::Heat::None", "OS::TripleO::Network::Ports::NetVipMap": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/net_ip_map.yaml", "OS::TripleO::Network::Ports::NetIpMap": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/net_ip_map.yaml", "OS::TripleO::Services::NeutronCorePluginMidonet": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-midonet.yaml", "OS::TripleO::SoftwareDeployment": "OS::Heat::StructuredDeployment", "OS::TripleO::Services::CinderBackendDellPs": "OS::Heat::None", "OS::TripleO::Controller::Ports::StorageMgmtPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::Etcd": "OS::Heat::None", "OS::TripleO::Services::OctaviaWorker": "OS::Heat::None", "OS::TripleO::Services::CephRbdMirror": "OS::Heat::None", "OS::TripleO::Services::ComputeCeilometerAgent": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-compute.yaml", "OS::TripleO::Services::Ntp": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/time/ntp.yaml", "OS::TripleO::Services::NovaScheduler": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-scheduler.yaml", "OS::TripleO::Services::TripleoFirewall": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/tripleo-firewall.yaml", "OS::TripleO::Services::Sshd": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sshd.yaml", "OS::TripleO::Network::External": "OS::Heat::None", "OS::TripleO::NodeAdminUserData": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/firstboot/userdata_heat_admin.yaml", "OS::TripleO::Services::NeutronL2gwAgent": "OS::Heat::None", "OS::TripleO::LoggingConfiguration": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/logging/fluentd-config.yaml", "OS::TripleO::Services::NovaPlacement": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-placement.yaml", "OS::TripleO::Services::Tacker": "OS::Heat::None", "OS::TripleO::Services::CinderHPELeftHandISCSI": "OS::Heat::None", "OS::TripleO::Services::OctaviaHousekeeping": "OS::Heat::None", "OS::TripleO::Services::UndercloudPankoApi": "OS::Heat::None", "OS::TripleO::Services::NovaConsoleauth": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-consoleauth.yaml", "OS::TripleO::Services::PankoApi": 
"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/panko-api.yaml", "OS::TripleO::Services::UndercloudAodhApi": "OS::Heat::None", "OS::TripleO::Services::ManilaBackendNetapp": "OS::Heat::None", "OS::TripleO::Controller::Ports::StoragePort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::MySQL": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker/database/mysql.yaml", "OS::TripleO::Controller::Ports::InternalApiPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::UpgradeConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/upgrade_config.yaml", "OS::TripleO::Network::Tenant": "OS::Heat::None", "OS::TripleO::Tasks::ControllerPostConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/post_puppet_pacemaker.yaml", "OS::TripleO::Services::ComputeNeutronOvsDpdk": "OS::Heat::None", "OS::TripleO::Services::UndercloudGnocchiMetricd": "OS::Heat::None", "OS::TripleO::Services::NeutronSriovAgent": "OS::Heat::None", "OS::TripleO::Network::Ports::StorageVipPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Controller::Ports::ExternalPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::IronicInspector": "OS::Heat::None", "OS::TripleO::Services::NeutronOvsAgent": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-ovs-agent.yaml", "OS::TripleO::Services::NeutronML2FujitsuFossw": "OS::Heat::None", "OS::TripleO::Ssh::HostPubKey": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/ssh/host_public_key.yaml", "OS::TripleO::AllNodesExtraConfig": "OS::Heat::None", "OS::TripleO::Services::NeutronSriovHostConfig": "OS::Heat::None", "OS::TripleO::Services::OpenDaylightOvs": "OS::Heat::None", "OS::TripleO::Services::NeutronCorePluginML2Nuage": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2-nuage.yaml", "OS::TripleO::DeploymentSteps": "OS::Heat::StructuredDeploymentGroup", "OS::TripleO::Services::Collectd": "OS::Heat::None", "OS::TripleO::RandomString": "OS::Heat::RandomString", "OS::TripleO::Services::ComputeNeutronOvsAgent": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-ovs-agent.yaml", "OS::TripleO::Services::CeilometerCollector": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/ceilometer-collector-disabled.yaml", "OS::TripleO::Network::StorageMgmt": "OS::Heat::None", "OS::TripleO::Services::UndercloudGnocchiStatsd": "OS::Heat::None", "OS::TripleO::Network::Storage": "OS::Heat::None", "OS::TripleO::Services::GnocchiApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-api.yaml", "OS::TripleO::Services::Clustercheck": "OS::Heat::None", "OS::TripleO::Services::HeatApiCfn": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-api-cfn.yaml", "OS::TripleO::DefaultPasswords": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/default_passwords.yaml", 
"OS::TripleO::Controller::Ports::TenantPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::ManilaBackendCephFs": "OS::Heat::None", "OS::TripleO::Services::ManilaBackendVNX": "OS::Heat::None", "OS::TripleO::Services::CinderBackendNetApp": "OS::Heat::None", "OS::TripleO::Services::CephRgw": "OS::Heat::None", "OS::TripleO::Services::HeatEngine": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-engine.yaml", "OS::TripleO::Services::RabbitMQ": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker/rabbitmq.yaml", "OS::TripleO::Services::UndercloudAodhEvaluator": "OS::Heat::None", "OS::TripleO::Services::SwiftStorage": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-storage.yaml", "OS::TripleO::Network::Ports::StorageMgmtVipPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::CinderScheduler": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-scheduler.yaml", "OS::TripleO::Services::IronicConductor": "OS::Heat::None", "OS::TripleO::Controller::PreNetworkConfig": "OS::Heat::None", "OS::TripleO::Services::NeutronCorePluginNuage": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-nuage.yaml", "OS::TripleO::AllNodesDeployment": "OS::Heat::StructuredDeployments", "OS::TripleO::Services::CinderBackendScaleIO": "OS::Heat::None", "OS::TripleO::Services::Zaqar": "OS::Heat::None", "OS::TripleO::Services::OctaviaApi": "OS::Heat::None", "OS::TripleO::Services::CeilometerAgentNotification": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-notification.yaml", "OS::TripleO::NodeExtraConfigPost": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/post_deploy/default.yaml", "OS::TripleO::Services::Keystone": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/keystone.yaml", "OS::TripleO::Services::ComputeNeutronCorePluginNuage": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-compute-plugin-nuage.yaml", "OS::TripleO::Services::AodhListener": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-listener.yaml", "OS::TripleO::Services::NeutronServer": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-api.yaml", "OS::TripleO::Services::OVNDBs": "OS::Heat::None", "OS::TripleO::Services::UndercloudCeilometerAgentNotification": "OS::Heat::None", "OS::TripleO::Services::ManilaScheduler": "OS::Heat::None", "OS::TripleO::Services::ManilaBackendIsilon": "OS::Heat::None", "OS::TripleO::Services::BarbicanApi": "OS::Heat::None", "OS::TripleO::Services::VRTSHyperScale": "OS::Heat::None", "OS::TripleO::Services::SwiftRingBuilder": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-ringbuilder.yaml", "OS::TripleO::Services::NovaLibvirt": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-libvirt.yaml", "OS::TripleO::Tasks::UpdateWorkflow": "OS::Heat::None", "OS::TripleO::Services::UndercloudCeilometerAgentCentral": "OS::Heat::None", 
"OS::TripleO::Services::OVNController": "OS::Heat::None", "OS::TripleO::Network::Ports::ExternalVipPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::ComputeNeutronMetadataAgent": "OS::Heat::None", "OS::TripleO::ServiceServerMetadataHook": "OS::Heat::None", "OS::TripleO::Services::GnocchiMetricd": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-metricd.yaml", "OS::TripleO::Services::AodhEvaluator": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-evaluator.yaml", "OS::TripleO::PostDeploySteps": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/post.yaml", "OS::TripleO::Services::CinderApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-api.yaml", "OS::TripleO::Services::ManilaBackendUnity": "OS::Heat::None", "OS::TripleO::Tasks::ControllerPostPuppetRestart": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/post_puppet_pacemaker_restart.yaml", "OS::TripleO::Network::InternalApi": "OS::Heat::None", "OS::TripleO::Compute::Net::SoftwareConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/ci/common/net-config-multinode.yaml", "OS::TripleO::Services::AuditD": "OS::Heat::None", "OS::TripleO::Services::TripleoPackages": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/tripleo-packages.yaml", "OS::TripleO::NodeExtraConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/extraconfig/pre_deploy/default.yaml", "OS::TripleO::Services::HeatApiCloudwatch": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-api-cloudwatch.yaml", "OS::TripleO::Services::FluentdClient": "OS::Heat::None", "OS::TripleO::Services::Snmp": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/snmp.yaml", "OS::TripleO::Network::Ports::InternalApiVipPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::MistralEngine": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-engine.yaml", "OS::TripleO::Services::GlanceApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/glance-api.yaml", "OS::TripleO::Controller": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/controller-role.yaml", "OS::TripleO::Services::NovaIronic": "OS::Heat::None", "OS::TripleO::Network": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/networks.yaml", "OS::TripleO::Services::ContainersLogrotateCrond": "OS::Heat::None", "OS::TripleO::EndpointMap": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/endpoints/endpoint_map.yaml", "OS::TripleO::Services::MongoDb": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/mongodb-disabled.yaml", "OS::TripleO::Services::NovaCompute": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-compute.yaml", "OS::TripleO::Services::CeilometerAgentIpmi": 
"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-ipmi.yaml", "OS::TripleO::Services::GlanceRegistry": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/glance-registry-disabled.yaml", "OS::TripleO::Network::ExtraConfig": "OS::Heat::None", "OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI": "OS::Heat::None", "OS::TripleO::Services::UndercloudAodhListener": "OS::Heat::None", "OS::TripleO::Services::NeutronApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-api.yaml", "OS::TripleO::NodeTLSCAData": "OS::Heat::None", "OS::TripleO::Services::CertmongerUser": "OS::Heat::None", "OS::TripleO::Services::NeutronL3Agent": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-l3.yaml", "OS::TripleO::Controller::Net::SoftwareConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/ci/common/net-config-multinode.yaml", "OS::TripleO::Services::CephMds": "OS::Heat::None", "OS::TripleO::Services::CeilometerApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/disabled/ceilometer-api-disabled.yaml", "OS::TripleO::Services::SwiftProxy": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/swift-proxy.yaml", "OS::TripleO::Services::OpenDaylightApi": "OS::Heat::None", "OS::TripleO::Services::ManilaBackendGeneric": "OS::Heat::None", "OS::TripleO::Services::NeutronCorePluginNSX": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-nsx.yaml", "OS::TripleO::Services::Vpp": "OS::Heat::None", "OS::TripleO::ControllerPostDeploySteps": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/post.yaml", "OS::TripleO::Services::AodhNotifier": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/aodh-notifier.yaml", "OS::TripleO::Services::SaharaApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/sahara-api.yaml", "OS::TripleO::Services::HAproxy": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/pacemaker/haproxy.yaml", "OS::TripleO::Services::UndercloudAodhNotifier": "OS::Heat::None", "OS::TripleO::Services::IronicApi": "OS::Heat::None", "OS::TripleO::Services::NeutronDhcpAgent": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-dhcp.yaml", "OS::TripleO::Services::OctaviaHealthManager": "OS::Heat::None", "OS::TripleO::Services::Securetty": "OS::Heat::None", "OS::TripleO::Services::NeutronCorePluginPlumgrid": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-plumgrid.yaml", "OS::TripleO::Services::Redis": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/redis.yaml", "OS::TripleO::Network::Ports::RedisVipPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/ctlplane_vip.yaml", "OS::TripleO::Services::MistralApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/mistral-api.yaml", "OS::TripleO::ServiceNetMap": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/service_net_map.yaml", 
"OS::TripleO::Services::MySQLClient": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/database/mysql-client.yaml", "OS::TripleO::Tasks::ControllerPreConfig": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/extraconfig/tasks/pre_puppet_pacemaker.yaml", "OS::TripleO::ControllerServer": "OS::TripleO::Server", "OS::TripleO::Services::ComputeNeutronCorePlugin": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2.yaml", "OS::TripleO::Network::Management": "OS::Heat::None", "OS::TripleO::Server": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/deployed-server/deployed-server.yaml", "OS::TripleO::Services::NovaConductor": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-conductor.yaml", "OS::TripleO::Services::CephMon": "OS::Heat::None", "OS::TripleO::Services::Horizon": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/horizon.yaml", "OS::TripleO::Controller::Ports::ManagementPort": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/network/ports/noop.yaml", "OS::TripleO::Services::Qdr": "OS::Heat::None", "OS::TripleO::Services::HeatApi": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/heat-api.yaml", "OS::TripleO::Network::Ports::ControlPlaneVipPort": "OS::Neutron::Port", "OS::TripleO::Services::CeilometerAgentCentral": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/ceilometer-agent-central.yaml", "OS::TripleO::Services::CephExternal": "OS::Heat::None", "OS::TripleO::PostUpgradeSteps": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/common/post-upgrade.yaml", "OS::TripleO::Services::NovaMetadata": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-metadata.yaml", "OS::TripleO::Services::Docker": "OS::Heat::None", "OS::TripleO::Services::HAProxyPublicTLS": "OS::Heat::None", "OS::TripleO::Services::GnocchiStatsd": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/gnocchi-statsd.yaml", "OS::TripleO::NodeUserData": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/firstboot/userdata_default.yaml", "OS::TripleO::NodeTLSData": "OS::Heat::None", "OS::TripleO::Services::NeutronBgpVpnApi": "OS::Heat::None", "OS::TripleO::Services::ManilaShare": "OS::Heat::None", "OS::TripleO::Services::NovaVncProxy": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/nova-vnc-proxy.yaml", "OS::TripleO::Controller::NodeUserData": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/firstboot/userdata_default.yaml", "OS::TripleO::Services::CephClient": "OS::Heat::None", "OS::TripleO::Services::NeutronLbaasv2Agent": "OS::Heat::None", "OS::TripleO::Services::Kernel": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/kernel.yaml", "OS::TripleO::Services::NeutronBgpVpnBagpipe": "OS::Heat::None", "OS::TripleO::Services::Keepalived": "OS::Heat::None", "OS::TripleO::Services::CinderVolume": "http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/cinder-volume.yaml", "OS::TripleO::DeployedServer::ControlPlanePort": "OS::Neutron::Port", "OS::TripleO::Services::NeutronCorePlugin": 
"http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/overcloud/puppet/services/neutron-plugin-ml2.yaml"}}, "template": {"heat_template_version": "pike", "description": "Deploy an OpenStack environment, consisting of several node types (roles), Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage roles enable independent scaling of the storage components, but the minimal deployment is one Controller and one Compute node.
0.355 | 3311: ", "parameters": {"ControllerParameters": {"default": {}, "type": "json", "description": "Optional Role Specific parameters to be provided to service"}, "HypervisorNeutronPublicInterface": {"default": "nic1", "type": "string", "description": "What interface to add to the HypervisorNeutronPhysicalBridge."}, "ControllerCount": {"default": 1, "type": "number", "description": "Number of Controller nodes to deploy"}, "ExtraConfig": {"default": {}, "type": "json", "description": "Additional hiera configuration to inject into the cluster.
0.355 | 3311: "}, "ControllerRemovalPolicies": {"default": [], "type": "json", "description": "List of resources to be removed from Controller ResourceGroup when doing an update which requires removal of specific resources. Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
0.355 | 3311: "}, "CloudNameCtlplane": {"default": "overcloud.ctlplane.localdomain", "type": "string", "description": "The DNS name of this cloud's provisioning network endpoint. E.g. 'ci-overcloud.ctlplane.tripleo.org'.
0.355 | 3311: "}, "PublicVirtualFixedIPs": {"default": [], "type": "json", "description": "Control the IP allocation for the PublicVirtualInterface port. E.g. [{'ip_address':'1.2.3.4'}]
0.355 | 3311: "}, "StorageMgmtVirtualFixedIPs": {"default": [], "type": "json", "description": "Control the IP allocation for the StorageMgmtVirtualInterface port. E.g. [{'ip_address':'1.2.3.4'}]
0.355 | 3311: "}, "InternalApiVirtualFixedIPs": {"default": [], "type": "json", "description": "Control the IP allocation for the InternalApiVirtualInterface port. E.g. [{'ip_address':'1.2.3.4'}]
0.355 | 3311: "}, "AddVipsToEtcHosts": {"default": true, "type": "boolean", "description": "Set to true to append per network Vips to /etc/hosts on each node.
0.355 | 3311: "}, "CloudNameStorageManagement": {"default": "overcloud.storagemgmt.localdomain", "type": "string", "description": "The DNS name of this cloud's storage_mgmt endpoint. E.g. 'ci-overcloud.storagemgmt.tripleo.org'.
0.355 | 3311: "}, "DeployIdentifier": {"default": "", "type": "string", "description": "Setting this to a unique value will re-run any deployment tasks which perform configuration on a Heat stack-update.
0.355 | 3311: "}, "NeutronPublicInterface": {"default": "nic1", "type": "string", "description": "Which interface to add to the NeutronPhysicalBridge."}, "HypervisorNeutronPhysicalBridge": {"default": "br-ex", "type": "string", "description": "An OVS bridge to create on each hypervisor. This defaults to br-ex the same as the control plane nodes, as we have a uniform configuration of the openvswitch agent. Typically should not need to be changed.
0.355 | 3311: "}, "CloudNameInternal": {"default": "overcloud.internalapi.localdomain", "type": "string", "description": "The DNS name of this cloud's internal_api endpoint. E.g. 'ci-overcloud.internalapi.tripleo.org'.
0.355 | 3311: "}, "ControllerExtraConfig": {"default": {}, "type": "json", "description": "Role specific additional hiera configuration to inject into the cluster.
0.355 | 3311: "}, "StorageVirtualFixedIPs": {"default": [], "type": "json", "description": "Control the IP allocation for the StorageVirtualInterface port. E.g. [{'ip_address':'1.2.3.4'}]
0.355 | 3311: "}, "ControllerHostnameFormat": {"default": "%stackname%-controller-%index%", "type": "string", "description": "Format for Controller node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
0.355 | 3311: "}, "CloudDomain": {"default": "localdomain", "type": "string", "description": "The DNS domain used for the hosts. This must match the overcloud_domain_name configured on the undercloud.
0.355 | 3311: "}, "RabbitCookieSalt": {"default": "unset", "type": "string", "description": "Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change."}, "UpdateIdentifier": {"default": "", "type": "string", "description": "Setting to a previously unused value during stack-update will trigger package update on all nodes
0.355 | 3311: "}, "ControlFixedIPs": {"default": [], "type": "json", "description": "Control the IP allocation for the ControlVirtualIP port. E.g. [{'ip_address':'1.2.3.4'}]
0.355 | 3311: "}, "ControllerServices": {"type": "comma_delimited_list", "description": "A list of service resources (configured in the Heat resource_registry) which represent nested stacks for each service that should get installed on the Controller role."}, "CloudName": {"default": "overcloud.localdomain", "type": "string", "description": "The DNS name of this cloud. E.g. ci-overcloud.tripleo.org"}, "ServerMetadata": {"default": {}, "type": "json", "description": "Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API.
0.355 | 3311: "}, "DeploymentServerBlacklist": {"default": [], "type": "comma_delimited_list", "description": "List of server hostnames to blacklist from any triggered deployments.
0.355 | 3311: "}, "ControllerSchedulerHints": {"default": {}, "type": "json", "description": "Optional scheduler hints to pass to nova"}, "NeutronControlPlaneID": {"default": "ctlplane", "type": "string", "description": "Neutron ID or name for ctlplane network."}, "CloudNameStorage": {"default": "overcloud.storage.localdomain", "type": "string", "description": "The DNS name of this cloud's storage endpoint. E.g. 'ci-overcloud.storage.tripleo.org'.
0.355 | 3311: "}, "NodeCreateBatchSize": {"default": 30, "type": "number", "description": "Maxiumum batch size for creating nodes"}, "RedisVirtualFixedIPs": {"default": [], "type": "json", "description": "Control the IP allocation for the virtual IP used by Redis. E.g. [{'ip_address':'1.2.3.4'}]
0.355 | 3311: "}}, "outputs": {"RoleData": {"description": "The configuration data associated with each role", "value": {"Controller": {"map_merge": [{"get_attr": ["ControllerServiceChainRoleData", "value"]}, {"get_attr": ["ControllerMergedConfigSettings", "value"]}]}}}, "RoleConfig": {"description": "The configuration workflows associated with each role", "value": {"get_attr": ["AllNodesDeploySteps", "RoleConfig"]}}, "VipMap": {"description": "Mapping of each network to VIP addresses. Also includes the Redis VIP.", "value": {"map_merge": [{"get_attr": ["VipMap", "net_ip_map"]}, {"redis": {"get_attr": ["RedisVirtualIP", "ip_address"]}}]}}, "RoleNetIpMap": {"description": "Mapping of each network to a list of IPs for each role", "value": {"Controller": {"get_attr": ["ControllerIpListMap", "net_ip_map"]}}}, "EnabledServices": {"description": "The services enabled on each role", "value": {"Controller": {"get_attr": ["ControllerServiceNames", "value"]}}}, "RoleNetHostnameMap": {"description": "Mapping of each network to a list of hostnames for each role", "value": {"Controller": {"get_attr": ["ControllerNetworkHostnameMap", "value"]}}}, "ManagedEndpoints": {"description": "Asserts that the keystone endpoints have been provisioned.", "value": true}, "EndpointMap": {"description": "Mapping of the resources with the needed info for their endpoints.
0.355 | 3311: This includes the protocol used, the IP, port and also a full
0.355 | 3311: representation of the URI.
0.355 | 3311: ", "value": {"get_attr": ["EndpointMapData", "value"]}}, "HostsEntry": {"description": "The content that should be appended to your /etc/hosts if you want to get
0.355 | 3311: hostname-based access to the deployed nodes (useful for testing without
0.355 | 3311: setting up a DNS).
0.355 | 3311: ", "value": {"list_join": ["
0.355 | 3311: ", [{"get_attr": ["hostsConfig", "hosts_entries"]}], [{"get_attr": ["VipHosts", "value"]}]]}}, "DeployedServerEnvironment": {"description": "Environment data that can be used as input into the services stack when using split-stack.", "value": {"get_attr": ["DeployedServerEnvironment", "deployed_server_environment"]}}, "ServerOsCollectConfigData": {"description": "The os-collect-config configuration associated with each server resource", "value": {"get_attr": ["ServerOsCollectConfigData", "value"]}}, "KeystoneURL": {"description": "URL for the Overcloud Keystone service", "value": {"get_attr": ["EndpointMapData", "value", "KeystonePublic", "uri"]}}, "KeystoneAdminVip": {"description": "Keystone Admin VIP endpoint", "value": {"get_attr": ["VipMap", "net_ip_map", {"get_attr": ["ServiceNetMap", "service_net_map", "KeystoneAdminApiNetwork"]}]}}, "ServerIdData": {"description": "Mapping of each role to a list of nova server IDs and the bootstrap ID", "value": {"get_attr": ["ServerIdMap", "value"]}}}, "conditions": {"add_vips_to_etc_hosts": {"equals": [{"get_param": "AddVipsToEtcHosts"}, true]}}, "resources": {"ControllerServers": {"depends_on": "Controller", "type": "OS::Heat::Value", "properties": {"type": "json", "value": {"yaql": {"expression": "let(servers=>switch(isDict($.data.servers) => $.data.servers, true => {})) -> $servers.deleteAll($servers.keys().where($servers[$] = null))", "data": {"servers": {"get_attr": ["Controller", "attributes", "nova_server_resource"]}}}}}}, "UpdateWorkflow": {"depends_on": ["ControllerAllNodesDeployment"], "type": "OS::TripleO::Tasks::UpdateWorkflow", "properties": {"input_values": {"update_identifier": {"get_param": "UpdateIdentifier"}, "deploy_identifier": {"get_param": "DeployIdentifier"}}, "servers": {"Controller": {"get_attr": ["ControllerServers", "value"]}}}}, "NetCidrMapValue": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"map_replace": [{"map_merge": [{"get_attr": ["Networks", "net_cidr_map"]}, {"ctlplane": {"get_attr": ["ControlVirtualIP", "subnets", 0, "cidr"]}}]}, {"keys": {"ctlplane": {"get_param": "NeutronControlPlaneID"}}, "values": {"disabled": {"get_attr": ["ControlVirtualIP", "subnets", 0, "cidr"]}}}]}}}, "ServerIdMap": {"type": "OS::Heat::Value", "properties": {"value": {"server_ids": {"Controller": {"get_attr": ["Controller", "nova_server_resource"]}}, "bootstrap_server_id": {"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "nova_server_resource"]}}}}}}, "ServiceNetMap": {"type": "OS::TripleO::ServiceNetMap"}, "hostsConfig": {"type": "OS::TripleO::Hosts::SoftwareConfig", "properties": {"hosts": {"list_join": ["
0.355 | 3311: ", [{"if": ["add_vips_to_etc_hosts", {"get_attr": ["VipHosts", "value"]}, ""]}], [{"list_join": ["", {"get_attr": ["Controller", "hosts_entry"]}]}]]}}}, "ControllerIpListMap": {"type": "OS::TripleO::Network::Ports::NetIpListMap", "properties": {"NetworkHostnameMap": {"get_attr": ["ControllerNetworkHostnameMap", "value"]}, "TenantIpList": {"get_attr": ["Controller", "tenant_ip_address"]}, "EnabledServices": {"get_attr": ["ControllerServiceNames", "value"]}, "ServiceNetMap": {"get_attr": ["ServiceNetMap", "service_net_map_lower"]}, "ServiceHostnameList": {"get_attr": ["Controller", "hostname"]}, "ExternalIpList": {"get_attr": ["Controller", "external_ip_address"]}, "StorageMgmtIpList": {"get_attr": ["Controller", "storage_mgmt_ip_address"]}, "InternalApiIpList": {"get_attr": ["Controller", "internal_api_ip_address"]}, "StorageIpList": {"get_attr": ["Controller", "storage_ip_address"]}, "ControlPlaneIpList": {"get_attr": ["Controller", "ip_address"]}}}, "AllNodesExtraConfig": {"depends_on": ["UpdateWorkflow", "ControllerAllNodesValidationDeployment"], "type": "OS::TripleO::AllNodesExtraConfig", "properties": {"servers": {"Controller": {"get_attr": ["ControllerServers", "value"]}}}}, "allNodesConfig": {"type": "OS::TripleO::AllNodes::SoftwareConfig", "properties": {"short_service_bootstrap_node": {"yaql": {"expression": "dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten().first()]))", "data": {"l": [{"get_attr": ["ControllerIpListMap", "short_service_bootstrap_hostnames"]}]}}}, "service_ips": {"yaql": {"expression": "dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))", "data": {"l": [{"get_attr": ["ControllerIpListMap", "service_ips"]}]}}}, "cloud_name_storage_mgmt": {"get_param": "CloudNameStorageManagement"}, "cloud_name_storage": {"get_param": "CloudNameStorage"}, "DeployIdentifier": {"get_param": "DeployIdentifier"}, "short_service_node_names": {"yaql": {"expression": "dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))", "data": {"l": [{"get_attr": ["ControllerIpListMap", "short_service_hostnames"]}]}}}, "cloud_name_internal_api": {"get_param": "CloudNameInternal"}, "controller_ips": {"get_attr": ["Controller", "ip_address"]}, "RedisVirtualIP": {"get_attr": ["RedisVirtualIP", "ip_address"]}, "controller_names": {"get_attr": ["Controller", "hostname"]}, "cloud_name_external": {"get_param": "CloudName"}, "cloud_name_ctlplane": {"get_param": "CloudNameCtlplane"}, "ServiceNetMap": {"get_attr": ["ServiceNetMap", "service_net_map_lower"]}, "service_node_names": {"yaql": {"expression": "dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))", "data": {"l": [{"get_attr": ["ControllerIpListMap", "service_hostnames"]}]}}}, "NetVipMap": {"get_attr": ["VipMap", "net_ip_map"]}, "enabled_services": {"list_join": [",", {"get_attr": ["ControllerServiceNames", "value"]}]}, "UpdateIdentifier": {"get_param": "UpdateIdentifier"}}}, "ControllerServiceNames": {"depends_on": "ControllerServiceChain", "type": "OS::Heat::Value", "properties": {"type": "comma_delimited_list", "value": {"yaql": {"expression": "coalesce($.data, []).where($ != null)", "data": {"get_attr": ["ControllerServiceChainRoleData", "value", "service_names"]}}}}}, "ControllerServiceChain": {"type": "OS::TripleO::Services", "properties": {"ServiceData": {"net_cidr_map": {"get_attr": ["NetCidrMapValue", "value"]}}, "DefaultPasswords": {"get_attr": 
["DefaultPasswords", "passwords"]}, "EndpointMap": {"get_attr": ["EndpointMap", "endpoint_map"]}, "Services": {"get_param": "ControllerServices"}, "RoleName": "Controller", "ServiceNetMap": {"get_attr": ["ServiceNetMap", "service_net_map"]}, "RoleParameters": {"get_param": "ControllerParameters"}}}, "ServerOsCollectConfigData": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"Controller": {"get_attr": ["Controller", "attributes", "os_collect_config"]}}}}, "StorageVirtualIP": {"depends_on": "Networks", "type": "OS::TripleO::Network::Ports::StorageVipPort", "properties": {"PortName": "storage_virtual_ip", "FixedIPs": {"get_param": "StorageVirtualFixedIPs"}, "ControlPlaneIP": {"get_attr": ["ControlVirtualIP", "fixed_ips", 0, "ip_address"]}}}, "ControllerSshKnownHostsDeployment": {"type": "OS::Heat::StructuredDeployments", "properties": {"config": {"get_resource": "SshKnownHostsConfig"}, "name": "ControllerSshKnownHostsDeployment", "servers": {"get_attr": ["ControllerServers", "value"]}}}, "SshKnownHostsConfig": {"type": "OS::TripleO::Ssh::KnownHostsConfig", "properties": {"known_hosts": {"list_join": ["", {"get_attr": ["Controller", "known_hosts_entry"]}]}}}, "RedisVirtualIP": {"depends_on": "Networks", "type": "OS::TripleO::Network::Ports::RedisVipPort", "properties": {"FixedIPs": {"get_param": "RedisVirtualFixedIPs"}, "ServiceName": "redis", "ControlPlaneNetwork": {"get_param": "NeutronControlPlaneID"}, "PortName": "redis_virtual_ip", "ControlPlaneIP": {"get_attr": ["ControlVirtualIP", "fixed_ips", 0, "ip_address"]}, "NetworkName": {"get_attr": ["ServiceNetMap", "service_net_map", "RedisNetwork"]}}}, "ControllerAllNodesValidationDeployment": {"depends_on": "ControllerAllNodesDeployment", "type": "OS::Heat::StructuredDeployments", "properties": {"config": {"get_resource": "AllNodesValidationConfig"}, "name": "ControllerAllNodesValidationDeployment", "servers": {"get_attr": ["ControllerServers", "value"]}}}, "PcsdPassword": {"type": "OS::TripleO::RandomString", "properties": {"length": 16}}, "RabbitCookie": {"type": "OS::TripleO::RandomString", "properties": {"length": 20, "salt": {"get_param": "RabbitCookieSalt"}}}, "ControlVirtualIP": {"depends_on": "Networks", "type": "OS::TripleO::Network::Ports::ControlPlaneVipPort", "properties": {"replacement_policy": "AUTO", "fixed_ips": {"get_param": "ControlFixedIPs"}, "name": "control_virtual_ip", "network": {"get_param": "NeutronControlPlaneID"}}}, "EndpointMapData": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"get_attr": ["EndpointMap", "endpoint_map"]}}}, "Controller": {"depends_on": "Networks", "type": "OS::Heat::ResourceGroup", "properties": {"count": {"get_param": "ControllerCount"}, "resource_def": {"type": "OS::TripleO::Controller", "properties": {"NodeIndex": "%index%", "Hostname": {"str_replace": {"params": {"%stackname%": {"get_param": "OS::stack_name"}}, "template": {"get_param": "ControllerHostnameFormat"}}}, "CloudDomain": {"get_param": "CloudDomain"}, "LoggingSources": {"get_attr": ["ControllerServiceChainRoleData", "value", "logging_sources"]}, "MonitoringSubscriptions": {"get_attr": ["ControllerServiceChainRoleData", "value", "monitoring_subscriptions"]}, "ServiceNetMap": {"get_attr": ["ServiceNetMap", "service_net_map"]}, "LoggingGroups": {"get_attr": ["ControllerServiceChainRoleData", "value", "logging_groups"]}, "DeploymentServerBlacklistDict": {"get_attr": ["DeploymentServerBlacklistDict", "value"]}, "ServiceConfigSettings": {"get_attr": ["ControllerServiceConfigSettings", 
"value"]}, "RoleParameters": {"get_param": "ControllerParameters"}, "ControllerSchedulerHints": {"map_merge": [{"get_param": "ControllerSchedulerHints"}]}, "EndpointMap": {"get_attr": ["EndpointMap", "endpoint_map"]}, "ServiceMetadataSettings": {"get_attr": ["ControllerServiceChainRoleData", "value", "service_metadata_settings"]}, "ServiceNames": {"get_attr": ["ControllerServiceNames", "value"]}}}, "removal_policies": {"get_param": "ControllerRemovalPolicies"}}, "update_policy": {"batch_create": {"max_batch_size": {"get_param": "NodeCreateBatchSize"}}}}, "ControllerServiceConfigSettings": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"map_merge": [{"get_attr": ["ControllerServiceChainRoleData", "value", "config_settings"]}, {"get_attr": ["ControllerServiceChainRoleData", "value", "global_config_settings"]}, {"yaql": {"expression": "let(root => $) -> $.data.map.items().where($[0] in coalesce($root.data.services, [])).select($[1]).reduce($1.mergeWith($2), {})", "data": {"services": {"get_attr": ["ControllerServiceNames", "value"]}, "map": {"yaql": {"expression": "$.data.where($ != null).reduce($1.mergeWith($2), {})", "data": [{"get_attr": ["ControllerServiceChainRoleData", "value", "service_config_settings"]}]}}}}}]}}}, "MysqlRootPassword": {"type": "OS::TripleO::RandomString", "properties": {"length": 10}}, "ControllerServiceChainRoleData": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"get_attr": ["ControllerServiceChain", "role_data"]}}}, "DeployedServerEnvironment": {"type": "OS::TripleO::DeployedServerEnvironment", "properties": {"DeployedServerDeploymentSwiftDataMap": {"map_merge": {"list_concat": [{"get_attr": ["Controller", "deployed_server_deployment_swift_data_map"]}]}}, "DefaultRouteIp": {"str_split": [":", {"str_split": ["/", {"get_attr": ["ServerOsCollectConfigData", "value", "Controller", "0", "request", "metadata_url"]}, 2]}, 0]}, "DeployedServerPortMap": {"map_merge": {"list_concat": [{"get_attr": ["Controller", "deployed_server_port_map"]}]}}, "RoleCounts": {"ControllerDeployedServerCount": {"get_param": "ControllerCount"}}, "VipMap": {"map_merge": [{"get_attr": ["VipMap", "net_ip_map"]}, {"redis": {"get_attr": ["RedisVirtualIP", "ip_address"]}}]}}}, "InternalApiVirtualIP": {"depends_on": "Networks", "type": "OS::TripleO::Network::Ports::InternalApiVipPort", "properties": {"PortName": "internal_api_virtual_ip", "FixedIPs": {"get_param": "InternalApiVirtualFixedIPs"}, "ControlPlaneIP": {"get_attr": ["ControlVirtualIP", "fixed_ips", 0, "ip_address"]}}}, "AllNodesDeploySteps": {"depends_on": ["AllNodesExtraConfig", "ControllerAllNodesDeployment"], "type": "OS::TripleO::PostDeploySteps", "properties": {"stack_name": {"get_param": "OS::stack_name"}, "EndpointMap": {"get_attr": ["EndpointMap", "endpoint_map"]}, "ctlplane_service_ips": {"yaql": {"expression": "dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))", "data": {"l": [{"get_attr": ["ControllerIpListMap", "ctlplane_service_ips"]}]}}}, "role_data": {"Controller": {"map_merge": [{"get_attr": ["ControllerServiceChainRoleData", "value"]}, {"get_attr": ["ControllerMergedConfigSettings", "value"]}]}}, "servers": {"Controller": {"get_attr": ["ControllerServers", "value"]}}}}, "AllNodesValidationConfig": {"type": "OS::TripleO::AllNodes::Validation", "properties": {"PingTestIps": {"list_join": [" ", [{"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "external_ip_address"]}}}, {"yaql": 
{"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "internal_api_ip_address"]}}}, {"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "storage_ip_address"]}}}, {"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "storage_mgmt_ip_address"]}}}, {"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "tenant_ip_address"]}}}]]}}}, "VipMap": {"depends_on": "ServiceNetMap", "type": "OS::TripleO::Network::Ports::NetVipMap", "properties": {"ExternalIp": {"get_attr": ["PublicVirtualIP", "ip_address"]}, "StorageIp": {"get_attr": ["StorageVirtualIP", "ip_address"]}, "ControlPlaneIp": {"get_attr": ["ControlVirtualIP", "fixed_ips", 0, "ip_address"]}, "InternalApiIp": {"get_attr": ["InternalApiVirtualIP", "ip_address"]}, "InternalApiIpUri": {"get_attr": ["InternalApiVirtualIP", "ip_address_uri"]}, "StorageIpUri": {"get_attr": ["StorageVirtualIP", "ip_address_uri"]}, "StorageMgmtIpUri": {"get_attr": ["StorageMgmtVirtualIP", "ip_address_uri"]}, "ExternalIpUri": {"get_attr": ["PublicVirtualIP", "ip_address_uri"]}, "StorageMgmtIp": {"get_attr": ["StorageMgmtVirtualIP", "ip_address"]}}}, "HeatAuthEncryptionKey": {"type": "OS::TripleO::RandomString"}, "DefaultPasswords": {"type": "OS::TripleO::DefaultPasswords", "properties": {"DefaultMysqlRootPassword": {"get_attr": ["MysqlRootPassword", "value"]}, "DefaultHorizonSecret": {"get_attr": ["HorizonSecret", "value"]}, "DefaultHeatAuthEncryptionKey": {"get_attr": ["HeatAuthEncryptionKey", "value"]}, "DefaultRabbitCookie": {"get_attr": ["RabbitCookie", "value"]}, "DefaultPcsdPassword": {"get_attr": ["PcsdPassword", "value"]}}}, "EndpointMap": {"type": "OS::TripleO::EndpointMap", "properties": {"NetIpMap": {"get_attr": ["VipMap", "net_ip_map"]}, "CloudEndpoints": {"storage_mgmt": {"get_param": "CloudNameStorageManagement"}, "ctlplane": {"get_param": "CloudNameCtlplane"}, "storage": {"get_param": "CloudNameStorage"}, "external": {"get_param": "CloudName"}, "internal_api": {"get_param": "CloudNameInternal"}}, "ServiceNetMap": {"get_attr": ["ServiceNetMap", "service_net_map"]}}}, "ControllerNetworkHostnameMap": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"yaql": {"expression": "dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))", "data": [{"get_attr": ["Controller", "hostname_map"]}]}}}}, "ControllerMergedConfigSettings": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"service_config_settings": {}, "global_config_settings": {}, "config_settings": {}, "merged_config_settings": {"map_merge": [{"get_attr": ["ControllerServiceConfigSettings", "value"]}, {"get_param": "ExtraConfig"}, {"get_param": "ControllerExtraConfig"}]}}}}, "DeploymentServerBlacklistDict": {"type": "OS::Heat::Value", "properties": {"type": "json", "value": {"map_merge": {"repeat": {"for_each": {"hostname": {"get_param": "DeploymentServerBlacklist"}}, "template": {"hostname": 1}}}}}}, "StorageMgmtVirtualIP": {"depends_on": "Networks", "type": "OS::TripleO::Network::Ports::StorageMgmtVipPort", "properties": {"PortName": "storage_management_virtual_ip", "FixedIPs": {"get_param": "StorageMgmtVirtualFixedIPs"}, "ControlPlaneIP": {"get_attr": ["ControlVirtualIP", "fixed_ips", 0, "ip_address"]}}}, "HorizonSecret": {"type": "OS::TripleO::RandomString", "properties": {"length": 10}}, "ControllerAllNodesDeployment": {"depends_on": 
["ControllerHostsDeployment"], "type": "OS::TripleO::AllNodesDeployment", "properties": {"input_values": {"bootstrap_nodeid": {"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "hostname"]}}}, "bootstrap_nodeid_ip": {"yaql": {"expression": "coalesce($.data, []).first(null)", "data": {"get_attr": ["Controller", "ip_address"]}}}}, "config": {"get_attr": ["allNodesConfig", "config_id"]}, "name": "ControllerAllNodesDeployment", "servers": {"get_attr": ["ControllerServers", "value"]}}}, "VipHosts": {"type": "OS::Heat::Value", "properties": {"type": "string", "value": {"list_join": ["
0.355 | 3311: ", [{"str_replace": {"params": {"IP": {"get_attr": ["VipMap", "net_ip_map", "ctlplane"]}, "HOST": {"get_param": "CloudNameCtlplane"}}, "template": "IP HOST"}}, {"str_replace": {"params": {"IP": {"get_attr": ["VipMap", "net_ip_map", "external"]}, "HOST": {"get_param": "CloudName"}}, "template": "IP HOST"}}, {"str_replace": {"params": {"IP": {"get_attr": ["VipMap", "net_ip_map", "internal_api"]}, "HOST": {"get_param": "CloudNameInternal"}}, "template": "IP HOST"}}, {"str_replace": {"params": {"IP": {"get_attr": ["VipMap", "net_ip_map", "storage"]}, "HOST": {"get_param": "CloudNameStorage"}}, "template": "IP HOST"}}, {"str_replace": {"params": {"IP": {"get_attr": ["VipMap", "net_ip_map", "storage_mgmt"]}, "HOST": {"get_param": "CloudNameStorageManagement"}}, "template": "IP HOST"}}]]}}}, "PublicVirtualIP": {"depends_on": "Networks", "type": "OS::TripleO::Network::Ports::ExternalVipPort", "properties": {"PortName": "public_virtual_ip", "FixedIPs": {"get_param": "PublicVirtualFixedIPs"}, "ControlPlaneIP": {"get_attr": ["ControlVirtualIP", "fixed_ips", 0, "ip_address"]}, "ControlPlaneNetwork": {"get_param": "NeutronControlPlaneID"}}}, "Networks": {"type": "OS::TripleO::Network"}, "ControllerHostsDeployment": {"type": "OS::Heat::StructuredDeployments", "properties": {"config": {"get_attr": ["hostsConfig", "config_id"]}, "name": "ControllerHostsDeployment", "servers": {"get_attr": ["ControllerServers", "value"]}}}}}}' http://192.168.24.1:8004/v1/2bc0cdfdc3664800b9a07e4b9e4b0882/validate log_curl_request /usr/lib/python2.7/site-packages/heatclient/common/http.py:141
0.000 | 3312: 2017-09-14 19:02:10.545 2342 DEBUG heatclient.common.http [req-b09321cf-f105-401a-be3d-a7e01aa86a87 ad40ba3b56b8405a8843c9e9e0032aec 2bc0cdfdc3664800b9a07e4b9e4b0882 - default default]
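
Several resources in the template above lean on the same yaql idiom, "coalesce($.data, []).first(null)", to pick the bootstrap node (the first Controller) while tolerating a null or empty attribute list. As a minimal sketch, assuming the yaql library from PyPI (the same expression language Heat evaluates here), the idiom can be exercised standalone:

    import yaql

    # Build a yaql engine and compile the bootstrap-node expression,
    # quoted verbatim from the template above.
    engine = yaql.factory.YaqlFactory().create()
    expr = engine("coalesce($.data, []).first(null)")

    # With a populated attribute list, the first Controller wins...
    print(expr.evaluate(data={"data": ["controller-0", "controller-1"]}))  # controller-0
    # ...and a null list degrades to null instead of raising.
    print(expr.evaluate(data={"data": None}))  # None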

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/messages.txt.gz
0.000 | 0034: Sep 14 17:59:00 localhost kernel: NODE_DATA(0) allocated [mem 0x233fd7000-0x233ffdfff]
0.000 | 0035: Sep 14 17:59:00 localhost kernel: kvm-clock: Using msrs 4b564d01 and 4b564d00
0.000 | 0036: Sep 14 17:59:00 localhost kernel: kvm-clock: cpu 0, msr 2:33f87001, primary cpu clock
0.329 | 0037: Sep 14 17:59:00 localhost kernel: kvm-clock: using sched offset of 1239040549 cycles
0.000 | 0038: Sep 14 17:59:00 localhost kernel: Zone ranges:

0.000 | 0083: Sep 14 17:59:00 localhost kernel: Kernel command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64 root=LABEL=cloudimg-rootfs ro nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 no_timer_check
0.000 | 0084: Sep 14 17:59:00 localhost kernel: PID hash table entries: 4096 (order: 3, 32768 bytes)
0.000 | 0085: Sep 14 17:59:00 localhost kernel: x86/fpu: xstate_offset[2]: 0240, xstate_sizes[2]: 0100
0.510 | 0086: Sep 14 17:59:00 localhost kernel: xsave: enabled xstate_bv 0x7, cntxt size 0x340 using standard form
0.000 | 0087: Sep 14 17:59:00 localhost kernel: Memory: 5139612k/9240576k available (6886k kernel code, 1049112k absent, 234036k reserved, 4545k data, 1764k init)

0.000 | 0126: Sep 14 17:59:00 localhost kernel: Enabled x2apic
0.000 | 0127: Sep 14 17:59:00 localhost kernel: Switched APIC routing to physical x2apic.
0.000 | 0128: Sep 14 17:59:00 localhost kernel: ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
0.268 | 0129: Sep 14 17:59:00 localhost kernel: smpboot: CPU0: Intel Core Processor (Haswell, no TSX) (fam: 06, model: 3c, stepping: 01)
0.000 | 0130: Sep 14 17:59:00 localhost kernel: Performance Events: unsupported p6 CPU model 60 no PMU driver, software events only.

0.000 | 0780: Sep 14 17:59:03 localhost growroot: + resize2fs /dev/vda1
0.000 | 0781: Sep 14 17:59:03 localhost growroot: resize2fs 1.42.9 (28-Dec-2013)
0.000 | 0782: Sep 14 17:59:03 localhost kernel: EXT4-fs (vda1): resizing filesystem from 3265664 to 20971259 blocks
0.519 | 0783: Sep 14 17:59:03 localhost dhclient[705]: DHCPDISCOVER on eth0 to 255.255.255.255 port 67 interval 3 (xid=0x747611ab)
0.340 | 0784: Sep 14 17:59:03 localhost dhclient[705]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x747611ab)
0.330 | 0785: Sep 14 17:59:03 localhost dhclient[705]: DHCPOFFER from 149.202.160.1
0.376 | 0786: Sep 14 17:59:03 localhost dhclient[705]: DHCPACK from 149.202.160.1 (xid=0x747611ab)
0.542 | 0787: Sep 14 17:59:03 localhost dhclient[705]: suspect value in domain_search option - discarded
0.000 | 0788: Sep 14 17:59:03 localhost kernel: EXT4-fs (vda1): resized filesystem to 20971259

0.000 | 0794: Sep 14 17:59:04 localhost ntpd_intres[529]: host name not found: 1.centos.pool.ntp.org
0.000 | 0795: Sep 14 17:59:04 localhost ntpd_intres[529]: host name not found: 2.centos.pool.ntp.org
0.000 | 0796: Sep 14 17:59:04 localhost ntpd_intres[529]: host name not found: 3.centos.pool.ntp.org
0.708 | 0797: Sep 14 17:59:05 localhost NET[756]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.525 | 0798: Sep 14 17:59:05 localhost dhclient[705]: Error printing text.
0.604 | 0799: Sep 14 17:59:05 localhost dhclient[705]: bound to 149.202.161.193 -- renewal in 40600 seconds.
0.579 | 0800: Sep 14 17:59:05 localhost ifup: Determining IP information for eth0... done.
0.000 | 0801: Sep 14 17:59:05 localhost systemd: Started Glean for interface eth0.

0.000 | 0804: Sep 14 17:59:05 localhost systemd: Starting LSB: Bring up/down networking...
0.000 | 0805: Sep 14 17:59:05 localhost network: Bringing up loopback interface: [ OK ]
0.000 | 0806: Sep 14 17:59:06 localhost network: Bringing up interface eth0:
0.618 | 0807: Sep 14 17:59:06 localhost dhclient[929]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.647 | 0808: Sep 14 17:59:06 localhost network: Determining IP information for eth0...Can't create /var/run/dhclient-eth0.pid: Permission denied
0.340 | 0809: Sep 14 17:59:06 localhost dhclient[929]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x5e46b79f)
0.000 | 0810: Sep 14 17:59:06 localhost kernel: iptables dropped: IN=eth0 OUT= MAC=ff:ff:ff:ff:ff:ff:86:a7:eb:72:81:f5:08:00 SRC=149.202.160.1 DST=255.255.255.255 LEN=309 TOS=0x00 PREC=0x00 TTL=64 ID=0 PROTO=UDP SPT=67 DPT=68 LEN=289
0.376 | 0811: Sep 14 17:59:06 localhost dhclient[929]: DHCPACK from 149.202.160.1 (xid=0x5e46b79f)
0.542 | 0812: Sep 14 17:59:06 localhost dhclient[929]: suspect value in domain_search option - discarded
0.000 | 0813: Sep 14 17:59:07 localhost ntpd[521]: Listen normally on 4 eth0 149.202.161.193 UDP 123
0.000 | 0814: Sep 14 17:59:07 localhost ntpd[521]: Listen normally on 5 eth0 fe80::f816:3eff:fe8f:1c3d UDP 123
0.708 | 0815: Sep 14 17:59:08 localhost NET[979]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.525 | 0816: Sep 14 17:59:08 localhost dhclient[929]: Error printing text.
0.604 | 0817: Sep 14 17:59:08 localhost dhclient[929]: bound to 149.202.161.193 -- renewal in 38236 seconds.
0.618 | 0818: Sep 14 17:59:08 localhost dhclient[981]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.528 | 0819: Sep 14 17:59:08 localhost network: done.
0.000 | 0820: Sep 14 17:59:08 localhost network: [ OK ]

0.000 | 9789: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd: [/usr/lib/systemd/system/memcached.service:72] Unknown lvalue 'RestrictNamespaces' in section 'Service'
0.000 | 9790: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd: Configuration file /etc/systemd/system/glean@.service.d/override.conf is marked executable. Please remove executable permission bits. Proceeding anyway.
0.000 | 9791: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd: Starting Simple Network Management Protocol (SNMP) Daemon....
0.627 | 9792: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 snmpd[79706]: Duplicate IPv4 address detected, some interfaces may not be visible in IP-MIB
0.000 | 9793: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd: Started Simple Network Management Protocol (SNMP) Daemon..

0.000 | 10105: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 systemd: [/usr/lib/systemd/system/memcached.service:72] Unknown lvalue 'RestrictNamespaces' in section 'Service'
0.000 | 10106: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 systemd: Configuration file /etc/systemd/system/glean@.service.d/override.conf is marked executable. Please remove executable permission bits. Proceeding anyway.
0.000 | 10107: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 puppet-user[74902]: (/Stage[main]/Nova::Scheduler/Nova::Generic_service[scheduler]/Service[nova-scheduler]/ensure) ensure changed 'stopped' to 'running'
0.584 | 10108: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: Traceback (most recent call last):
0.597 | 10109: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/eventlet/hubs/hub.py", line 457, in fire_timers
0.444 | 10110: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: timer()
0.643 | 10111: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/eventlet/hubs/timer.py", line 58, in __call__
0.462 | 10112: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: cb(*args, **kw)
0.505 | 10113: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/nova/utils.py", line 931, in context_wrapper
0.462 | 10114: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: func(*args, **kwargs)
0.394 | 10115: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/nova/scheduler/host_manager.py", line 431, in _async_init_instance_info
0.405 | 10116: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: cctxt).objects
0.573 | 10117: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_versionedobjects/base.py", line 184, in wrapper
0.638 | 10118: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: result = fn(cls, context, *args, **kwargs)
0.490 | 10119: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/nova/objects/compute_node.py", line 389, in get_all
0.481 | 10120: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: db_computes = db.compute_node_get_all(context)
0.532 | 10121: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/nova/db/api.py", line 260, in compute_node_get_all
0.540 | 10122: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return IMPL.compute_node_get_all(context)
0.568 | 10123: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/nova/db/sqlalchemy/api.py", line 264, in wrapped
0.572 | 10124: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: with ctxt_mgr.reader.using(context):
0.456 | 10125: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
0.594 | 10126: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return self.gen.next()
0.293 | 10127: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 1028, in _transaction_scope
0.674 | 10128: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: context=context) as resource:
0.456 | 10129: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
0.594 | 10130: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return self.gen.next()
0.293 | 10131: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 633, in _session
0.658 | 10132: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: bind=self.connection, mode=self.mode)
0.293 | 10133: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 398, in _create_session
0.474 | 10134: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: self._start()
0.293 | 10135: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 484, in _start
0.193 | 10136: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: engine_args, maker_args)
0.293 | 10137: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 508, in _setup_for_connection
0.193 | 10138: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: sql_connection=sql_connection, **engine_kwargs)
0.431 | 10139: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/engines.py", line 179, in create_engine
0.592 | 10140: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: test_conn = _test_connection(engine, max_retries, retry_interval)
0.431 | 10141: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/engines.py", line 357, in _test_connection
0.588 | 10142: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return engine.connect()
0.671 | 10143: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2091, in connect
0.611 | 10144: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return self._connection_cls(self, **kwargs)
0.648 | 10145: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 90, in __init__
0.506 | 10146: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: if connection is not None else engine.raw_connection()
0.613 | 10147: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2177, in raw_connection
0.563 | 10148: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: self.pool.unique_connection, _connection)
0.613 | 10149: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2147, in _wrap_pool_connect
0.395 | 10150: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return fn()
0.620 | 10151: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 328, in unique_connection
0.535 | 10152: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return _ConnectionFairy._checkout(self)
0.620 | 10153: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 766, in _checkout
0.437 | 10154: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: fairy = _ConnectionRecord.checkout(pool)
0.620 | 10155: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 516, in checkout
0.437 | 10156: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: rec = pool._do_get()
0.620 | 10157: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 1138, in _do_get
0.474 | 10158: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: self._dec_overflow()
0.555 | 10159: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/util/langhelpers.py", line 66, in __exit__
0.491 | 10160: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: compat.reraise(exc_type, exc_value, exc_tb)
0.620 | 10161: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 1135, in _do_get
0.535 | 10162: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return self._create_connection()
0.620 | 10163: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 333, in _create_connection
0.535 | 10164: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return _ConnectionRecord(self)
0.653 | 10165: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 461, in __init__
0.502 | 10166: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: self.__connect(first_connect_check=True)
0.620 | 10167: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 651, in __connect
0.606 | 10168: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: connection = pool._invoke_creator(self)
0.694 | 10169: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/strategies.py", line 105, in connect
0.556 | 10170: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return dialect.connect(*cargs, **cparams)
0.678 | 10171: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 393, in connect
0.621 | 10172: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return self.dbapi.connect(*cargs, **cparams)
0.653 | 10173: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/pymysql/__init__.py", line 90, in Connect
0.589 | 10174: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: return Connection(*args, **kwargs)
0.639 | 10175: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib/python2.7/site-packages/pymysql/connections.py", line 618, in __init__
0.560 | 10176: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: cfg.read(os.path.expanduser(read_default_file))
0.525 | 10177: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/ConfigParser.py", line 305, in read
0.474 | 10178: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: self._read(fp, filename)
0.456 | 10179: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: File "/usr/lib64/python2.7/ConfigParser.py", line 546, in _read
0.193 | 10180: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: raise e
0.610 | 10181: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: ParsingError: File contains parsing errors: /etc/my.cnf
0.557 | 10182: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler: [line 17]: '!includedir /etc/my.cnf.d
0.557 | 10182: '
0.000 | 10183: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 systemd: Reloading.
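
The traceback above points at the likely root cause in this run: nova-scheduler crashes while pymysql honours read_default_file on /etc/my.cnf, because MySQL's "!includedir" directive is not valid ConfigParser syntax. A minimal, hypothetical reproduction (shown with Python 3's configparser for convenience; the job hit the equivalent error in Python 2's ConfigParser, as the traceback shows):

    import configparser

    # An option file shaped like the /etc/my.cnf quoted in the log; the
    # "!includedir" line is standard MySQL syntax but opaque to ConfigParser.
    MY_CNF = "[mysqld]\nuser = mysql\n!includedir /etc/my.cnf.d\n"

    parser = configparser.ConfigParser()
    try:
        parser.read_string(MY_CNF)
    except configparser.ParsingError as err:
        # -> parsing error naming line 3: '!includedir /etc/my.cnf.d\n'
        print(err)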

0.001 | 11067: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config: emaker::Database::Mysql/File[/root/.my.cnf]/mode: mode changed '0644' to '0600'\",
0.001 | 11067: \"Notice: /Stage[main]/Tripleo::Profile::Pacemaker::Database::Mysql/File[/root/.my.cnf]/seltype: seltype changed 'admin_home_t' to 'mysqld_home_t'\",
0.001 | 11067: \"Notice: /Stage[main]/Vswitch::Ovs/Service[openvswitch]/enable: enable changed 'false' to 'true'\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Package[neutron-ovs-agent]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::install::end]: Triggered 'refresh' from 1 events\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/bridge_mappings]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/l2_population]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/arp_responder]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/enable_distributed_routing]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/drop_flows_on_start]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/extensions]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/integration_bridge]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[securitygroup/firewall_driver]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/tunnel_bridge]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/local_ip]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/tunnel_types]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/vxlan_udp_port]/ensure: created\",
0.001 | 11067: \"Notice: /Stage[main]/Neutron::Agents::
0.021 | 11068: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config: Ml2::Ovs/Service[ovs-cleanup-service]/enable: enable changed 'false' to 'true'\",
0.021 | 11068: \"Notice: /Stage[main]/Tripleo::Profile::Base::Kernel/Kmod::Load[nf_conntrack_proto_sctp]/Exec[modprobe nf_conntrack_proto_sctp]/returns: executed successfully\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::config::end]: Triggered 'refresh' from 12 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron::Plugins::Ovs::Bridge[datacentre:br-ex]/Vs_bridge[br-ex]/external_ids: external_ids changed '' to 'bridge-id=br-ex'\",
0.021 | 11068: \"Notice: /Stage[main]/Cinder::Deps/Anchor[cinder::service::end]: Triggered 'refresh' from 1 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Db::Sync/Exec[neutron-db-sync]: Triggered 'refresh' from 2 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::dbsync::end]: Triggered 'refresh' from 1 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::service::begin]: Triggered 'refresh' from 3 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Agents::Dhcp/Service[neutron-dhcp-service]: Triggered 'refresh' from 1 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Server/Service[neutron-server]: Triggered 'refresh' from 1 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Agents::L3/Service[neutron-l3]: Triggered 'refresh' from 1 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Agents::Metadata/Service[neutron-metadata]: Triggered 'refresh' from 1 events\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Service[neutron-ovs-agent-service]/ensure: ensure changed 'stopped' to 'running'\",
0.021 | 11068: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::service::end]: Triggered 'refresh' from 5 events\",
0.021 | 11068: \"Notice: /Stage[main]/Nova::Cell_v2::Simple_setup/Nova_cell_v2[cell0]/database_connection: database_connection changed 'mysql+pymysql://nova:Baq7zRVmDVdgsa7Pgre26EpEw@192.168.24.9/nova_cell0?read_default_group=tripleo' to 'default'\",
0.021 | 11068: \"Notice: /Stage[main]/Nova::Cron::Archive_deleted_rows
0.063 | 11069: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config: /Cron[nova-manage db archive_deleted_rows]/ensure: created\",
0.063 | 11069: \"Notice: /Stage[main]/Keystone::Deps/Anchor[keystone::service::end]: Triggered 'refresh' from 1 events\",
0.063 | 11069: \"Notice: /Stage[main]/Nova::Cell_v2::Discover_hosts/Exec[nova-cell_v2-discover_hosts]/returns: executed successfully\",
0.063 | 11069: \"Notice: /Stage[main]/Nova::Cell_v2::Discover_hosts/Exec[nova-cell_v2-discover_hosts]: Triggered 'refresh' from 1 events\",
0.063 | 11069: \"Notice: Applied catalog in 185.16 seconds\"
0.063 | 11069: ],
0.063 | 11069: \"failed\": false,
0.063 | 11069: \"failed_when_result\": false
0.063 | 11069: }
0.063 | 11069:
0.063 | 11069: TASK [Run docker-puppet tasks (generate config)] *******************************
0.063 | 11069: skipping: [localhost]
0.063 | 11069:
0.063 | 11069: TASK [debug] *******************************************************************
0.063 | 11069: ok: [localhost] => {
0.063 | 11069: \"(outputs.stderr|default('')).split('\
0.063 | 11069: ')|union(outputs.stdout_lines|default([]))\": [
0.063 | 11069: \"\"
0.063 | 11069: ],
0.063 | 11069: \"failed\": false,
0.063 | 11069: \"failed_when_result\": false
0.063 | 11069: }
0.063 | 11069:
0.063 | 11069: TASK [Check if /var/lib/hashed-tripleo-config/docker-container-startup-config-step_5.json exists] ***
0.063 | 11069: ok: [localhost]
0.063 | 11069:
0.063 | 11069: TASK [Start containers for step 5] *********************************************
0.063 | 11069: skipping: [localhost]
0.063 | 11069:
0.063 | 11069: TASK [debug] *******************************************************************
0.063 | 11069: ok: [localhost] => {
0.063 | 11069: \"(outputs.stderr|default('')).split('\
0.063 | 11069: ')|union(outputs.stdout_lines|default([]))\": [
0.063 | 11069: \"\"
0.063 | 11069: ],
0.063 | 11069: \"failed\": false,
0.063 | 11069: \"failed_when_result\": false
0.063 | 11069: }
0.063 | 11069:
0.063 | 11069: TASK [Check if /var/lib/docker-puppet/docker-puppet-tasks5.json exists] ********
0.063 | 11069: ok: [localhost]
0.063 | 11069:
0.063 | 11069: TASK [Run docker-puppet tasks (bootstrap tasks)] *******************************
0.063 | 11069: skipping: [localhost]
0.063 | 11069:
0.063 | 11069: TASK [debug] *******************************************************************
0.063 | 11069: ok: [localhost] => {
0.063 | 11069: \"(outputs.stderr|default('')).split('\
0.063 | 11069: ')|union(outputs.stdout_lines|default([]))\": [
0.063 | 11069: \"\"
0.063 | 11069: ],
0.063 | 11069: \"failed\": false,
0.063 | 11069: \"failed_when_result\": false
0.063 | 11069: }
0.063 | 11069:
0.063 | 11069: PLAY RECAP ***********************************************************
0.341 | 11070: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config: **********
0.341 | 11070: localhost : ok=9 changed=1 unreachable=0 failed=0
0.341 | 11070:
0.341 | 11070: ", "deploy_stderr": "", "deploy_status_code": 0}
0.000 | 11071: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config: [2017-09-14 19:39:58,644] (heat-config) [DEBUG] [2017-09-14 19:36:00,045] (heat-config) [DEBUG] Running ansible-playbook -i localhost, --module-path /usr/share/ansible-modules /var/lib/heat-config/heat-config-ansible/e6d6bd06-c05b-463c-8d28-5bc0b2cfe29d_playbook.yaml --extra-vars @/var/lib/heat-config/heat-config-ansible/e6d6bd06-c05b-463c-8d28-5bc0b2cfe29d_variables.json
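
The last line shows how the heat-config ansible hook applies each deployment: it shells out to ansible-playbook with a one-host inline inventory, a rendered playbook, and a JSON variables file. A sketch of replaying that invocation by hand, with the paths copied verbatim from the log line (the UUID is specific to this run, so treat it as illustrative):

    import subprocess

    # Re-run the hook's ansible-playbook call exactly as logged by
    # os-collect-config above; "localhost," is ansible's inline
    # one-host inventory syntax.
    subprocess.check_call([
        "ansible-playbook",
        "-i", "localhost,",
        "--module-path", "/usr/share/ansible-modules",
        "/var/lib/heat-config/heat-config-ansible/"
        "e6d6bd06-c05b-463c-8d28-5bc0b2cfe29d_playbook.yaml",
        "--extra-vars",
        "@/var/lib/heat-config/heat-config-ansible/"
        "e6d6bd06-c05b-463c-8d28-5bc0b2cfe29d_variables.json",
    ])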

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/journal.txt.gz
0.000 | 0058: Sep 14 17:59:00 localhost kernel: NODE_DATA(0) allocated [mem 0x233fd7000-0x233ffdfff]
0.000 | 0059: Sep 14 17:59:00 localhost kernel: kvm-clock: Using msrs 4b564d01 and 4b564d00
0.000 | 0060: Sep 14 17:59:00 localhost kernel: kvm-clock: cpu 0, msr 2:33f87001, primary cpu clock
0.320 | 0061: Sep 14 17:59:00 localhost kernel: kvm-clock: using sched offset of 1239040549 cycles
0.000 | 0062: Sep 14 17:59:00 localhost kernel: Zone ranges:

0.000 | 0123: Sep 14 17:59:00 localhost kernel: Kernel command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64 root=LABEL=cloudimg-rootfs ro nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 no_timer_check
0.000 | 0124: Sep 14 17:59:00 localhost kernel: PID hash table entries: 4096 (order: 3, 32768 bytes)
0.000 | 0125: Sep 14 17:59:00 localhost kernel: x86/fpu: xstate_offset[2]: 0240, xstate_sizes[2]: 0100
0.489 | 0126: Sep 14 17:59:00 localhost kernel: xsave: enabled xstate_bv 0x7, cntxt size 0x340 using standard form
0.000 | 0127: Sep 14 17:59:00 localhost kernel: Memory: 5139612k/9240576k available (6886k kernel code, 1049112k absent, 234036k reserved, 4545k data, 1764k init)

0.000 | 0167: Sep 14 17:59:00 localhost kernel: Enabled x2apic
0.000 | 0168: Sep 14 17:59:00 localhost kernel: Switched APIC routing to physical x2apic.
0.000 | 0169: Sep 14 17:59:00 localhost kernel: ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
0.267 | 0170: Sep 14 17:59:00 localhost kernel: smpboot: CPU0: Intel Core Processor (Haswell, no TSX) (fam: 06, model: 3c, stepping: 01)
0.443 | 0171: Sep 14 17:59:00 localhost kernel: TSC deadline timer enabled
0.000 | 0172: Sep 14 17:59:00 localhost kernel: Performance Events: unsupported p6 CPU model 60 no PMU driver, software events only.

0.083 | 0882: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 growroot[540]: + resize2fs /dev/vda1
0.000 | 0883: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 growroot[540]: resize2fs 1.42.9 (28-Dec-2013)
0.000 | 0884: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 kernel: EXT4-fs (vda1): resizing filesystem from 3265664 to 20971259 blocks
0.505 | 0885: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: DHCPDISCOVER on eth0 to 255.255.255.255 port 67 interval 3 (xid=0x747611ab)
0.310 | 0886: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x747611ab)
0.304 | 0887: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: DHCPOFFER from 149.202.160.1
0.355 | 0888: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: DHCPACK from 149.202.160.1 (xid=0x747611ab)
0.532 | 0889: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: suspect value in domain_search option - discarded
0.000 | 0890: Sep 14 17:59:03 centos-7-2-node-ovh-gra1-10937570-899082 kernel: EXT4-fs (vda1): resized filesystem to 20971259

0.000 | 0896: Sep 14 17:59:04 centos-7-2-node-ovh-gra1-10937570-899082 ntpd_intres[529]: host name not found: 1.centos.pool.ntp.org
0.000 | 0897: Sep 14 17:59:04 centos-7-2-node-ovh-gra1-10937570-899082 ntpd_intres[529]: host name not found: 2.centos.pool.ntp.org
0.000 | 0898: Sep 14 17:59:04 centos-7-2-node-ovh-gra1-10937570-899082 ntpd_intres[529]: host name not found: 3.centos.pool.ntp.org
0.689 | 0899: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 NET[756]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.544 | 0900: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: Error printing text.
0.596 | 0901: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[705]: bound to 149.202.161.193 -- renewal in 40600 seconds.
0.581 | 0902: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 ifup[681]: Determining IP information for eth0... done.
0.000 | 0903: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Started Glean for interface eth0.

0.000 | 0906: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Starting LSB: Bring up/down networking...
0.000 | 0907: Sep 14 17:59:05 centos-7-2-node-ovh-gra1-10937570-899082 network[800]: Bringing up loopback interface: [ OK ]
0.000 | 0908: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 network[800]: Bringing up interface eth0:
0.620 | 0909: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[929]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.649 | 0910: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 network[800]: Determining IP information for eth0...Can't create /var/run/dhclient-eth0.pid: Permission denied
0.310 | 0911: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[929]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x5e46b79f)
0.000 | 0912: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 kernel: iptables dropped: IN=eth0 OUT= MAC=ff:ff:ff:ff:ff:ff:86:a7:eb:72:81:f5:08:00 SRC=149.202.160.1 DST=255.255.255.255 LEN=309 TOS=0x00 PREC=0x00 TTL=64 ID=0 PROTO=UDP SPT=67 DPT=68 LEN=289
0.355 | 0913: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[929]: DHCPACK from 149.202.160.1 (xid=0x5e46b79f)
0.532 | 0914: Sep 14 17:59:06 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[929]: suspect value in domain_search option - discarded
0.000 | 0915: Sep 14 17:59:07 centos-7-2-node-ovh-gra1-10937570-899082 ntpd[521]: Listen normally on 4 eth0 149.202.161.193 UDP 123
0.000 | 0916: Sep 14 17:59:07 centos-7-2-node-ovh-gra1-10937570-899082 ntpd[521]: Listen normally on 5 eth0 fe80::f816:3eff:fe8f:1c3d UDP 123
0.000 | 0917: Sep 14 17:59:07 centos-7-2-node-ovh-gra1-10937570-899082 ntpd[521]: new interface(s) found: waking up resolver
0.689 | 0918: Sep 14 17:59:08 centos-7-2-node-ovh-gra1-10937570-899082 NET[979]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.544 | 0919: Sep 14 17:59:08 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[929]: Error printing text.
0.596 | 0920: Sep 14 17:59:08 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[929]: bound to 149.202.161.193 -- renewal in 38236 seconds.
0.620 | 0921: Sep 14 17:59:08 centos-7-2-node-ovh-gra1-10937570-899082 dhclient[981]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.530 | 0922: Sep 14 17:59:08 centos-7-2-node-ovh-gra1-10937570-899082 network[800]: done.
0.000 | 0923: Sep 14 17:59:08 centos-7-2-node-ovh-gra1-10937570-899082 network[800]: [ OK ]

0.000 | 10890: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: [/usr/lib/systemd/system/memcached.service:72] Unknown lvalue 'RestrictNamespaces' in section 'Service'
0.000 | 10891: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Configuration file /etc/systemd/system/glean@.service.d/override.conf is marked executable. Please remove executable permission bits. Proceeding anyway.
0.000 | 10892: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Starting Simple Network Management Protocol (SNMP) Daemon....
0.637 | 10893: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 snmpd[79706]: Duplicate IPv4 address detected, some interfaces may not be visible in IP-MIB
0.000 | 10894: Sep 14 19:33:18 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Started Simple Network Management Protocol (SNMP) Daemon..

0.000 | 11234: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: [/usr/lib/systemd/system/memcached.service:72] Unknown lvalue 'RestrictNamespaces' in section 'Service'
0.000 | 11235: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Configuration file /etc/systemd/system/glean@.service.d/override.conf is marked executable. Please remove executable permission bits. Proceeding anyway.
0.000 | 11236: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 puppet-user[74902]: (/Stage[main]/Nova::Scheduler/Nova::Generic_service[scheduler]/Service[nova-scheduler]/ensure) ensure changed 'stopped' to 'running'
0.588 | 11237: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: Traceback (most recent call last):
0.600 | 11238: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/eventlet/hubs/hub.py", line 457, in fire_timers
0.446 | 11239: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: timer()
0.645 | 11240: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/eventlet/hubs/timer.py", line 58, in __call__
0.467 | 11241: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: cb(*args, **kw)
0.508 | 11242: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/nova/utils.py", line 931, in context_wrapper
0.467 | 11243: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: func(*args, **kwargs)
0.398 | 11244: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/nova/scheduler/host_manager.py", line 431, in _async_init_instance_info
0.411 | 11245: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: cctxt).objects
0.605 | 11246: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_versionedobjects/base.py", line 184, in wrapper
0.641 | 11247: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: result = fn(cls, context, *args, **kwargs)
0.494 | 11248: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/nova/objects/compute_node.py", line 389, in get_all
0.485 | 11249: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: db_computes = db.compute_node_get_all(context)
0.536 | 11250: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/nova/db/api.py", line 260, in compute_node_get_all
0.522 | 11251: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return IMPL.compute_node_get_all(context)
0.572 | 11252: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/nova/db/sqlalchemy/api.py", line 264, in wrapped
0.576 | 11253: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: with ctxt_mgr.reader.using(context):
0.456 | 11254: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
0.579 | 11255: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return self.gen.next()
0.291 | 11256: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 1028, in _transaction_scope
0.673 | 11257: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: context=context) as resource:
0.456 | 11258: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
0.579 | 11259: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return self.gen.next()
0.291 | 11260: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 633, in _session
0.660 | 11261: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: bind=self.connection, mode=self.mode)
0.291 | 11262: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 398, in _create_session
0.478 | 11263: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: self._start()
0.291 | 11264: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 484, in _start
0.199 | 11265: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: engine_args, maker_args)
0.291 | 11266: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py", line 508, in _setup_for_connection
0.199 | 11267: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: sql_connection=sql_connection, **engine_kwargs)
0.429 | 11268: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/engines.py", line 179, in create_engine
0.596 | 11269: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: test_conn = _test_connection(engine, max_retries, retry_interval)
0.429 | 11270: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/engines.py", line 357, in _test_connection
0.573 | 11271: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return engine.connect()
0.674 | 11272: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2091, in connect
0.612 | 11273: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return self._connection_cls(self, **kwargs)
0.651 | 11274: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 90, in __init__
0.512 | 11275: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: if connection is not None else engine.raw_connection()
0.617 | 11276: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2177, in raw_connection
0.566 | 11277: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: self.pool.unique_connection, _connection)
0.617 | 11278: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 2147, in _wrap_pool_connect
0.372 | 11279: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return fn()
0.623 | 11280: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 328, in unique_connection
0.517 | 11281: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return _ConnectionFairy._checkout(self)
0.623 | 11282: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 766, in _checkout
0.441 | 11283: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: fairy = _ConnectionRecord.checkout(pool)
0.623 | 11284: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 516, in checkout
0.441 | 11285: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: rec = pool._do_get()
0.623 | 11286: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 1138, in _do_get
0.478 | 11287: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: self._dec_overflow()
0.560 | 11288: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/util/langhelpers.py", line 66, in __exit__
0.495 | 11289: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: compat.reraise(exc_type, exc_value, exc_tb)
0.623 | 11290: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 1135, in _do_get
0.517 | 11291: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return self._create_connection()
0.623 | 11292: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 333, in _create_connection
0.517 | 11293: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return _ConnectionRecord(self)
0.656 | 11294: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 461, in __init__
0.507 | 11295: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: self.__connect(first_connect_check=True)
0.623 | 11296: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/pool.py", line 651, in __connect
0.610 | 11297: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: connection = pool._invoke_creator(self)
0.699 | 11298: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/strategies.py", line 105, in connect
0.539 | 11299: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return dialect.connect(*cargs, **cparams)
0.681 | 11300: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 393, in connect
0.606 | 11301: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return self.dbapi.connect(*cargs, **cparams)
0.656 | 11302: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/pymysql/__init__.py", line 90, in Connect
0.572 | 11303: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: return Connection(*args, **kwargs)
0.641 | 11304: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib/python2.7/site-packages/pymysql/connections.py", line 618, in __init__
0.564 | 11305: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: cfg.read(os.path.expanduser(read_default_file))
0.526 | 11306: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/ConfigParser.py", line 305, in read
0.478 | 11307: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: self._read(fp, filename)
0.456 | 11308: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: File "/usr/lib64/python2.7/ConfigParser.py", line 546, in _read
0.199 | 11309: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: raise e
0.613 | 11310: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: ParsingError: File contains parsing errors: /etc/my.cnf
0.557 | 11311: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 nova-scheduler[83772]: [line 17]: '!includedir /etc/my.cnf.d
0.557 | 11311: '
0.000 | 11312: Sep 14 19:34:39 centos-7-2-node-ovh-gra1-10937570-899082 systemd[1]: Reloading.
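
The last frames of this traceback show the actual failure: pymysql hands /etc/my.cnf to the Python standard-library ConfigParser, which rejects MySQL's !includedir directive because it is not INI syntax. A minimal reproduction (Python 3's configparser shown; the Python 2 ConfigParser used by this job fails the same way):

    import configparser

    # MySQL accepts '!includedir' in my.cnf, but it is not valid INI
    # syntax, so the stdlib parser raises ParsingError -- the same
    # failure pymysql hits above when it reads /etc/my.cnf.
    SAMPLE = "[mysqld]\nuser = mysql\n!includedir /etc/my.cnf.d\n"

    parser = configparser.ConfigParser()
    try:
        parser.read_string(SAMPLE, source="/etc/my.cnf")
    except configparser.ParsingError as exc:
        print(exc)  # Source contains parsing errors: '/etc/my.cnf' ...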

0.001 | 12309: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config[21630]: emaker::Database::Mysql/File[/root/.my.cnf]/mode: mode changed '0644' to '0600'\",
0.001 | 12309: \"Notice: /Stage[main]/Tripleo::Profile::Pacemaker::Database::Mysql/File[/root/.my.cnf]/seltype: seltype changed 'admin_home_t' to 'mysqld_home_t'\",
0.001 | 12309: \"Notice: /Stage[main]/Vswitch::Ovs/Service[openvswitch]/enable: enable changed 'false' to 'true'\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Package[neutron-ovs-agent]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::install::end]: Triggered 'refresh' from 1 events\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/bridge_mappings]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/l2_population]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/arp_responder]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/enable_distributed_routing]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/drop_flows_on_start]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/extensions]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/integration_bridge]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[securitygroup/firewall_driver]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/tunnel_bridge]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[ovs/local_ip]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/tunnel_types]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron_agent_ovs[agent/vxlan_udp_port]/ensure: created\",
0.001 | 12309: \"Notice: /Stage[main]/Neutron::Agents::
0.021 | 12310: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config[21630]: Ml2::Ovs/Service[ovs-cleanup-service]/enable: enable changed 'false' to 'true'\",
0.021 | 12310: \"Notice: /Stage[main]/Tripleo::Profile::Base::Kernel/Kmod::Load[nf_conntrack_proto_sctp]/Exec[modprobe nf_conntrack_proto_sctp]/returns: executed successfully\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::config::end]: Triggered 'refresh' from 12 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Neutron::Plugins::Ovs::Bridge[datacentre:br-ex]/Vs_bridge[br-ex]/external_ids: external_ids changed '' to 'bridge-id=br-ex'\",
0.021 | 12310: \"Notice: /Stage[main]/Cinder::Deps/Anchor[cinder::service::end]: Triggered 'refresh' from 1 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Db::Sync/Exec[neutron-db-sync]: Triggered 'refresh' from 2 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::dbsync::end]: Triggered 'refresh' from 1 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::service::begin]: Triggered 'refresh' from 3 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Agents::Dhcp/Service[neutron-dhcp-service]: Triggered 'refresh' from 1 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Server/Service[neutron-server]: Triggered 'refresh' from 1 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Agents::L3/Service[neutron-l3]: Triggered 'refresh' from 1 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Agents::Metadata/Service[neutron-metadata]: Triggered 'refresh' from 1 events\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Agents::Ml2::Ovs/Service[neutron-ovs-agent-service]/ensure: ensure changed 'stopped' to 'running'\",
0.021 | 12310: \"Notice: /Stage[main]/Neutron::Deps/Anchor[neutron::service::end]: Triggered 'refresh' from 5 events\",
0.021 | 12310: \"Notice: /Stage[main]/Nova::Cell_v2::Simple_setup/Nova_cell_v2[cell0]/database_connection: database_connection changed 'mysql+pymysql://nova:Baq7zRVmDVdgsa7Pgre26EpEw@192.168.24.9/nova_cell0?read_default_group=tripleo' to 'default'\",
0.021 | 12310: \"Notice: /Stage[main]/Nova::Cron::Archive_deleted_rows
0.062 | 12311: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config[21630]: /Cron[nova-manage db archive_deleted_rows]/ensure: created\",
0.062 | 12311: \"Notice: /Stage[main]/Keystone::Deps/Anchor[keystone::service::end]: Triggered 'refresh' from 1 events\",
0.062 | 12311: \"Notice: /Stage[main]/Nova::Cell_v2::Discover_hosts/Exec[nova-cell_v2-discover_hosts]/returns: executed successfully\",
0.062 | 12311: \"Notice: /Stage[main]/Nova::Cell_v2::Discover_hosts/Exec[nova-cell_v2-discover_hosts]: Triggered 'refresh' from 1 events\",
0.062 | 12311: \"Notice: Applied catalog in 185.16 seconds\"
0.062 | 12311: ],
0.062 | 12311: \"failed\": false,
0.062 | 12311: \"failed_when_result\": false
0.062 | 12311: }
0.062 | 12311:
0.062 | 12311: TASK [Run docker-puppet tasks (generate config)] *******************************
0.062 | 12311: skipping: [localhost]
0.062 | 12311:
0.062 | 12311: TASK [debug] *******************************************************************
0.062 | 12311: ok: [localhost] => {
0.062 | 12311: \"(outputs.stderr|default('')).split('\
0.062 | 12311: ')|union(outputs.stdout_lines|default([]))\": [
0.062 | 12311: \"\"
0.062 | 12311: ],
0.062 | 12311: \"failed\": false,
0.062 | 12311: \"failed_when_result\": false
0.062 | 12311: }
0.062 | 12311:
0.062 | 12311: TASK [Check if /var/lib/hashed-tripleo-config/docker-container-startup-config-step_5.json exists] ***
0.062 | 12311: ok: [localhost]
0.062 | 12311:
0.062 | 12311: TASK [Start containers for step 5] *********************************************
0.062 | 12311: skipping: [localhost]
0.062 | 12311:
0.062 | 12311: TASK [debug] *******************************************************************
0.062 | 12311: ok: [localhost] => {
0.062 | 12311: \"(outputs.stderr|default('')).split('\
0.062 | 12311: ')|union(outputs.stdout_lines|default([]))\": [
0.062 | 12311: \"\"
0.062 | 12311: ],
0.062 | 12311: \"failed\": false,
0.062 | 12311: \"failed_when_result\": false
0.062 | 12311: }
0.062 | 12311:
0.062 | 12311: TASK [Check if /var/lib/docker-puppet/docker-puppet-tasks5.json exists] ********
0.062 | 12311: ok: [localhost]
0.062 | 12311:
0.062 | 12311: TASK [Run docker-puppet tasks (bootstrap tasks)] *******************************
0.062 | 12311: skipping: [localhost]
0.062 | 12311:
0.062 | 12311: TASK [debug] *******************************************************************
0.062 | 12311: ok: [localhost] => {
0.062 | 12311: \"(outputs.stderr|default('')).split('\
0.062 | 12311: ')|union(outputs.stdout_lines|default([]))\": [
0.062 | 12311: \"\"
0.062 | 12311: ],
0.062 | 12311: \"failed\": false,
0.062 | 12311: \"failed_when_result\": false
0.062 | 12311: }
0.062 | 12311:
0.062 | 12311: PLAY RECAP ***********************************************************
0.379 | 12312: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config[21630]: **********
0.379 | 12312: localhost : ok=9 changed=1 unreachable=0 failed=0
0.379 | 12312:
0.379 | 12312: ", "deploy_stderr": "", "deploy_status_code": 0}
0.000 | 12313: Sep 14 19:39:58 centos-7-2-node-ovh-gra1-10937570-899082 os-collect-config[21630]: [2017-09-14 19:39:58,644] (heat-config) [DEBUG] [2017-09-14 19:36:00,045] (heat-config) [DEBUG] Running ansible-playbook -i localhost, --module-path /usr/share/ansible-modules /var/lib/heat-config/heat-config-ansible/e6d6bd06-c05b-463c-8d28-5bc0b2cfe29d_playbook.yaml --extra-vars @/var/lib/heat-config/heat-config-ansible/e6d6bd06-c05b-463c-8d28-5bc0b2cfe29d_variables.json

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/host_info.txt.gz
0.000 | 0099: system_u:system_r:systemd_logind_t:s0 root 515 0.0 0.0 24336 1524 ? Ss 17:58 0:02 /usr/lib/systemd/systemd-logind LANG=en_US.UTF-8 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin NOTIFY_SOCKET=/run/systemd/notify WATCHDOG_PID=515 WATCHDOG_USEC=180000000
0.000 | 0100: system_u:system_r:system_dbusd_t:s0-s0:c0.c1023 dbus 517 0.0 0.0 98280 1392 ? Ssl 17:58 0:04 /bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation LANG=en_US.UTF-8 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin LISTEN_PID=517 LISTEN_FDS=1
0.000 | 0101: system_u:system_r:apmd_t:s0 root 597 0.0 0.0 4340 364 ? Ss 17:58 0:00 /usr/sbin/acpid LANG=en_US.UTF-8 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin OPTIONS=
0.554 | 0102: system_u:system_r:unconfined_service_t:s0 root 755 0.0 0.0 113372 36 ? Ss 17:58 0:00 /sbin/dhclient -1 -q -cf /etc/dhcp/dhclient-eth0.conf -lf /var/lib/dhclient/dhclient--eth0.lease -pf /var/run/dhclient-eth0.pid -H centos-7-2-node-ovh-gra1-10937570 eth0 SHELL=/bin/sh OLDPWD=/etc/sysconfig/network-scripts USER=root PATH=/sbin:/usr/sbin:/bin:/usr/bin PWD=/etc/sysconfig/network-scripts LANG=en_US.UTF-8 HOME=/root SHLVL=1 ARGS=--interface eth0 --skip-dns LOGNAME=root _=/sbin/dhclient
0.543 | 0103: system_u:system_r:dhcpc_t:s0 root 978 0.0 0.0 113372 200 ? Ss 17:58 0:00 /sbin/dhclient -1 -q -cf /etc/dhcp/dhclient-eth0.conf -lf /var/lib/dhclient/dhclient--eth0.lease -pf /var/run/dhclient-eth0.pid -H centos-7-2-node-ovh-gra1-10937570 eth0 OLDPWD=/etc/sysconfig/network-scripts PATH=/sbin:/usr/sbin:/bin:/usr/bin PWD=/etc/sysconfig/network-scripts LANG=en_US.UTF-8 SHLVL=2 _=/sbin/dhclient
0.000 | 0104: system_u:system_r:getty_t:s0-s0:c0.c1023 root 1051 0.0 0.0 8508 652 tty1 Ss+ 17:58 0:00 /sbin/agetty --noclear tty1 linux LANG= PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin TERM=linux LANGUAGE= LC_CTYPE= LC_NUMERIC= LC_TIME= LC_COLLATE= LC_MONETARY= LC_MESSAGES= LC_PAPER= LC_NAME= LC_ADDRESS= LC_TELEPHONE= LC_MEASUREMENT= LC_IDENTIFICATION=

0.000 | 0658: 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
0.000 | 0659: link/ether fa:16:3e:b8:8b:c3 brd ff:ff:ff:ff:ff:ff
0.000 | 0660: inet 149.202.161.189/32 brd 149.202.161.189 scope global dynamic eth0
0.423 | 0661: valid_lft 79777sec preferred_lft 79777sec
0.000 | 0662: inet6 fe80::f816:3eff:feb8:8bc3/64 scope link

0.000 | 0896: 597 root 20 0 4340 364 360 S 0.0 0.0 0:00.00 acpid
0.000 | 0897: 24722 rabbitmq 20 0 11544 320 300 S 0.0 0.0 0:00.01 inet_getho+
0.000 | 0898: 2075 nobody 20 0 15580 236 56 S 0.0 0.0 0:00.00 dnsmasq
0.264 | 0899: 978 root 20 0 113372 200 196 S 0.0 0.0 0:00.00 dhclient
0.000 | 0900: 24260 root 20 0 44600 192 120 S 0.0 0.0 0:00.04 epmd
0.264 | 0901: 755 root 20 0 113372 36 32 S 0.0 0.0 0:00.00 dhclient
0.000 | 0902: 2 root 20 0 0 0 0 S 0.0 0.0 0:00.01 kthreadd

0.000 | 3263: "ami-launch-index": 0,
0.000 | 3264: "public-hostname": "centos-7-2-node-ovh-gra1-10937570.novalocal",
0.000 | 3265: "hostname": "centos-7-2-node-ovh-gra1-10937570.novalocal",
0.985 | 3266: "ramdisk-id": null,
0.000 | 3267: "public-keys": {

0.000 | 3272: },
0.000 | 3273: "ami-id": "ami-00021567",
0.000 | 3274: "instance-action": "none",
0.529 | 3275: "kernel-id": null,
0.000 | 3276: "public-ipv4": "",

0.000 | 3315: ec2_kernel_id => None
0.000 | 3316: ec2_local_hostname => centos-7-2-node-ovh-gra1-10937570
0.000 | 3317: ec2_local_ipv4 => 149.202.161.189
0.547 | 3318: ec2_metadata => {"ami-id"=>"ami-00021567", "ami-launch-index"=>"0", "ami-manifest-path"=>"FIXME", "block-device-mapping"=>{"ami"=>"vda", "root"=>"/dev/vda"}, "hostname"=>"centos-7-2-node-ovh-gra1-10937570", "instance-action"=>"none", "instance-id"=>"i-006a62da", "instance-type"=>"ssd-osFoundation-3", "kernel-id"=>"None", "local-hostname"=>"centos-7-2-node-ovh-gra1-10937570", "local-ipv4"=>"149.202.161.189", "placement"=>{"availability-zone"=>"nova"}, "public-hostname"=>"centos-7-2-node-ovh-gra1-10937570", "public-ipv4"=>nil, "public-keys"=>{"0"=>{"openssh-key"=>["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLsTZJ8hXTmzjKxYh/7V07mIy8xl2HL+9BaUlt6A6TMsL3LSvaVQNSgmXX5g0XfPWSCKmkZb1O28q49jQI2n7n7+sHkxn0dJDxj1N2oNrzNY7pDuPrdtCijczLFdievygXNhXNkQ2WIqHXDquN/jfLLJ9L0jxtxtsUMbiL2xxZEZcaf/K5MqyPhscpqiVNE1MjE4xgPbIbv8gCKtPpYIIrktOMb4JbV7rhOp5DcSP5gXtLhOF5fbBpZ+szqrTVUcBX0oTYr3iRfOje9WPsTZIk9vBfBtF416mCNxMSRc7KhSW727AnUu85hS0xiP0MRAf69KemG1OE1pW+LtDIAEYp mordred@camelot", "", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvKYcWK1T7e3PKSFiqb03EYktnoxVASpPoq2rJw2JvhsP0JfS+lKrPzpUQv7L4JCuQMsPNtZ8LnwVEft39k58Kh8XMebSfaqPYAZS5zCNvQUQIhP9myOevBZf4CDeG+gmssqRFcWEwIllfDuIzKBQGVbomR+Y5QuW0HczIbkoOYI6iyf2jB6xg+bmzR2HViofNrSa62CYmHS6dO04Z95J27w6jGWpEOTBjEQvnb9sdBc4EzaBVmxCpa2EilB1u0th7/DvuH0yP4T+X8G8UjW1gZCTOVw06fqlBCST4KjdWw1F/AuOCT7048klbf4H+mCTaEcPzzu3Fkv8ckMWtS/Z9Q== jeblair@operational-necessity", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCnfoVhOTkrY7uoebL8PoHXb0Fg4jJqGCbwkxUdNUdheIdbnfyjuRG3iL8WZnzf7nzWnD+IGo6kkAo8BkNMK9L0P0Y+5IjI8NH49KU22tQ1umij4EIf5tzLh4gsqkJmy6QLrlbf10m6UF4rLFQhKzOd4b2H2K6KbP00CIymvbW3BwvNDODM4xRE2uao387qfvXZBUkB0PpRD+7fWPoN58gpFUm407Eba3WwX5PCD+1DD+RVBsG8maIDXerQ7lvFLoSuyMswv1TfkvCj0ZFhSFbfTd2ZysCu6eryFfeixR7NY9SNcp9YTqG6LrxGA7Ci6wz+hycFHXlDrlBgfFJDe5At clark@work", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3KnRBTH5QPpKjf4RWu4akzYt2gwp796cMkFl5vu8e7G/cHuh4979FeNJXMVP6F3rvZB+yXDHLCU5LBVLq0K+1GbAZT/hH38hpMOIvniwKIquvI6C/drkVPHO6YmVlapw/NI530PGnT/TAqCOycHBO5eF1bYsaqV1yZqvs9v7UZc6J4LukoLZwpmyWZ5P3ltAiiy8+FGq3SLCKWDMmv/Bjz4zTsaNbSWThJi0BydINjC1/0ze5Tyc/XgW1sDuxmmXJxgQp4EvLpronqb2hT60iA52kj8lrmoCIryRpgnbaRA7BrxKF8zIr0ZALHijxEUeWHhFJDIVRGUf0Ef0nrmBv fungi-openstack-2015", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHGuIVB/WxBd7k1R8x2FyfqT6KxRnoM7lE5RE8gvBk2r8cQeH5k1c+P5JrBvWpmqXv4satoivYOBiIb7JXEgIxx62YUx/JQ0J7k3w+av6h4iFe2OhOtEOjMF5F8/wO8a/95OeTZPzBZlUfA3hx754kuw3Q/aBKQUOHWxJOIedGyVHeJc7XiFj3RXIufFuUfng9+p4Z3q6d2/WpuKqs00WI0CLF17PkU4i8P9CraJR1dmsWW6zoxMT2G+DwMFI7ZMS3xrVBRuLwrLlbylVLW2kOJ0JeyjHnRh7X1kR7KG3cGOOjA1YQ0e+mXvremcO3/3o6Iop/N1AtqVuYCKlZc7Y9 slukjanov@mirantis.com", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9x1rhTVOEQEanrN+ecycaDtAbbh3kr41Rxx7galtLq0JwftjsZqv2Vwl9c8ARmm8HiHcLwDoaZB9gvs6teMScCB+5a1fcohiycJBl2olNFRzkGapDaTvl74aLXQBWaV84D8tUavEl26zcgwrv9WLUsy9pnHoo5K0BzbK7vT2g3VictCphveC2vdjCDeptocWvt4zxCmAY6O7QMKeUjKMlvuy+zCohJcR4BbDnw8EriFAmCeQZcAgfLTyeAvjo384NNIFWyhCwvbCLvpgTplMCp896DWLlXu9eaGUCNjT/sZM8zafAXbfc6OKYFQ5iANAiJktWwKaUaphJkbSVWT5 elizabeth@r2d2", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3onVLOZiiGpQWTCIV0QwHmc3Jvqyl7UaJxIu7D49OQcLHqVZsozI9pSiCdTnWyAaM+E+5wD9yVcSTqMWqn2AZmZSwQ+Fh6KnCgPZ/o63+iCZPGL0RNk20M1iNh5dvdStDnn+j2fpeV/JONF0tBn07QvNL2eF4BwtbTG9Zhl186QNsXjXDghrSO3Etl6DSfcUhxyvMoA2LnclWWD5hLmiRhcBm+PIxveVsr4B+o0k1HV5SUOvJMWtbEC37AH5I818O4fNOob6CnOFaCsbA9oUDzB5rqxutPZb9SmNJpNoLqYqDgyppM0yeql0Kn97tUt7H4j5xHrWoGnJ4IXfuDc0AMmmy4fpcLGkNf7zcBftKS6iz/3AlOXjlp5WZvKxngJj9HIir2SE/qV4Lxw9936BzvAcQyw5+bEsLQJwi+LPZxEqLC6oklkX9dg/+1yBFHsz6mulA0b4Eq7VF9omRzrhhN4iPpU5KQYPRNz7yRYckXDxYnp2lz6yHgSYh2/lqMc+UqmCL9EAWcDw3jsgvJ6kH/YUVUojiRHD9QLqlhOusu1wrTfojjwF05mqkXKmH+LH8f8AJAlMdYg0c2WLlrcxnwCkLLxzU5cYmKcZ41LuLtQR3ik+EKjYzBXXyCEzFm6qQEbR2akpXyxvONgrf7pijrgNOi0GeatUt0bUQcAONYw== jhesketh@infra", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTDia7zLp6NB/DdzhGW/4MDgaQ1yemfF+fGFctrSbBZzP2Aj3RUlBh4Mut3bTIqp/PKNMXVZQbvig5nqF3sB87ZPvmk+7WluFFcQN1RIZnvkYXjF64C+G5PkEZOQW9nqEeElSCV2lXgK98FPrGtK6HgQlYxH5RJa6cufRwYLXLsAwfKRcS3P5oRU2KDORNm6uBfUuX0TyPgtEjYsjCWcffoW+E8kvZbx1DKxF4+u0mWSdkg0P40aAY10mHACtJ4hnu7xNa5Z9Oru1rA1KWL5NHISgy9t5zC1/0jWfYi+tqToBgUCyB8stWgNpHh+QJrpS8CoCDzQLBar0ynnOxBfHH2+s9xJapQNi6ZOC3khWkoxUJn2Gs9FXqow3zGSmEuEKbbUvaGC58U4S0xFcZzF+sOzjRJtw66wE2pQN5Pj/Qw09w6gt05g4nxoxkRVCwMLdnyoIY1oFmywJX3xC1Utu2oCNfgZSn78rqVkE9e11LczPNGvYjl6xQo1r254E0w3QBgo+LaTK5FBRCAbJ76n0IBJ8SZe9foPWjKTGlbCevM6KO8lm58/0m0EfMf9457ZM9KhyXwYvnb+iR7huGC+pwgGemJ4D6vjeE9EUNGSq6igg+v+cl1DHOxVb0s0Tx2T6DMh3usB4C1uoNCR303cmzrNZ94KLXRICQArSClQI7OQ== nibz@hertz", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSR2NmJC8PSanHUpKJuaMmohG80COO2IPkE3Mxhr7US8P1B3p1c6lOrT6M1txRzBY8FlbxfOinGtutP+ADCB2taXfpO8UiaG9eOqojAT/PeP2Y2ov72rVMSWupLozUv2uAR5yyFVFHOjKPYGAa01aJtfzfJujSak8dM0ifFeFwgp/8RBGEfC7atq+45TdrfAURRcEgcOLiF5Aq6fprCOwpllnrH6VoId9YS7u/5xF2/zBjr9PuOP7jEgCaL/+FNqu7jgj87aG5jiZPlweb7GTLJON9H6eFpyfpoJE0sZ1yR9Q+e9FAqQIA44Zi748qKBlFKbLxzoC4mc0SbNUAleEL yolanda@infra", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1CW5E87v8o7O8B5fe7j1uaPCToRdaBukjH2HzQZ+DSGTIPjirLpp5ZXPuyNnmtRMzwld6mlHYlevVEwuZTNyQwS7ut5o0LjyW6yoEcvPq0xMEZLxaso5dZAtzNgf3FzbtaUYBnkhSwX7c24lf8wPGAl7TC3yO0dePQh2lXVdaBiGB9ybVeQr+kwJIxleUE4puuQ+ONJE2D+hHjoQ/huUMpb996pb/YzkjkAxqHguMid0c1taelyW8n17nEDoWvlV9Qqbo8cerhgURo1OBt2zENLjQQ0kOkPxJx4qx3652e0kbkr11y50r9BMs418mnJdWselMxkSqQNZ+XotoH5Dwn+3K2a6Wv4OX3Dqb9SF/JTD7lA/tIkNfxgsRlzfEQ01rK1+g7Je10EnDCLEzHpFjvZ5q4EEMcYqY+osLFpHAOWGLMx+3eY4pz/xEzRP/x3sjGU09uNOZ3oCWUfSkE4xebnnWtxwWZKyFmv3GHtaqJn2UvpAbODPEYyYcOS3XV3zd233W3C09YYnFUyZbGLXpD05Yet5fZfGTnveMRn5/9LZai+dBPwoMWUJdX4yPnGXgOG8zk0u1nWfcNJfYg+xajSUDiMKjDhlkuFK/GXNYuINe42s1TxzL7pJ4X4UhqLiopeJvPg/U5xdCV5pxVKf1MVenrGe2pfwf1Yr2WMv5w== rcarrillocruz@infra", "", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOjz+dkwRWTJcW9Gt3iGHSzRBsvVlTAK6G2oH3+0D41 iwienand+osinfra@redhat.com", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdzEzB2KpNLTTFJGLCNMY53sja37PXFzHHdjWEGaZtaTcuCn/ufV9ql5yhS5/414u9swoHM71H00+nT4uSWcXc2tTRXYWslaiwU47DOtQsD//CvGgIFBNO1EinWhYa5uTSfxI+Z/x4PBu7XFq5wi/JCfJ+iHIWsvXn8U44r1csURcZU0GMPAVG1MO+s3p1W7daVqF9RR7UuwCECb3hdPN1N/M4s6myBiuRXCeDND98dKLf8b342hw+pWvQ3g/OCLcVlYPWT4fy1YGQT8hT+jA2XPfwCtu/k7HKAGH3E8UcnBtY/RI9ibciIFe+Ro7q8t+tp5SgjGLq1NnE4Yp5rpsh david@koala", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCuP0CZE8AYnbm8gxecCxKeRw0wHRyryd+FKmNNsdr0d3UvfCbqNzLigrqEBZsKpofi3M4qCWNpKRyfhnjPynLTQjP1vnX9AbL9UGoiHxScfvh3skntTYMs9ezJRd0rMJJZO76FPo8bJLDlwxAQl8m/nuj3HfYiO5hYE7P+a3rhsJh4nEfBb7xh+Q5yM0PWObkkBl6IRiBYjlcsXNZHgTA5kNuihUk5bHqAw54sHh05DhpgOITpTw4LFbh4Ew2NKq49dEb2xbTuAyAr2DHNOGgIwKEZpwtKZEIGEuiLbb4DQRsfivrvyOjnK2NFjQzGyNOHfsOldWHRQwUKUs8nrxKdXvqcrfMnSVaibeYK2TRL+6jd9kc5SIhWI3XLm7HbX7uXMD7/JQrkL25Rcs6nndDCH72DJLz+ynA/T5umMbNBQ9tybL5z73IOpfShRGjQYego22CxDOy7e/5OEMHNoksbFb1S02viM9O2puS7LDqqfT9JIbbPqCrbRi/zOXo0f4EXo6xKUAmd8qlV+6f/p57/qFihzQDaRFVlFEH3k7qwsw7PYGUTwkPaThe6xyZN6D5jqxCZU3aSYu+FGb0oYo+M5IxOm0Cb4NNsvvkRPxWtwSayfFGu6+m/+/RyA3GBcAMev7AuyKN+K2vGMsLagHOx4i+5ZAcUwGzLeXAENNum3w== pabelanger@redhat.com"]}}, "ramdisk-id"=>"None", "reservation-id"=>"r-8znaan6a", "security-groups"=>nil}
0.000 | 3319: ec2_placement_availability_zone => nova
0.000 | 3320: ec2_public_hostname => centos-7-2-node-ovh-gra1-10937570
0.876 | 3321: ec2_public_keys_0_openssh_key_0 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLsTZJ8hXTmzjKxYh/7V07mIy8xl2HL+9BaUlt6A6TMsL3LSvaVQNSgmXX5g0XfPWSCKmkZb1O28q49jQI2n7n7+sHkxn0dJDxj1N2oNrzNY7pDuPrdtCijczLFdievygXNhXNkQ2WIqHXDquN/jfLLJ9L0jxtxtsUMbiL2xxZEZcaf/K5MqyPhscpqiVNE1MjE4xgPbIbv8gCKtPpYIIrktOMb4JbV7rhOp5DcSP5gXtLhOF5fbBpZ+szqrTVUcBX0oTYr3iRfOje9WPsTZIk9vBfBtF416mCNxMSRc7KhSW727AnUu85hS0xiP0MRAf69KemG1OE1pW+LtDIAEYp mordred@camelot
0.000 | 3322: ec2_public_keys_0_openssh_key_1 =>
0.000 | 3323: ec2_public_keys_0_openssh_key_10 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9x1rhTVOEQEanrN+ecycaDtAbbh3kr41Rxx7galtLq0JwftjsZqv2Vwl9c8ARmm8HiHcLwDoaZB9gvs6teMScCB+5a1fcohiycJBl2olNFRzkGapDaTvl74aLXQBWaV84D8tUavEl26zcgwrv9WLUsy9pnHoo5K0BzbK7vT2g3VictCphveC2vdjCDeptocWvt4zxCmAY6O7QMKeUjKMlvuy+zCohJcR4BbDnw8EriFAmCeQZcAgfLTyeAvjo384NNIFWyhCwvbCLvpgTplMCp896DWLlXu9eaGUCNjT/sZM8zafAXbfc6OKYFQ5iANAiJktWwKaUaphJkbSVWT5 elizabeth@r2d2
0.000 | 3324: ec2_public_keys_0_openssh_key_11 =>
0.809 | 3325: ec2_public_keys_0_openssh_key_12 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3onVLOZiiGpQWTCIV0QwHmc3Jvqyl7UaJxIu7D49OQcLHqVZsozI9pSiCdTnWyAaM+E+5wD9yVcSTqMWqn2AZmZSwQ+Fh6KnCgPZ/o63+iCZPGL0RNk20M1iNh5dvdStDnn+j2fpeV/JONF0tBn07QvNL2eF4BwtbTG9Zhl186QNsXjXDghrSO3Etl6DSfcUhxyvMoA2LnclWWD5hLmiRhcBm+PIxveVsr4B+o0k1HV5SUOvJMWtbEC37AH5I818O4fNOob6CnOFaCsbA9oUDzB5rqxutPZb9SmNJpNoLqYqDgyppM0yeql0Kn97tUt7H4j5xHrWoGnJ4IXfuDc0AMmmy4fpcLGkNf7zcBftKS6iz/3AlOXjlp5WZvKxngJj9HIir2SE/qV4Lxw9936BzvAcQyw5+bEsLQJwi+LPZxEqLC6oklkX9dg/+1yBFHsz6mulA0b4Eq7VF9omRzrhhN4iPpU5KQYPRNz7yRYckXDxYnp2lz6yHgSYh2/lqMc+UqmCL9EAWcDw3jsgvJ6kH/YUVUojiRHD9QLqlhOusu1wrTfojjwF05mqkXKmH+LH8f8AJAlMdYg0c2WLlrcxnwCkLLxzU5cYmKcZ41LuLtQR3ik+EKjYzBXXyCEzFm6qQEbR2akpXyxvONgrf7pijrgNOi0GeatUt0bUQcAONYw== jhesketh@infra
0.000 | 3326: ec2_public_keys_0_openssh_key_13 =>
0.876 | 3327: ec2_public_keys_0_openssh_key_14 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTDia7zLp6NB/DdzhGW/4MDgaQ1yemfF+fGFctrSbBZzP2Aj3RUlBh4Mut3bTIqp/PKNMXVZQbvig5nqF3sB87ZPvmk+7WluFFcQN1RIZnvkYXjF64C+G5PkEZOQW9nqEeElSCV2lXgK98FPrGtK6HgQlYxH5RJa6cufRwYLXLsAwfKRcS3P5oRU2KDORNm6uBfUuX0TyPgtEjYsjCWcffoW+E8kvZbx1DKxF4+u0mWSdkg0P40aAY10mHACtJ4hnu7xNa5Z9Oru1rA1KWL5NHISgy9t5zC1/0jWfYi+tqToBgUCyB8stWgNpHh+QJrpS8CoCDzQLBar0ynnOxBfHH2+s9xJapQNi6ZOC3khWkoxUJn2Gs9FXqow3zGSmEuEKbbUvaGC58U4S0xFcZzF+sOzjRJtw66wE2pQN5Pj/Qw09w6gt05g4nxoxkRVCwMLdnyoIY1oFmywJX3xC1Utu2oCNfgZSn78rqVkE9e11LczPNGvYjl6xQo1r254E0w3QBgo+LaTK5FBRCAbJ76n0IBJ8SZe9foPWjKTGlbCevM6KO8lm58/0m0EfMf9457ZM9KhyXwYvnb+iR7huGC+pwgGemJ4D6vjeE9EUNGSq6igg+v+cl1DHOxVb0s0Tx2T6DMh3usB4C1uoNCR303cmzrNZ94KLXRICQArSClQI7OQ== nibz@hertz
0.000 | 3328: ec2_public_keys_0_openssh_key_15 =>
0.809 | 3329: ec2_public_keys_0_openssh_key_16 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSR2NmJC8PSanHUpKJuaMmohG80COO2IPkE3Mxhr7US8P1B3p1c6lOrT6M1txRzBY8FlbxfOinGtutP+ADCB2taXfpO8UiaG9eOqojAT/PeP2Y2ov72rVMSWupLozUv2uAR5yyFVFHOjKPYGAa01aJtfzfJujSak8dM0ifFeFwgp/8RBGEfC7atq+45TdrfAURRcEgcOLiF5Aq6fprCOwpllnrH6VoId9YS7u/5xF2/zBjr9PuOP7jEgCaL/+FNqu7jgj87aG5jiZPlweb7GTLJON9H6eFpyfpoJE0sZ1yR9Q+e9FAqQIA44Zi748qKBlFKbLxzoC4mc0SbNUAleEL yolanda@infra
0.000 | 3330: ec2_public_keys_0_openssh_key_17 =>
0.809 | 3331: ec2_public_keys_0_openssh_key_18 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1CW5E87v8o7O8B5fe7j1uaPCToRdaBukjH2HzQZ+DSGTIPjirLpp5ZXPuyNnmtRMzwld6mlHYlevVEwuZTNyQwS7ut5o0LjyW6yoEcvPq0xMEZLxaso5dZAtzNgf3FzbtaUYBnkhSwX7c24lf8wPGAl7TC3yO0dePQh2lXVdaBiGB9ybVeQr+kwJIxleUE4puuQ+ONJE2D+hHjoQ/huUMpb996pb/YzkjkAxqHguMid0c1taelyW8n17nEDoWvlV9Qqbo8cerhgURo1OBt2zENLjQQ0kOkPxJx4qx3652e0kbkr11y50r9BMs418mnJdWselMxkSqQNZ+XotoH5Dwn+3K2a6Wv4OX3Dqb9SF/JTD7lA/tIkNfxgsRlzfEQ01rK1+g7Je10EnDCLEzHpFjvZ5q4EEMcYqY+osLFpHAOWGLMx+3eY4pz/xEzRP/x3sjGU09uNOZ3oCWUfSkE4xebnnWtxwWZKyFmv3GHtaqJn2UvpAbODPEYyYcOS3XV3zd233W3C09YYnFUyZbGLXpD05Yet5fZfGTnveMRn5/9LZai+dBPwoMWUJdX4yPnGXgOG8zk0u1nWfcNJfYg+xajSUDiMKjDhlkuFK/GXNYuINe42s1TxzL7pJ4X4UhqLiopeJvPg/U5xdCV5pxVKf1MVenrGe2pfwf1Yr2WMv5w== rcarrillocruz@infra
0.000 | 3332: ec2_public_keys_0_openssh_key_19 =>
0.848 | 3333: ec2_public_keys_0_openssh_key_2 => ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvKYcWK1T7e3PKSFiqb03EYktnoxVASpPoq2rJw2JvhsP0JfS+lKrPzpUQv7L4JCuQMsPNtZ8LnwVEft39k58Kh8XMebSfaqPYAZS5zCNvQUQIhP9myOevBZf4CDeG+gmssqRFcWEwIllfDuIzKBQGVbomR+Y5QuW0HczIbkoOYI6iyf2jB6xg+bmzR2HViofNrSa62CYmHS6dO04Z95J27w6jGWpEOTBjEQvnb9sdBc4EzaBVmxCpa2EilB1u0th7/DvuH0yP4T+X8G8UjW1gZCTOVw06fqlBCST4KjdWw1F/AuOCT7048klbf4H+mCTaEcPzzu3Fkv8ckMWtS/Z9Q== jeblair@operational-necessity
0.519 | 3334: ec2_public_keys_0_openssh_key_20 => ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOjz+dkwRWTJcW9Gt3iGHSzRBsvVlTAK6G2oH3+0D41 iwienand+osinfra@redhat.com
0.000 | 3335: ec2_public_keys_0_openssh_key_21 =>
0.876 | 3336: ec2_public_keys_0_openssh_key_22 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdzEzB2KpNLTTFJGLCNMY53sja37PXFzHHdjWEGaZtaTcuCn/ufV9ql5yhS5/414u9swoHM71H00+nT4uSWcXc2tTRXYWslaiwU47DOtQsD//CvGgIFBNO1EinWhYa5uTSfxI+Z/x4PBu7XFq5wi/JCfJ+iHIWsvXn8U44r1csURcZU0GMPAVG1MO+s3p1W7daVqF9RR7UuwCECb3hdPN1N/M4s6myBiuRXCeDND98dKLf8b342hw+pWvQ3g/OCLcVlYPWT4fy1YGQT8hT+jA2XPfwCtu/k7HKAGH3E8UcnBtY/RI9ibciIFe+Ro7q8t+tp5SgjGLq1NnE4Yp5rpsh david@koala
0.000 | 3337: ec2_public_keys_0_openssh_key_23 =>
0.439 | 3338: ec2_public_keys_0_openssh_key_24 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCuP0CZE8AYnbm8gxecCxKeRw0wHRyryd+FKmNNsdr0d3UvfCbqNzLigrqEBZsKpofi3M4qCWNpKRyfhnjPynLTQjP1vnX9AbL9UGoiHxScfvh3skntTYMs9ezJRd0rMJJZO76FPo8bJLDlwxAQl8m/nuj3HfYiO5hYE7P+a3rhsJh4nEfBb7xh+Q5yM0PWObkkBl6IRiBYjlcsXNZHgTA5kNuihUk5bHqAw54sHh05DhpgOITpTw4LFbh4Ew2NKq49dEb2xbTuAyAr2DHNOGgIwKEZpwtKZEIGEuiLbb4DQRsfivrvyOjnK2NFjQzGyNOHfsOldWHRQwUKUs8nrxKdXvqcrfMnSVaibeYK2TRL+6jd9kc5SIhWI3XLm7HbX7uXMD7/JQrkL25Rcs6nndDCH72DJLz+ynA/T5umMbNBQ9tybL5z73IOpfShRGjQYego22CxDOy7e/5OEMHNoksbFb1S02viM9O2puS7LDqqfT9JIbbPqCrbRi/zOXo0f4EXo6xKUAmd8qlV+6f/p57/qFihzQDaRFVlFEH3k7qwsw7PYGUTwkPaThe6xyZN6D5jqxCZU3aSYu+FGb0oYo+M5IxOm0Cb4NNsvvkRPxWtwSayfFGu6+m/+/RyA3GBcAMev7AuyKN+K2vGMsLagHOx4i+5ZAcUwGzLeXAENNum3w== pabelanger@redhat.com
0.000 | 3339: ec2_public_keys_0_openssh_key_3 =>
0.876 | 3340: ec2_public_keys_0_openssh_key_4 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCnfoVhOTkrY7uoebL8PoHXb0Fg4jJqGCbwkxUdNUdheIdbnfyjuRG3iL8WZnzf7nzWnD+IGo6kkAo8BkNMK9L0P0Y+5IjI8NH49KU22tQ1umij4EIf5tzLh4gsqkJmy6QLrlbf10m6UF4rLFQhKzOd4b2H2K6KbP00CIymvbW3BwvNDODM4xRE2uao387qfvXZBUkB0PpRD+7fWPoN58gpFUm407Eba3WwX5PCD+1DD+RVBsG8maIDXerQ7lvFLoSuyMswv1TfkvCj0ZFhSFbfTd2ZysCu6eryFfeixR7NY9SNcp9YTqG6LrxGA7Ci6wz+hycFHXlDrlBgfFJDe5At clark@work
0.000 | 3341: ec2_public_keys_0_openssh_key_5 =>
0.701 | 3342: ec2_public_keys_0_openssh_key_6 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3KnRBTH5QPpKjf4RWu4akzYt2gwp796cMkFl5vu8e7G/cHuh4979FeNJXMVP6F3rvZB+yXDHLCU5LBVLq0K+1GbAZT/hH38hpMOIvniwKIquvI6C/drkVPHO6YmVlapw/NI530PGnT/TAqCOycHBO5eF1bYsaqV1yZqvs9v7UZc6J4LukoLZwpmyWZ5P3ltAiiy8+FGq3SLCKWDMmv/Bjz4zTsaNbSWThJi0BydINjC1/0ze5Tyc/XgW1sDuxmmXJxgQp4EvLpronqb2hT60iA52kj8lrmoCIryRpgnbaRA7BrxKF8zIr0ZALHijxEUeWHhFJDIVRGUf0Ef0nrmBv fungi-openstack-2015
0.000 | 3343: ec2_public_keys_0_openssh_key_7 =>
0.876 | 3344: ec2_public_keys_0_openssh_key_8 => ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHGuIVB/WxBd7k1R8x2FyfqT6KxRnoM7lE5RE8gvBk2r8cQeH5k1c+P5JrBvWpmqXv4satoivYOBiIb7JXEgIxx62YUx/JQ0J7k3w+av6h4iFe2OhOtEOjMF5F8/wO8a/95OeTZPzBZlUfA3hx754kuw3Q/aBKQUOHWxJOIedGyVHeJc7XiFj3RXIufFuUfng9+p4Z3q6d2/WpuKqs00WI0CLF17PkU4i8P9CraJR1dmsWW6zoxMT2G+DwMFI7ZMS3xrVBRuLwrLlbylVLW2kOJ0JeyjHnRh7X1kR7KG3cGOOjA1YQ0e+mXvremcO3/3o6Iop/N1AtqVuYCKlZc7Y9 slukjanov@mirantis.com
0.000 | 3345: ec2_public_keys_0_openssh_key_9 =>

0.000 | 3412: partitions => {"vda1"=>{"uuid"=>"2d930d32-4be2-4440-8fec-05114b535f83", "size"=>"167770079", "mount"=>"/", "label"=>"cloudimg-rootfs", "filesystem"=>"ext4"}}
0.000 | 3413: path => /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin/
0.000 | 3414: physicalprocessorcount => 8
0.512 | 3415: processor0 => Intel Core Processor (Haswell, no TSX)
0.512 | 3416: processor1 => Intel Core Processor (Haswell, no TSX)
0.512 | 3417: processor2 => Intel Core Processor (Haswell, no TSX)
0.512 | 3418: processor3 => Intel Core Processor (Haswell, no TSX)
0.512 | 3419: processor4 => Intel Core Processor (Haswell, no TSX)
0.512 | 3420: processor5 => Intel Core Processor (Haswell, no TSX)
0.512 | 3421: processor6 => Intel Core Processor (Haswell, no TSX)
0.512 | 3422: processor7 => Intel Core Processor (Haswell, no TSX)
0.000 | 3423: processorcount => 8
0.492 | 3424: processors => {"models"=>["Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)"], "count"=>8, "physicalcount"=>8}
0.000 | 3425: productname => OpenStack Nova

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/console.html
0.000 | 0020: 2017-09-14 18:00:31.687528 | 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
0.000 | 0021: 2017-09-14 18:00:31.687566 | link/ether fa:16:3e:b8:8b:c3 brd ff:ff:ff:ff:ff:ff
0.000 | 0022: 2017-09-14 18:00:31.687607 | inet 149.202.161.189/32 brd 149.202.161.189 scope global dynamic eth0
0.423 | 0023: 2017-09-14 18:00:31.687660 | valid_lft 86306sec preferred_lft 86306sec
0.000 | 0024: 2017-09-14 18:00:31.687697 | inet6 fe80::f816:3eff:feb8:8bc3/64 scope link

0.041 | 0580: 2017-09-14 18:16:32.815410 | ++(/etc/ci/mirror_info.sh:57): NODEPOOL_UCA_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-cloud-archive
0.044 | 0581: 2017-09-14 18:16:32.815527 | ++(/etc/ci/mirror_info.sh:58): export NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.047 | 0582: 2017-09-14 18:16:32.815607 | ++(/etc/ci/mirror_info.sh:58): NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.207 | 0583: 2017-09-14 18:16:32.815758 | ++(/etc/ci/mirror_info.sh:60): export NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.227 | 0584: 2017-09-14 18:16:32.815849 | ++(/etc/ci/mirror_info.sh:60): NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.182 | 0585: 2017-09-14 18:16:32.815982 | ++(/etc/ci/mirror_info.sh:61): export NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.200 | 0586: 2017-09-14 18:16:32.816081 | ++(/etc/ci/mirror_info.sh:61): NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.195 | 0587: 2017-09-14 18:16:32.816192 | ++(/etc/ci/mirror_info.sh:62): export NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.214 | 0588: 2017-09-14 18:16:32.816278 | ++(/etc/ci/mirror_info.sh:62): NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.031 | 0589: 2017-09-14 18:16:32.816415 | ++(/etc/ci/mirror_info.sh:63): export NODEPOOL_RUGYGEMS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rubygems/

0.000 | 3900: 2017-09-14 19:48:14.664534 | TASK [validate-tempest : Tempest failed if rc code is not 0] *******************
0.000 | 3901: 2017-09-14 19:48:14.664693 | task path: /home/jenkins/workspace/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/.quickstart/usr/local/share/ansible/roles/validate-tempest/tasks/tempest-status.yml:2
0.000 | 3902: 2017-09-14 19:48:14.736017 | Thursday 14 September 2017 19:48:14 +0000 (0:00:00.728) 1:25:40.808 ****
0.247 | 3903: 2017-09-14 19:48:14.791462 | ok: [undercloud] => {"ansible_facts": {"tempest_status": "failed"}, "changed": false}
0.000 | 3904: 2017-09-14 19:48:14.826861 |

0.000 | 3910: 2017-09-14 19:48:15.096901 | TASK [validate-tempest : import tempest_status from file] **********************
0.000 | 3911: 2017-09-14 19:48:15.096985 | task path: /home/jenkins/workspace/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/.quickstart/usr/local/share/ansible/roles/validate-tempest/tasks/tempest-status.yml:13
0.000 | 3912: 2017-09-14 19:48:15.139554 | Thursday 14 September 2017 19:48:15 +0000 (0:00:00.277) 1:25:41.212 ****
0.247 | 3913: 2017-09-14 19:48:15.196529 | ok: [undercloud] => {"ansible_facts": {"tempest_status": "failed"}, "changed": false}
0.000 | 3914: 2017-09-14 19:48:15.227856 |

0.000 | 3916: 2017-09-14 19:48:15.228087 | task path: /home/jenkins/workspace/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/.quickstart/usr/local/share/ansible/roles/validate-tempest/tasks/tempest-status.yml:16
0.000 | 3917: 2017-09-14 19:48:15.265673 | Thursday 14 September 2017 19:48:15 +0000 (0:00:00.126) 1:25:41.338 ****
0.000 | 3918: 2017-09-14 19:48:15.320833 | ok: [undercloud] => {
0.326 | 3919: 2017-09-14 19:48:15.320908 | "tempest_status": "failed"
0.000 | 3920: 2017-09-14 19:48:15.320927 | }

0.000 | 3973: 2017-09-14 19:48:29.340084 | TASK [validate-tempest : Exit with tempest result code if configured] **********
0.000 | 3974: 2017-09-14 19:48:29.340329 | task path: /home/jenkins/workspace/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/.quickstart/usr/local/share/ansible/roles/validate-tempest/tasks/tempest-results.yml:72
0.000 | 3975: 2017-09-14 19:48:29.400930 | Thursday 14 September 2017 19:48:29 +0000 (0:00:02.222) 1:25:55.473 ****
0.805 | 3976: 2017-09-14 19:48:30.016029 | fatal: [undercloud]: FAILED! => {"changed": true, "cmd": "tail -10 tempest_output.log; exit 1", "delta": "0:00:00.009406", "end": "2017-09-14 19:48:29.990797", "failed": true, "rc": 1, "start": "2017-09-14 19:48:29.981391", "stderr": "", "stdout": "2017-09-14 19:47:51 | - Worker 0 (1 tests) => 0:00:17.706163
0.805 | 3976: 2017-09-14 19:47:51 |
0.805 | 3976: 2017-09-14 19:47:51 | No tests were successful during the run
0.805 | 3976: 2017-09-14 19:47:52 | Test id Runtime (s)
0.805 | 3976: 2017-09-14 19:47:52 | ------------------------------------------------------------------------------------------------------------------------------------------------- -----------
0.805 | 3976: 2017-09-14 19:47:52 | tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke] 17.706
0.805 | 3976: 2017-09-14 19:47:52 |
0.805 | 3976: 2017-09-14 19:47:52 | Slowest Tests:
0.805 | 3976: 2017-09-14 19:47:52 |
0.805 | 3976: tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke] (subunit.RemotedTestCase)", "stdout_lines": ["2017-09-14 19:47:51 | - Worker 0 (1 tests) => 0:00:17.706163", "2017-09-14 19:47:51 | ", "2017-09-14 19:47:51 | No tests were successful during the run", "2017-09-14 19:47:52 | Test id Runtime (s)", "2017-09-14 19:47:52 | ------------------------------------------------------------------------------------------------------------------------------------------------- -----------", "2017-09-14 19:47:52 | tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke] 17.706", "2017-09-14 19:47:52 | ", "2017-09-14 19:47:52 | Slowest Tests:", "2017-09-14 19:47:52 | ", "tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke] (subunit.RemotedTestCase)"], "warnings": []}
0.000 | 3977: 2017-09-14 19:48:30.057969 |

0.000 | 4070: 2017-09-14 19:52:26.383331 | +(./toci_quickstart.sh:132): echo 'Quickstart completed.'
0.000 | 4071: 2017-09-14 19:52:26.383347 | Quickstart completed.
0.000 | 4072: 2017-09-14 19:52:26.383367 | +(./toci_quickstart.sh:133): exit 2
0.525 | 4073: 2017-09-14 19:52:26.399576 | ERROR: the main setup script run by this job failed - exit code: 2
0.580 | 4074: 2017-09-14 19:52:26.399920 | please look at the relevant log files to determine the root cause
0.472 | 4075: 2017-09-14 19:52:26.399953 | Running devstack worlddump.py
0.000 | 4076: 2017-09-14 19:52:32.935716 | Cleaning up host

0.000 | 4264: 2017-09-14 19:53:21.698878 | }
0.000 | 4265: 2017-09-14 19:53:22.797688 | Generating static files at /home/jenkins/workspace/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/logs/ara...
0.000 | 4266: 2017-09-14 19:53:25.656685 | Done.
0.592 | 4267: 2017-09-14 19:53:27.671254 | *** FAILED with status: 2
0.000 | 4268: 2017-09-14 19:53:27.786927 | [Zuul] Task exit code: 2

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/overcloud_deploy.log.txt.gz
0.000 | 0041: 2017-09-14 19:00:38 | Waiting for messages on queue '4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd' with no timeout.
0.000 | 0042: 2017-09-14 19:00:57 | Configuration has 8 errors, fix them before proceeding. Ignoring these errors is likely to lead to a failed deploy.
0.000 | 0043: 2017-09-14 19:02:56 | Started Mistral Workflow tripleo.validations.v1.check_pre_deployment_validations. Execution ID: cfeab334-cf05-4cb5-ba81-0b387e1d0cf8
0.355 | 0044: 2017-09-14 19:02:56 | {u'kernel_id': None, u'ramdisk_id': None, u'errors': [u"No image with the name 'bm-deploy-kernel' found - make sure you have uploaded boot images.", u"No image with the name 'bm-deploy-ramdisk' found - make sure you have uploaded boot images."], u'warnings': []}
0.000 | 0045: 2017-09-14 19:02:56 | {u'errors': [u'Error: There are no nodes in an available or active state and with maintenance mode off.'], u'warnings': []}

0.000 | 0267: 2017-09-14 19:04:42 | 2017-09-14 19:04:27Z [overcloud.ControllerServiceConfigSettings]: CREATE_COMPLETE state changed
0.000 | 0268: 2017-09-14 19:04:42 | 2017-09-14 19:04:27Z [overcloud.Controller]: CREATE_IN_PROGRESS state changed
0.000 | 0269: 2017-09-14 19:04:42 | 2017-09-14 19:04:30Z [overcloud.ControllerMergedConfigSettings]: CREATE_IN_PROGRESS state changed
0.289 | 0270: 2017-09-14 19:04:42 | 2017-09-14 19:04:30Z [0]: CREATE_IN_PROGRESS state changed
0.000 | 0271: 2017-09-14 19:04:42 | 2017-09-14 19:04:31Z [overcloud.ControllerMergedConfigSettings]: CREATE_COMPLETE state changed
0.016 | 0272: 2017-09-14 19:04:42 | 2017-09-14 19:04:35Z [0]: CREATE_IN_PROGRESS Stack CREATE started
0.289 | 0273: 2017-09-14 19:04:42 | 2017-09-14 19:04:35Z [0.DeploymentActions]: CREATE_IN_PROGRESS state changed
0.289 | 0274: 2017-09-14 19:04:42 | 2017-09-14 19:04:35Z [0.UpdateConfig]: CREATE_IN_PROGRESS state changed
0.289 | 0275: 2017-09-14 19:04:42 | 2017-09-14 19:04:36Z [0.ControllerUpgradeInitConfig]: CREATE_IN_PROGRESS state changed
0.289 | 0276: 2017-09-14 19:04:42 | 2017-09-14 19:04:36Z [0.NodeUserData]: CREATE_IN_PROGRESS state changed
0.289 | 0277: 2017-09-14 19:04:42 | 2017-09-14 19:04:37Z [0.RoleUserData]: CREATE_IN_PROGRESS state changed
0.289 | 0278: 2017-09-14 19:04:42 | 2017-09-14 19:04:38Z [0.NodeAdminUserData]: CREATE_IN_PROGRESS state changed
0.290 | 0279: 2017-09-14 19:10:11 | 2017-09-14 19:04:39Z [0.UpdateConfig]: CREATE_COMPLETE state changed
0.290 | 0280: 2017-09-14 19:10:11 | 2017-09-14 19:04:39Z [0.NodeUserData]: CREATE_COMPLETE state changed
0.290 | 0281: 2017-09-14 19:10:11 | 2017-09-14 19:04:39Z [0.DeploymentActions]: CREATE_COMPLETE state changed
0.290 | 0282: 2017-09-14 19:10:11 | 2017-09-14 19:04:39Z [0.ControllerUpgradeInitConfig]: CREATE_COMPLETE state changed
0.290 | 0283: 2017-09-14 19:10:11 | 2017-09-14 19:04:39Z [0.RoleUserData]: CREATE_COMPLETE state changed
0.290 | 0284: 2017-09-14 19:10:11 | 2017-09-14 19:04:41Z [0.NodeAdminUserData]: CREATE_COMPLETE state changed
0.289 | 0285: 2017-09-14 19:10:11 | 2017-09-14 19:04:41Z [0.UserData]: CREATE_IN_PROGRESS state changed
0.290 | 0286: 2017-09-14 19:10:11 | 2017-09-14 19:04:42Z [0.UserData]: CREATE_COMPLETE state changed
0.289 | 0287: 2017-09-14 19:10:11 | 2017-09-14 19:04:42Z [0.Controller]: CREATE_IN_PROGRESS state changed
0.290 | 0288: 2017-09-14 19:10:11 | 2017-09-14 19:08:23Z [0.Controller]: CREATE_COMPLETE state changed
0.289 | 0289: 2017-09-14 19:10:11 | 2017-09-14 19:08:24Z [0.PreNetworkConfig]: CREATE_IN_PROGRESS state changed
0.289 | 0290: 2017-09-14 19:10:11 | 2017-09-14 19:08:24Z [0.ExternalPort]: CREATE_IN_PROGRESS state changed
0.289 | 0291: 2017-09-14 19:10:11 | 2017-09-14 19:08:25Z [0.TenantPort]: CREATE_IN_PROGRESS state changed
0.289 | 0292: 2017-09-14 19:10:11 | 2017-09-14 19:08:26Z [0.StoragePort]: CREATE_IN_PROGRESS state changed
0.289 | 0293: 2017-09-14 19:10:11 | 2017-09-14 19:08:26Z [0.StorageMgmtPort]: CREATE_IN_PROGRESS state changed
0.289 | 0294: 2017-09-14 19:10:11 | 2017-09-14 19:08:27Z [0.InternalApiPort]: CREATE_IN_PROGRESS state changed
0.289 | 0295: 2017-09-14 19:10:11 | 2017-09-14 19:08:28Z [0.NetHostMap]: CREATE_IN_PROGRESS state changed
0.289 | 0296: 2017-09-14 19:10:11 | 2017-09-14 19:08:28Z [0.ManagementPort]: CREATE_IN_PROGRESS state changed
0.290 | 0297: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.StoragePort]: CREATE_COMPLETE state changed
0.290 | 0298: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.InternalApiPort]: CREATE_COMPLETE state changed
0.290 | 0299: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.PreNetworkConfig]: CREATE_COMPLETE state changed
0.290 | 0300: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.TenantPort]: CREATE_COMPLETE state changed
0.290 | 0301: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.StorageMgmtPort]: CREATE_COMPLETE state changed
0.290 | 0302: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.ExternalPort]: CREATE_COMPLETE state changed
0.290 | 0303: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.NetHostMap]: CREATE_COMPLETE state changed
0.290 | 0304: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.ManagementPort]: CREATE_COMPLETE state changed
0.289 | 0305: 2017-09-14 19:10:11 | 2017-09-14 19:08:30Z [0.NetIpMap]: CREATE_IN_PROGRESS state changed
0.289 | 0306: 2017-09-14 19:10:11 | 2017-09-14 19:08:31Z [0.NetworkConfig]: CREATE_IN_PROGRESS state changed
0.290 | 0307: 2017-09-14 19:10:11 | 2017-09-14 19:08:33Z [0.NetIpMap]: CREATE_COMPLETE state changed
0.290 | 0308: 2017-09-14 19:10:11 | 2017-09-14 19:08:33Z [0.NetworkConfig]: CREATE_COMPLETE state changed
0.289 | 0309: 2017-09-14 19:10:11 | 2017-09-14 19:08:33Z [0.ControllerConfig]: CREATE_IN_PROGRESS state changed
0.289 | 0310: 2017-09-14 19:10:11 | 2017-09-14 19:08:33Z [0.NetworkDeployment]: CREATE_IN_PROGRESS state changed
0.290 | 0311: 2017-09-14 19:10:11 | 2017-09-14 19:08:35Z [0.ControllerConfig]: CREATE_COMPLETE state changed
0.057 | 0312: 2017-09-14 19:10:11 | 2017-09-14 19:08:53Z [0.NetworkDeployment]: SIGNAL_IN_PROGRESS Signal: deployment 7f42c473-b9e7-4c83-a192-109c7b0268dc succeeded
0.290 | 0313: 2017-09-14 19:10:11 | 2017-09-14 19:08:54Z [0.NetworkDeployment]: CREATE_COMPLETE state changed
0.289 | 0314: 2017-09-14 19:10:11 | 2017-09-14 19:08:54Z [0.UpdateDeployment]: CREATE_IN_PROGRESS state changed
0.289 | 0315: 2017-09-14 19:10:11 | 2017-09-14 19:08:55Z [0.ControllerUpgradeInitDeployment]: CREATE_IN_PROGRESS state changed
0.289 | 0316: 2017-09-14 19:10:11 | 2017-09-14 19:08:56Z [0.NodeTLSCAData]: CREATE_IN_PROGRESS state changed
0.290 | 0317: 2017-09-14 19:10:11 | 2017-09-14 19:08:57Z [0.NodeTLSCAData]: CREATE_COMPLETE state changed
0.289 | 0318: 2017-09-14 19:10:11 | 2017-09-14 19:08:57Z [0.NodeTLSData]: CREATE_IN_PROGRESS state changed
0.290 | 0319: 2017-09-14 19:10:11 | 2017-09-14 19:08:58Z [0.NodeTLSData]: CREATE_COMPLETE state changed
0.057 | 0320: 2017-09-14 19:10:11 | 2017-09-14 19:09:26Z [0.ControllerUpgradeInitDeployment]: SIGNAL_IN_PROGRESS Signal: deployment e8823fee-8b43-4917-997a-2e612c151b49 succeeded
0.290 | 0321: 2017-09-14 19:10:11 | 2017-09-14 19:09:27Z [0.ControllerUpgradeInitDeployment]: CREATE_COMPLETE state changed
0.057 | 0322: 2017-09-14 19:10:11 | 2017-09-14 19:09:32Z [0.UpdateDeployment]: SIGNAL_IN_PROGRESS Signal: deployment f6c3bd24-48ae-47f4-b34e-87f30571c17b succeeded
0.290 | 0323: 2017-09-14 19:10:11 | 2017-09-14 19:09:33Z [0.UpdateDeployment]: CREATE_COMPLETE state changed
0.289 | 0324: 2017-09-14 19:10:11 | 2017-09-14 19:09:33Z [0.ControllerDeployment]: CREATE_IN_PROGRESS state changed
0.057 | 0325: 2017-09-14 19:10:11 | 2017-09-14 19:10:06Z [0.ControllerDeployment]: SIGNAL_IN_PROGRESS Signal: deployment c0551f7c-ec82-4678-a5dc-0eb6d77e9032 succeeded
0.290 | 0326: 2017-09-14 19:10:11 | 2017-09-14 19:10:06Z [0.ControllerDeployment]: CREATE_COMPLETE state changed
0.289 | 0327: 2017-09-14 19:10:11 | 2017-09-14 19:10:06Z [0.ControllerExtraConfigPre]: CREATE_IN_PROGRESS state changed
0.289 | 0328: 2017-09-14 19:10:11 | 2017-09-14 19:10:06Z [0.SshHostPubKey]: CREATE_IN_PROGRESS state changed
0.290 | 0329: 2017-09-14 19:10:11 | 2017-09-14 19:10:09Z [0.ControllerExtraConfigPre]: CREATE_COMPLETE state changed
0.289 | 0330: 2017-09-14 19:10:11 | 2017-09-14 19:10:09Z [0.NodeExtraConfig]: CREATE_IN_PROGRESS state changed
0.290 | 0331: 2017-09-14 19:10:59 | 2017-09-14 19:10:11Z [0.NodeExtraConfig]: CREATE_COMPLETE state changed
0.290 | 0332: 2017-09-14 19:10:59 | 2017-09-14 19:10:39Z [0.SshHostPubKey]: CREATE_COMPLETE state changed
0.012 | 0333: 2017-09-14 19:10:59 | 2017-09-14 19:10:39Z [0]: CREATE_COMPLETE Stack CREATE completed successfully
0.290 | 0334: 2017-09-14 19:10:59 | 2017-09-14 19:10:40Z [0]: CREATE_COMPLETE state changed
0.000 | 0335: 2017-09-14 19:10:59 | 2017-09-14 19:10:43Z [overcloud.Controller]: UPDATE_COMPLETE Stack UPDATE completed successfully

0.000 | 0477: 2017-09-14 19:40:06 | 2017-09-14 19:40:05Z [overcloud.AllNodesDeploySteps.ControllerPostConfig]: CREATE_IN_PROGRESS Stack CREATE started
0.000 | 0478: 2017-09-14 19:40:06 | 2017-09-14 19:40:05Z [overcloud.AllNodesDeploySteps.ControllerPostConfig.ControllerPostPuppetMaintenanceModeConfig]: CREATE_IN_PROGRESS state changed
0.072 | 0479: 2017-09-14 19:41:54 | 2017-09-14 19:40:05Z [overcloud.AllNodesDeploySteps.ControllerPostConfig.ControllerPostPuppetMaintenanHost 192.168.24.9 not found in /home/jenkins/.ssh/known_hosts
0.290 | 0480: 2017-09-14 19:41:54 | ceModeConfig]: CREATE_COMPLETE state changed
0.000 | 0481: 2017-09-14 19:41:54 | 2017-09-14 19:40:05Z [overcloud.AllNodesDeploySteps.ControllerPostConfig.ControllerPostPuppetMaintenanceModeDeployment]: CREATE_IN_PROGRESS state changed

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/tempest/tempest.log.txt.gz
0.000 | 1209: 2017-09-14 19:47:42.028 11888 DEBUG tempest.lib.common.rest_client [req-6e55fff0-75a9-4060-ac98-fed799b3fbe1 ] Request - Headers: {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}
0.000 | 1210: Body: {"server": {"name": "tempest-TestNetworkBasicOps-server-966396963", "imageRef": "6c689e91-3408-451e-bc58-7c03eddd0028", "key_name": "tempest-TestNetworkBasicOps-198610858", "flavorRef": "8f773c57-4f7b-49f3-bef7-4fda99ada582", "networks": [{"uuid": "87338d55-159e-4fd4-9429-080e9bfff009"}], "security_groups": [{"name": "tempest-secgroup-smoke-1905469331"}]}}
0.000 | 1211: Response - Headers: {'status': '500', u'content-length': '199', 'content-location': 'http://192.168.24.9:8774/v2.1/servers', u'x-compute-request-id': 'req-6e55fff0-75a9-4060-ac98-fed799b3fbe1', u'vary': 'OpenStack-API-Version,X-OpenStack-Nova-API-Version', u'server': 'Apache', u'openstack-api-version': 'compute 2.1', u'connection': 'close', u'x-openstack-nova-api-version': '2.1', u'date': 'Thu, 14 Sep 2017 19:47:41 GMT', u'content-type': 'application/json; charset=UTF-8', u'x-openstack-request-id': 'req-6e55fff0-75a9-4060-ac98-fed799b3fbe1'}
0.614 | 1212: Body: {"computeFault": {"message": "Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible.
0.614 | 1212: <class 'ConfigParser.ParsingError'>", "code": 500}} _log_request_full /usr/lib/python2.7/site-packages/tempest/lib/common/rest_client.py:425
0.000 | 1213: 2017-09-14 19:47:42.281 11888 INFO tempest.lib.common.rest_client [req-e6cfac6b-6693-4c88-9229-5e03d99e3158 ] Request (TestNetworkBasicOps:_run_cleanups): 204 DELETE http://192.168.24.9:9696/v2.0/security-groups/433b71ba-9604-4c12-9b4e-f63b1b9a2b62 0.243s
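
The 0.614-scored response body above is the standout anomaly in this file: nova-api answered the server-create request with a 500 whose root exception is ConfigParser.ParsingError, meaning the Nova API process failed to parse one of its configuration files. A minimal sketch of how that exception class surfaces, written for Python 2 to match the traceback; the file name and config contents below are invented for illustration and are not taken from the job logs:

    # Sketch only: reproduces the exception class named above with a
    # deliberately malformed option line. Not taken from the job logs.
    import ConfigParser
    import StringIO

    bad_conf = "[DEFAULT]\nthis line has no key/value separator\n"

    parser = ConfigParser.ConfigParser()
    try:
        # readfp() parses a file-like object; a malformed line makes the
        # parser raise ParsingError once the whole file has been read.
        parser.readfp(StringIO.StringIO(bad_conf), "nova.conf")
    except ConfigParser.ParsingError as exc:
        print(exc)  # "File contains parsing errors: nova.conf ..."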

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/ps.txt.gz
0.000 | 0081: root 542 2 542 0.0 0.0 0 0 [kworker/1:1H]
0.000 | 0082: root 624 2 624 0.0 0.0 0 0 [kworker/4:1H]
0.000 | 0083: root 631 1 631 0.0 0.0 316 524 /usr/sbin/acpid
0.601 | 0084: root 758 1 758 0.0 0.1 12284 12840 /sbin/dhclient -1 -q -cf /etc/dhcp/dhclient-eth0.conf -lf /var/lib/dhclient/dhclient--eth0.lease -pf /var/run/dhclient-eth0.pid -H centos-7-2-node-ovh-gra1-10937570-899082 eth0
0.601 | 0085: root 981 1 981 0.0 0.1 12284 12952 /sbin/dhclient -1 -q -cf /etc/dhcp/dhclient-eth0.conf -lf /var/lib/dhclient/dhclient--eth0.lease -pf /var/run/dhclient-eth0.pid -H centos-7-2-node-ovh-gra1-10937570-899082 eth0
0.000 | 0086: root 1055 1 1055 0.0 0.0 344 828 /sbin/agetty --noclear tty1 linux

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/dmesg.txt.gz
0.000 | 0056: [ 0.000000] NODE_DATA(0) allocated [mem 0x233fd7000-0x233ffdfff]
0.000 | 0057: [ 0.000000] kvm-clock: Using msrs 4b564d01 and 4b564d00
0.000 | 0058: [ 0.000000] kvm-clock: cpu 0, msr 2:33f87001, primary cpu clock
0.411 | 0059: [ 0.000000] kvm-clock: using sched offset of 1239040549 cycles
0.000 | 0060: [ 0.000000] Zone ranges:

0.000 | 0120: [ 0.000000] Policy zone: Normal
0.000 | 0121: [ 0.000000] Kernel command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64 root=LABEL=cloudimg-rootfs ro nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 no_timer_check
0.000 | 0122: [ 0.000000] PID hash table entries: 4096 (order: 3, 32768 bytes)
1.000 | 0123: [ 0.000000] x86/fpu: xstate_offset[2]: 0240, xstate_sizes[2]: 0100
0.646 | 0124: [ 0.000000] xsave: enabled xstate_bv 0x7, cntxt size 0x340 using standard form
0.000 | 0125: [ 0.000000] Memory: 5139612k/9240576k available (6886k kernel code, 1049112k absent, 234036k reserved, 4545k data, 1764k init)

0.000 | 0165: [ 0.404746] Enabled x2apic
0.000 | 0166: [ 0.405756] Switched APIC routing to physical x2apic.
0.000 | 0167: [ 0.407737] ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
0.294 | 0168: [ 0.409024] smpboot: CPU0: Intel Core Processor (Haswell, no TSX) (fam: 06, model: 3c, stepping: 01)
0.556 | 0169: [ 0.411372] TSC deadline timer enabled
0.000 | 0170: [ 0.411384] Performance Events: unsupported p6 CPU model 60 no PMU driver, software events only.

0.000 | 0534: [ 3.318676] virtio-pci 0000:00:03.0: irq 28 for MSI/MSI-X
0.000 | 0535: [ 3.319646] input: PC Speaker as /devices/platform/pcspkr/input/input5
0.000 | 0536: [ 3.335455] sr 1:0:1:0: Attached scsi generic sg0 type 5
0.576 | 0537: [ 3.349393] AES CTR mode by8 optimization enabled
0.000 | 0538: [ 3.358532] [drm] Initialized

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/dmesg.txt.gz
0.000 | 0056: [ 0.000000] NODE_DATA(0) allocated [mem 0x233fd7000-0x233ffdfff]
0.000 | 0057: [ 0.000000] kvm-clock: Using msrs 4b564d01 and 4b564d00
0.000 | 0058: [ 0.000000] kvm-clock: cpu 0, msr 2:33f87001, primary cpu clock
0.411 | 0059: [ 0.000000] kvm-clock: using sched offset of 1200854012 cycles
0.000 | 0060: [ 0.000000] Zone ranges:

0.000 | 0120: [ 0.000000] Policy zone: Normal
0.000 | 0121: [ 0.000000] Kernel command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64 root=LABEL=cloudimg-rootfs ro nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 no_timer_check
0.000 | 0122: [ 0.000000] PID hash table entries: 4096 (order: 3, 32768 bytes)
1.000 | 0123: [ 0.000000] x86/fpu: xstate_offset[2]: 0240, xstate_sizes[2]: 0100
0.646 | 0124: [ 0.000000] xsave: enabled xstate_bv 0x7, cntxt size 0x340 using standard form
0.000 | 0125: [ 0.000000] Memory: 5139612k/9240576k available (6886k kernel code, 1049112k absent, 234036k reserved, 4545k data, 1764k init)

0.000 | 0165: [ 0.341704] Enabled x2apic
0.000 | 0166: [ 0.342581] Switched APIC routing to physical x2apic.
0.000 | 0167: [ 0.344384] ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
0.294 | 0168: [ 0.345640] smpboot: CPU0: Intel Core Processor (Haswell, no TSX) (fam: 06, model: 3c, stepping: 01)
0.556 | 0169: [ 0.347706] TSC deadline timer enabled
0.000 | 0170: [ 0.347713] Performance Events: unsupported p6 CPU model 60 no PMU driver, software events only.

0.000 | 0535: [ 2.904796] virtio-pci 0000:00:03.0: irq 27 for MSI/MSI-X
0.000 | 0536: [ 2.904834] virtio-pci 0000:00:03.0: irq 28 for MSI/MSI-X
0.000 | 0537: [ 2.911516] piix4_smbus 0000:00:01.3: SMBus Host Controller at 0x700, revision 0
0.576 | 0538: [ 2.927234] AES CTR mode by8 optimization enabled
0.000 | 0539: [ 2.946260] sr 1:0:1:0: Attached scsi generic sg0 type 5

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/messages.txt.gz
0.000 | 0034: Sep 14 17:58:49 localhost kernel: NODE_DATA(0) allocated [mem 0x233fd7000-0x233ffdfff]
0.000 | 0035: Sep 14 17:58:49 localhost kernel: kvm-clock: Using msrs 4b564d01 and 4b564d00
0.000 | 0036: Sep 14 17:58:49 localhost kernel: kvm-clock: cpu 0, msr 2:33f87001, primary cpu clock
0.329 | 0037: Sep 14 17:58:49 localhost kernel: kvm-clock: using sched offset of 1200854012 cycles
0.000 | 0038: Sep 14 17:58:49 localhost kernel: Zone ranges:

0.000 | 0083: Sep 14 17:58:49 localhost kernel: Kernel command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64 root=LABEL=cloudimg-rootfs ro nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 no_timer_check
0.000 | 0084: Sep 14 17:58:49 localhost kernel: PID hash table entries: 4096 (order: 3, 32768 bytes)
0.000 | 0085: Sep 14 17:58:49 localhost kernel: x86/fpu: xstate_offset[2]: 0240, xstate_sizes[2]: 0100
0.510 | 0086: Sep 14 17:58:49 localhost kernel: xsave: enabled xstate_bv 0x7, cntxt size 0x340 using standard form
0.000 | 0087: Sep 14 17:58:49 localhost kernel: Memory: 5139612k/9240576k available (6886k kernel code, 1049112k absent, 234036k reserved, 4545k data, 1764k init)

0.000 | 0126: Sep 14 17:58:49 localhost kernel: Enabled x2apic
0.000 | 0127: Sep 14 17:58:49 localhost kernel: Switched APIC routing to physical x2apic.
0.000 | 0128: Sep 14 17:58:49 localhost kernel: ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
0.268 | 0129: Sep 14 17:58:49 localhost kernel: smpboot: CPU0: Intel Core Processor (Haswell, no TSX) (fam: 06, model: 3c, stepping: 01)
0.000 | 0130: Sep 14 17:58:49 localhost kernel: Performance Events: unsupported p6 CPU model 60 no PMU driver, software events only.

0.000 | 0782: Sep 14 17:58:51 localhost growroot: + resize2fs /dev/vda1
0.000 | 0783: Sep 14 17:58:51 localhost growroot: resize2fs 1.42.9 (28-Dec-2013)
0.000 | 0784: Sep 14 17:58:51 localhost kernel: EXT4-fs (vda1): resizing filesystem from 3265664 to 20971259 blocks
0.519 | 0785: Sep 14 17:58:52 localhost dhclient[701]: DHCPDISCOVER on eth0 to 255.255.255.255 port 67 interval 6 (xid=0x380ac49c)
0.340 | 0786: Sep 14 17:58:52 localhost dhclient[701]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x380ac49c)
0.330 | 0787: Sep 14 17:58:52 localhost dhclient[701]: DHCPOFFER from 149.202.160.1
0.376 | 0788: Sep 14 17:58:52 localhost dhclient[701]: DHCPACK from 149.202.160.1 (xid=0x380ac49c)
0.542 | 0789: Sep 14 17:58:52 localhost dhclient[701]: suspect value in domain_search option - discarded
0.000 | 0790: Sep 14 17:58:52 localhost kernel: EXT4-fs (vda1): resized filesystem to 20971259

0.000 | 0796: Sep 14 17:58:53 localhost ntpd_intres[571]: host name not found: 1.centos.pool.ntp.org
0.000 | 0797: Sep 14 17:58:53 localhost ntpd_intres[571]: host name not found: 2.centos.pool.ntp.org
0.000 | 0798: Sep 14 17:58:53 localhost ntpd_intres[571]: host name not found: 3.centos.pool.ntp.org
0.708 | 0799: Sep 14 17:58:54 localhost NET[753]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.525 | 0800: Sep 14 17:58:54 localhost dhclient[701]: Error printing text.
0.604 | 0801: Sep 14 17:58:54 localhost dhclient[701]: bound to 149.202.161.189 -- renewal in 35659 seconds.
0.579 | 0802: Sep 14 17:58:54 localhost ifup: Determining IP information for eth0... done.
0.000 | 0803: Sep 14 17:58:54 localhost systemd: Started Glean for interface eth0.

0.000 | 0806: Sep 14 17:58:54 localhost systemd: Starting LSB: Bring up/down networking...
0.000 | 0807: Sep 14 17:58:54 localhost network: Bringing up loopback interface: [ OK ]
0.000 | 0808: Sep 14 17:58:54 localhost network: Bringing up interface eth0:
0.618 | 0809: Sep 14 17:58:54 localhost dhclient[926]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.647 | 0810: Sep 14 17:58:54 localhost network: Determining IP information for eth0...Can't create /var/run/dhclient-eth0.pid: Permission denied
0.340 | 0811: Sep 14 17:58:54 localhost dhclient[926]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x1c08b39e)
0.000 | 0812: Sep 14 17:58:54 localhost kernel: iptables dropped: IN=eth0 OUT= MAC=ff:ff:ff:ff:ff:ff:66:4d:f1:cc:3c:eb:08:00 SRC=149.202.160.1 DST=255.255.255.255 LEN=309 TOS=0x00 PREC=0x00 TTL=64 ID=0 PROTO=UDP SPT=67 DPT=68 LEN=289
0.376 | 0813: Sep 14 17:58:54 localhost dhclient[926]: DHCPACK from 149.202.160.1 (xid=0x1c08b39e)
0.542 | 0814: Sep 14 17:58:54 localhost dhclient[926]: suspect value in domain_search option - discarded
0.000 | 0815: Sep 14 17:58:56 localhost ntpd[555]: Listen normally on 4 eth0 149.202.161.189 UDP 123
0.000 | 0816: Sep 14 17:58:56 localhost ntpd[555]: Listen normally on 5 eth0 fe80::f816:3eff:feb8:8bc3 UDP 123
0.708 | 0817: Sep 14 17:58:56 localhost NET[976]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.525 | 0818: Sep 14 17:58:56 localhost dhclient[926]: Error printing text.
0.604 | 0819: Sep 14 17:58:56 localhost dhclient[926]: bound to 149.202.161.189 -- renewal in 35009 seconds.
0.618 | 0820: Sep 14 17:58:56 localhost dhclient[978]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.528 | 0821: Sep 14 17:58:56 localhost network: done.
0.000 | 0822: Sep 14 17:58:56 localhost network: [ OK ]

0.000 | 4317: Sep 14 18:45:30 centos-7-2-node-ovh-gra1-10937570 ironic-conductor: 2017-09-14 18:45:30.538 31475 DEBUG futurist.periodics [-] Submitting periodic callback 'ironic.conductor.manager.ConductorManager._sync_power_states' _process_scheduled /usr/lib/python2.7/site-packages/futurist/periodics.py:639
0.000 | 4318: Sep 14 18:45:30 centos-7-2-node-ovh-gra1-10937570 ironic-conductor: 2017-09-14 18:45:30.544 31475 DEBUG futurist.periodics [-] Submitting periodic callback 'ironic.drivers.modules.inspector.Inspector._periodic_check_result' _process_scheduled /usr/lib/python2.7/site-packages/futurist/periodics.py:639
0.000 | 4319: Sep 14 18:45:49 centos-7-2-node-ovh-gra1-10937570 sshd[810]: error: Could not load host key: /etc/ssh/ssh_host_dsa_key
0.540 | 4320: Sep 14 18:45:49 centos-7-2-node-ovh-gra1-10937570 sshd[810]: Did not receive identification string from 158.85.81.118 port 10000
0.000 | 4321: Sep 14 18:46:20 centos-7-2-node-ovh-gra1-10937570 kernel: iptables dropped: IN=eth0 OUT= MAC=fa:16:3e:b8:8b:c3:66:4d:f1:cc:3c:eb:08:00 SRC=144.217.179.238 DST=149.202.161.189 LEN=52 TOS=0x14 PREC=0x00 TTL=119 ID=2485 DF PROTO=TCP SPT=50392 DPT=445 WINDOW=8192 RES=0x00 SYN URGP=0

0.143 | 9777: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/00/38 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx2e5ea93060ad46818896e-0059bad1d6 - 0.0148 - - 1505415638.279400110 1505415638.294195890 0
0.000 | 9778: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "HEAD /1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx0c142a404ede4dc88cdaa-0059bad1d6" "proxy-server 1582" 0.0010 "-" 3078 0
0.000 | 9779: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server: - - 14/Sep/2017/19/00/38 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - tx0c142a404ede4dc88cdaa-0059bad1d6 - 0.0089 RL - 1505415638.309385061 1505415638.318249941 0
0.209 | 9780: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/00/38 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F047f5c2e-19c5-479e-9589-d62cea1fe510 HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - tx0c142a404ede4dc88cdaa-0059bad1d6 - 0.0172 - - 1505415638.306071043 1505415638.323246002 0
0.000 | 9781: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx57f029da591e40e1baeeb-0059bad1d6" "container-server 3079" 0.0097 "-" 3009 0
0.000 | 9782: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "PUT /1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx57f029da591e40e1baeeb-0059bad1d6" "proxy-server 1583" 0.0402 "-" 3079 0
0.218 | 9783: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/00/38 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx57f029da591e40e1baeeb-0059bad1d6 - 0.0495 - - 1505415638.332298040 1505415638.381819963 0
0.000 | 9784: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "HEAD /1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx0f7be7798e9843629df96-0059bad1d6" "proxy-server 1582" 0.0017 "-" 3080 0

0.143 | 13770: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/39 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - txea87cb69fda94ec7bfc15-0059bad213 - 0.0119 - - 1505415699.261071920 1505415699.273020983 0
0.000 | 13771: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "HEAD /1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "txcee621554908478b89763-0059bad213" "proxy-server 1584" 0.0005 "-" 3080 0
0.000 | 13772: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server: - - 14/Sep/2017/19/01/39 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - txcee621554908478b89763-0059bad213 - 0.0043 RL - 1505415699.281114101 1505415699.285367012 0
0.209 | 13773: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/39 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F635a7644-947c-44e4-8743-4769b2f60cc4 HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - txcee621554908478b89763-0059bad213 - 0.0086 - - 1505415699.279815912 1505415699.288430929 0
0.000 | 13774: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx61eedd717bcc4016ae241-0059bad213" "container-server 3079" 0.0108 "-" 3008 0
0.000 | 13775: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "PUT /1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx61eedd717bcc4016ae241-0059bad213" "proxy-server 1584" 0.0291 "-" 3079 0
0.218 | 13776: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/39 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx61eedd717bcc4016ae241-0059bad213 - 0.0352 - - 1505415699.294846058 1505415699.330060959 0
0.000 | 13777: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "HEAD /1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx70017d1991b14e4cb4f3b-0059bad213" "proxy-server 1584" 0.0024 "-" 3078 0

0.143 | 14872: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/55 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - txba823aaf13b54a359fcaf-0059bad223 - 0.0083 - - 1505415715.131978989 1505415715.140264034 0
0.000 | 14873: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "HEAD /1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx36e477d390324574a9a88-0059bad223" "proxy-server 1582" 0.0004 "-" 3079 0
0.000 | 14874: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server: - - 14/Sep/2017/19/01/55 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - tx36e477d390324574a9a88-0059bad223 - 0.0055 RL - 1505415715.148551941 1505415715.154098034 0
0.209 | 14875: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/55 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F9414f8b2-bba2-4898-bdcf-103d6f1f8059 HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - tx36e477d390324574a9a88-0059bad223 - 0.0125 - - 1505415715.146020889 1505415715.158480883 0
0.000 | 14876: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f004eeb24dc411bb4f3a-0059bad223" "container-server 3079" 0.0059 "-" 3003 0
0.000 | 14877: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "PUT /1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f004eeb24dc411bb4f3a-0059bad223" "proxy-server 1582" 0.0194 "-" 3079 0
0.218 | 14878: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/55 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx4f004eeb24dc411bb4f3a-0059bad223 - 0.0284 - - 1505415715.168113947 1505415715.196496964 0
0.000 | 14879: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "HEAD /1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx103eb9a47ce94c6db1ae2-0059bad223" "proxy-server 1582" 0.0018 "-" 3078 0

0.143 | 15997: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx7419daef9220488599cb6-0059bad235 - 0.0084 - - 1505415733.383479118 1505415733.391885042 0
0.000 | 15998: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "HEAD /1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx2a05d60c0a744f33acbf2-0059bad235" "proxy-server 1583" 0.0004 "-" 3079 0
0.000 | 15999: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server: - - 14/Sep/2017/19/02/13 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - tx2a05d60c0a744f33acbf2-0059bad235 - 0.0038 RL - 1505415733.399992943 1505415733.403815985 0
0.209 | 16000: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F83f22a90-f062-4549-a6ed-590c4011858b HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - tx2a05d60c0a744f33acbf2-0059bad235 - 0.0076 - - 1505415733.398859024 1505415733.406425953 0
0.000 | 16001: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f236364fbfc49bcb96ce-0059bad235" "container-server 3080" 0.0088 "-" 3008 0
0.000 | 16002: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "PUT /1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f236364fbfc49bcb96ce-0059bad235" "proxy-server 1585" 0.0214 "-" 3080 0
0.218 | 16003: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx4f236364fbfc49bcb96ce-0059bad235 - 0.0276 - - 1505415733.415709972 1505415733.443269014 0
0.000 | 16004: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "HEAD /1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx5b74395330c4423f84558-0059bad235" "proxy-server 1584" 0.0039 "-" 3078 0

0.000 | 17334: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server: - - 14/Sep/2017/19/04/45 HEAD /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - Swift - - - - tx3e8df32eab684e9380df8-0059bad2cd - 0.0213 RL - 1505415885.248116970 1505415885.269442081 -
0.000 | 17335: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" 201 - "PUT http://192.168.24.1:6001/1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" "tx3e8df32eab684e9380df8-0059bad2cd" "container-server 3079" 0.0030 "-" 3003 0
0.000 | 17336: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" "tx3e8df32eab684e9380df8-0059bad2cd" "proxy-server 1582" 0.0194 "-" 3079 0
0.205 | 17337: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 PUT /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... - - - tx3e8df32eab684e9380df8-0059bad2cd - 0.3469 - - 1505415885.246094942 1505415885.593013048 0
0.000 | 17338: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "HEAD /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" "tx6c005aa0f8524e9e9bd55-0059bad2cd" "proxy-server 1582" 0.0077 "-" 3003 -
0.033 | 17339: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 HEAD /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... - - - tx6c005aa0f8524e9e9bd55-0059bad2cd - 0.0149 - - 1505415885.599353075 1505415885.614269018 -
0.000 | 17340: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "POST /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "POST http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" "txf775cf95e84549e896b7b-0059bad2cd" "proxy-server 1582" 0.0095 "-" 3008 -
0.279 | 17341: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 POST /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... - - - txf775cf95e84549e896b7b-0059bad2cd - 0.0191 - - 1505415885.619680882 1505415885.638742924 -
0.000 | 17342: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "HEAD /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" "tx57920453ddc24a3d887e8-0059bad2cd" "proxy-server 1582" 0.0027 "-" 3009 -

0.000 | 17351: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server: - - 14/Sep/2017/19/04/45 HEAD /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr HTTP/1.0 204 - Swift - - - - tx5248bf1a6ee44e1fa4120-0059bad2cd - 0.0135 RL - 1505415885.759021997 1505415885.772476912 0
0.000 | 17352: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" 201 - "PUT http://192.168.24.1:8080/1/72/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" "tx5248bf1a6ee44e1fa4120-0059bad2cd" "object-server 3134" 0.0040 "-" 3080 0
0.000 | 17353: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 object-server: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/72/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" "tx5248bf1a6ee44e1fa4120-0059bad2cd" "proxy-server 1582" 0.0247 "-" 3134 0
0.205 | 17354: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 PUT /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... 349 - - tx5248bf1a6ee44e1fa4120-0059bad2cd - 0.0515 - - 1505415885.756985903 1505415885.808475971 0
0.000 | 17355: Sep 14 19:04:47 centos-7-2-node-ovh-gra1-10937570 container-server: 192.168.24.1 - - [14/Sep/2017:19:04:47 +0000] "PUT /1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" 201 - "PUT http://192.168.24.1:8080/1/72/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" "tx9a4176d835324d35baacd-0059bad2cf" "object-server 3132" 0.0009 "-" 3079 0

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/journal.txt.gz
0.000 | 0058: Sep 14 17:58:49 localhost kernel: NODE_DATA(0) allocated [mem 0x233fd7000-0x233ffdfff]
0.000 | 0059: Sep 14 17:58:49 localhost kernel: kvm-clock: Using msrs 4b564d01 and 4b564d00
0.000 | 0060: Sep 14 17:58:49 localhost kernel: kvm-clock: cpu 0, msr 2:33f87001, primary cpu clock
0.320 | 0061: Sep 14 17:58:49 localhost kernel: kvm-clock: using sched offset of 1200854012 cycles
0.000 | 0062: Sep 14 17:58:49 localhost kernel: Zone ranges:

0.000 | 0123: Sep 14 17:58:49 localhost kernel: Kernel command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64 root=LABEL=cloudimg-rootfs ro nofb nomodeset vga=normal console=tty0 console=ttyS0,115200 no_timer_check
0.000 | 0124: Sep 14 17:58:49 localhost kernel: PID hash table entries: 4096 (order: 3, 32768 bytes)
0.000 | 0125: Sep 14 17:58:49 localhost kernel: x86/fpu: xstate_offset[2]: 0240, xstate_sizes[2]: 0100
0.489 | 0126: Sep 14 17:58:49 localhost kernel: xsave: enabled xstate_bv 0x7, cntxt size 0x340 using standard form
0.000 | 0127: Sep 14 17:58:49 localhost kernel: Memory: 5139612k/9240576k available (6886k kernel code, 1049112k absent, 234036k reserved, 4545k data, 1764k init)

0.000 | 0167: Sep 14 17:58:49 localhost kernel: Enabled x2apic
0.000 | 0168: Sep 14 17:58:49 localhost kernel: Switched APIC routing to physical x2apic.
0.000 | 0169: Sep 14 17:58:49 localhost kernel: ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
0.267 | 0170: Sep 14 17:58:49 localhost kernel: smpboot: CPU0: Intel Core Processor (Haswell, no TSX) (fam: 06, model: 3c, stepping: 01)
0.443 | 0171: Sep 14 17:58:49 localhost kernel: TSC deadline timer enabled
0.000 | 0172: Sep 14 17:58:49 localhost kernel: Performance Events: unsupported p6 CPU model 60 no PMU driver, software events only.

0.000 | 0884: Sep 14 17:58:51 localhost growroot[506]: + resize2fs /dev/vda1
0.000 | 0885: Sep 14 17:58:51 localhost growroot[506]: resize2fs 1.42.9 (28-Dec-2013)
0.000 | 0886: Sep 14 17:58:51 localhost kernel: EXT4-fs (vda1): resizing filesystem from 3265664 to 20971259 blocks
0.505 | 0887: Sep 14 17:58:52 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: DHCPDISCOVER on eth0 to 255.255.255.255 port 67 interval 6 (xid=0x380ac49c)
0.310 | 0888: Sep 14 17:58:52 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x380ac49c)
0.304 | 0889: Sep 14 17:58:52 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: DHCPOFFER from 149.202.160.1
0.355 | 0890: Sep 14 17:58:52 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: DHCPACK from 149.202.160.1 (xid=0x380ac49c)
0.532 | 0891: Sep 14 17:58:52 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: suspect value in domain_search option - discarded
0.000 | 0892: Sep 14 17:58:52 centos-7-2-node-ovh-gra1-10937570 kernel: EXT4-fs (vda1): resized filesystem to 20971259

0.000 | 0898: Sep 14 17:58:53 centos-7-2-node-ovh-gra1-10937570 ntpd_intres[571]: host name not found: 1.centos.pool.ntp.org
0.000 | 0899: Sep 14 17:58:53 centos-7-2-node-ovh-gra1-10937570 ntpd_intres[571]: host name not found: 2.centos.pool.ntp.org
0.000 | 0900: Sep 14 17:58:53 centos-7-2-node-ovh-gra1-10937570 ntpd_intres[571]: host name not found: 3.centos.pool.ntp.org
0.689 | 0901: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 NET[753]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.544 | 0902: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: Error printing text.
0.596 | 0903: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 dhclient[701]: bound to 149.202.161.189 -- renewal in 35659 seconds.
0.581 | 0904: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 ifup[677]: Determining IP information for eth0... done.
0.000 | 0905: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 systemd[1]: Started Glean for interface eth0.

0.000 | 0908: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 systemd[1]: Starting LSB: Bring up/down networking...
0.000 | 0909: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 network[797]: Bringing up loopback interface: [ OK ]
0.000 | 0910: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 network[797]: Bringing up interface eth0:
0.620 | 0911: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 dhclient[926]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.649 | 0912: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 network[797]: Determining IP information for eth0...Can't create /var/run/dhclient-eth0.pid: Permission denied
0.310 | 0913: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 dhclient[926]: DHCPREQUEST on eth0 to 255.255.255.255 port 67 (xid=0x1c08b39e)
0.000 | 0914: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 kernel: iptables dropped: IN=eth0 OUT= MAC=ff:ff:ff:ff:ff:ff:66:4d:f1:cc:3c:eb:08:00 SRC=149.202.160.1 DST=255.255.255.255 LEN=309 TOS=0x00 PREC=0x00 TTL=64 ID=0 PROTO=UDP SPT=67 DPT=68 LEN=289
0.355 | 0915: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 dhclient[926]: DHCPACK from 149.202.160.1 (xid=0x1c08b39e)
0.532 | 0916: Sep 14 17:58:54 centos-7-2-node-ovh-gra1-10937570 dhclient[926]: suspect value in domain_search option - discarded
0.000 | 0917: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 ntpd[555]: Listen normally on 4 eth0 149.202.161.189 UDP 123
0.000 | 0918: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 ntpd[555]: Listen normally on 5 eth0 fe80::f816:3eff:feb8:8bc3 UDP 123
0.000 | 0919: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 ntpd[555]: new interface(s) found: waking up resolver
0.689 | 0920: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 NET[976]: /usr/sbin/dhclient-script : updated /etc/resolv.conf
0.544 | 0921: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 dhclient[926]: Error printing text.
0.596 | 0922: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 dhclient[926]: bound to 149.202.161.189 -- renewal in 35009 seconds.
0.620 | 0923: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 dhclient[978]: Can't create /var/run/dhclient-eth0.pid: Permission denied
0.530 | 0924: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 network[797]: done.
0.000 | 0925: Sep 14 17:58:56 centos-7-2-node-ovh-gra1-10937570 network[797]: [ OK ]

0.000 | 3397: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Failed password for invalid user admin from 121.14.7.244 port 44085 ssh2
0.000 | 3398: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Connection closed by 121.14.7.244 port 44085 [preauth]
0.000 | 3399: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: PAM 3 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.14.7.244
0.630 | 3400: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: PAM service(sshd) ignoring max retries; 4 > 3
0.000 | 3401: Sep 14 18:32:17 centos-7-2-node-ovh-gra1-10937570 kernel: iptables dropped: IN=eth0 OUT= MAC=fa:16:3e:b8:8b:c3:66:4d:f1:cc:3c:eb:08:00 SRC=10.97.174.231 DST=149.202.161.189 LEN=44 TOS=0x00 PREC=0x00 TTL=41 ID=25158 PROTO=TCP SPT=62747 DPT=443 WINDOW=1024 RES=0x00 SYN URGP=0

0.000 | 5428: Sep 14 18:45:30 centos-7-2-node-ovh-gra1-10937570 ironic-conductor[31475]: 2017-09-14 18:45:30.538 31475 DEBUG futurist.periodics [-] Submitting periodic callback 'ironic.conductor.manager.ConductorManager._sync_power_states' _process_scheduled /usr/lib/python2.7/site-packages/futurist/periodics.py:639
0.000 | 5429: Sep 14 18:45:30 centos-7-2-node-ovh-gra1-10937570 ironic-conductor[31475]: 2017-09-14 18:45:30.544 31475 DEBUG futurist.periodics [-] Submitting periodic callback 'ironic.drivers.modules.inspector.Inspector._periodic_check_result' _process_scheduled /usr/lib/python2.7/site-packages/futurist/periodics.py:639
0.000 | 5430: Sep 14 18:45:49 centos-7-2-node-ovh-gra1-10937570 sshd[810]: error: Could not load host key: /etc/ssh/ssh_host_dsa_key
0.593 | 5431: Sep 14 18:45:49 centos-7-2-node-ovh-gra1-10937570 sshd[810]: Did not receive identification string from 158.85.81.118 port 10000
0.000 | 5432: Sep 14 18:46:20 centos-7-2-node-ovh-gra1-10937570 kernel: iptables dropped: IN=eth0 OUT= MAC=fa:16:3e:b8:8b:c3:66:4d:f1:cc:3c:eb:08:00 SRC=144.217.179.238 DST=149.202.161.189 LEN=52 TOS=0x14 PREC=0x00 TTL=119 ID=2485 DF PROTO=TCP SPT=50392 DPT=445 WINDOW=8192 RES=0x00 SYN URGP=0

0.144 | 11200: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server[1584]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/00/38 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx2e5ea93060ad46818896e-0059bad1d6 - 0.0148 - - 1505415638.279400110 1505415638.294195890 0
0.000 | 11201: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 container-server[3078]: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "HEAD /1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx0c142a404ede4dc88cdaa-0059bad1d6" "proxy-server 1582" 0.0010 "-" 3078 0
0.000 | 11202: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: - - 14/Sep/2017/19/00/38 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - tx0c142a404ede4dc88cdaa-0059bad1d6 - 0.0089 RL - 1505415638.309385061 1505415638.318249941 0
0.207 | 11203: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/00/38 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F047f5c2e-19c5-479e-9589-d62cea1fe510 HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - tx0c142a404ede4dc88cdaa-0059bad1d6 - 0.0172 - - 1505415638.306071043 1505415638.323246002 0
0.000 | 11204: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 account-server[3009]: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx57f029da591e40e1baeeb-0059bad1d6" "container-server 3079" 0.0097 "-" 3009 0
0.000 | 11205: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "PUT /1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx57f029da591e40e1baeeb-0059bad1d6" "proxy-server 1583" 0.0402 "-" 3079 0
0.216 | 11206: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 proxy-server[1583]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/00/38 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx57f029da591e40e1baeeb-0059bad1d6 - 0.0495 - - 1505415638.332298040 1505415638.381819963 0
0.000 | 11207: Sep 14 19:00:38 centos-7-2-node-ovh-gra1-10937570 container-server[3080]: 192.168.24.1 - - [14/Sep/2017:19:00:38 +0000] "HEAD /1/584/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A4720a69b-b2ee-429b-9a4d-0d39fb3d8cfd%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx0f7be7798e9843629df96-0059bad1d6" "proxy-server 1582" 0.0017 "-" 3080 0

0.144 | 15194: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server[1584]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/39 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - txea87cb69fda94ec7bfc15-0059bad213 - 0.0119 - - 1505415699.261071920 1505415699.273020983 0
0.000 | 15195: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 container-server[3080]: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "HEAD /1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "txcee621554908478b89763-0059bad213" "proxy-server 1584" 0.0005 "-" 3080 0
0.000 | 15196: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server[1584]: - - 14/Sep/2017/19/01/39 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - txcee621554908478b89763-0059bad213 - 0.0043 RL - 1505415699.281114101 1505415699.285367012 0
0.207 | 15197: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server[1584]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/39 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F635a7644-947c-44e4-8743-4769b2f60cc4 HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - txcee621554908478b89763-0059bad213 - 0.0086 - - 1505415699.279815912 1505415699.288430929 0
0.000 | 15198: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 account-server[3008]: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx61eedd717bcc4016ae241-0059bad213" "container-server 3079" 0.0108 "-" 3008 0
0.000 | 15199: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "PUT /1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx61eedd717bcc4016ae241-0059bad213" "proxy-server 1584" 0.0291 "-" 3079 0
0.216 | 15200: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 proxy-server[1584]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/39 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:447486f4-fe7d-4322-b412-77bd2862ae22:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx61eedd717bcc4016ae241-0059bad213 - 0.0352 - - 1505415699.294846058 1505415699.330060959 0
0.000 | 15201: Sep 14 19:01:39 centos-7-2-node-ovh-gra1-10937570 container-server[3078]: 192.168.24.1 - - [14/Sep/2017:19:01:39 +0000] "HEAD /1/1016/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A447486f4-fe7d-4322-b412-77bd2862ae22%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx70017d1991b14e4cb4f3b-0059bad213" "proxy-server 1584" 0.0024 "-" 3078 0

0.144 | 16296: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/55 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - txba823aaf13b54a359fcaf-0059bad223 - 0.0083 - - 1505415715.131978989 1505415715.140264034 0
0.000 | 16297: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "HEAD /1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx36e477d390324574a9a88-0059bad223" "proxy-server 1582" 0.0004 "-" 3079 0
0.000 | 16298: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: - - 14/Sep/2017/19/01/55 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - tx36e477d390324574a9a88-0059bad223 - 0.0055 RL - 1505415715.148551941 1505415715.154098034 0
0.207 | 16299: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/55 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F9414f8b2-bba2-4898-bdcf-103d6f1f8059 HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - tx36e477d390324574a9a88-0059bad223 - 0.0125 - - 1505415715.146020889 1505415715.158480883 0
0.000 | 16300: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 account-server[3003]: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f004eeb24dc411bb4f3a-0059bad223" "container-server 3079" 0.0059 "-" 3003 0
0.000 | 16301: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "PUT /1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f004eeb24dc411bb4f3a-0059bad223" "proxy-server 1582" 0.0194 "-" 3079 0
0.216 | 16302: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/01/55 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:5b59bb1a-9c26-4efe-b768-75898db41c5a:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx4f004eeb24dc411bb4f3a-0059bad223 - 0.0284 - - 1505415715.168113947 1505415715.196496964 0
0.000 | 16303: Sep 14 19:01:55 centos-7-2-node-ovh-gra1-10937570 container-server[3078]: 192.168.24.1 - - [14/Sep/2017:19:01:55 +0000] "HEAD /1/881/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A5b59bb1a-9c26-4efe-b768-75898db41c5a%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx103eb9a47ce94c6db1ae2-0059bad223" "proxy-server 1582" 0.0018 "-" 3078 0

0.144 | 17421: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server[1583]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_message:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx7419daef9220488599cb6-0059bad235 - 0.0084 - - 1505415733.383479118 1505415733.391885042 0
0.000 | 17422: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "HEAD /1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 404 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx2a05d60c0a744f33acbf2-0059bad235" "proxy-server 1583" 0.0004 "-" 3079 0
0.000 | 17423: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server[1583]: - - 14/Sep/2017/19/02/13 HEAD /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 404 - Swift - - - - tx2a05d60c0a744f33acbf2-0059bad235 - 0.0038 RL - 1505415733.399992943 1505415733.403815985 0
0.207 | 17424: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server[1583]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882/http%25253A%25252F%25252Fcentos-7-2-node-ovh-gra1-10937570%25253A35207%25252F83f22a90-f062-4549-a6ed-590c4011858b HTTP/1.0 404 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - 70 - tx2a05d60c0a744f33acbf2-0059bad235 - 0.0076 - - 1505415733.398859024 1505415733.406425953 0
0.000 | 17425: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 account-server[3008]: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "PUT /1/628/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:6001/1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f236364fbfc49bcb96ce-0059bad235" "container-server 3080" 0.0088 "-" 3008 0
0.000 | 17426: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 container-server[3080]: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "PUT /1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx4f236364fbfc49bcb96ce-0059bad235" "proxy-server 1585" 0.0214 "-" 3080 0
0.216 | 17427: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 proxy-server[1585]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 PUT /v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber:64b7c9c5-9243-4dda-b2de-de493ef109c9:2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutHV2h4q... - - - tx4f236364fbfc49bcb96ce-0059bad235 - 0.0276 - - 1505415733.415709972 1505415733.443269014 0
0.000 | 17428: Sep 14 19:02:13 centos-7-2-node-ovh-gra1-10937570 container-server[3078]: 192.168.24.1 - - [14/Sep/2017:19:02:13 +0000] "HEAD /1/957/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_66e6a1f3124a4e28b83906ad3dc7f46d/zaqar_subscriber%3A64b7c9c5-9243-4dda-b2de-de493ef109c9%3A2bc0cdfdc3664800b9a07e4b9e4b0882" "tx5b74395330c4423f84558-0059bad235" "proxy-server 1584" 0.0039 "-" 3078 0

0.000 | 18758: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: - - 14/Sep/2017/19/04/45 HEAD /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - Swift - - - - tx3e8df32eab684e9380df8-0059bad2cd - 0.0213 RL - 1505415885.248116970 1505415885.269442081 -
0.000 | 18759: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server[3003]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" 201 - "PUT http://192.168.24.1:6001/1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" "tx3e8df32eab684e9380df8-0059bad2cd" "container-server 3079" 0.0030 "-" 3003 0
0.000 | 18760: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr" "tx3e8df32eab684e9380df8-0059bad2cd" "proxy-server 1582" 0.0194 "-" 3079 0
0.203 | 18761: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 PUT /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... - - - tx3e8df32eab684e9380df8-0059bad2cd - 0.3469 - - 1505415885.246094942 1505415885.593013048 0
0.000 | 18762: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server[3003]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "HEAD /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" "tx6c005aa0f8524e9e9bd55-0059bad2cd" "proxy-server 1582" 0.0077 "-" 3003 -
0.034 | 18763: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 HEAD /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... - - - tx6c005aa0f8524e9e9bd55-0059bad2cd - 0.0149 - - 1505415885.599353075 1505415885.614269018 -
0.000 | 18764: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server[3008]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "POST /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "POST http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" "txf775cf95e84549e896b7b-0059bad2cd" "proxy-server 1582" 0.0095 "-" 3008 -
0.282 | 18765: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 POST /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882 HTTP/1.0 204 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... - - - txf775cf95e84549e896b7b-0059bad2cd - 0.0191 - - 1505415885.619680882 1505415885.638742924 -
0.000 | 18766: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 account-server[3009]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "HEAD /1/166/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" 204 - "HEAD http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882" "tx57920453ddc24a3d887e8-0059bad2cd" "proxy-server 1582" 0.0027 "-" 3009 -

0.000 | 18775: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: - - 14/Sep/2017/19/04/45 HEAD /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr HTTP/1.0 204 - Swift - - - - tx5248bf1a6ee44e1fa4120-0059bad2cd - 0.0135 RL - 1505415885.759021997 1505415885.772476912 0
0.000 | 18776: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 container-server[3080]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" 201 - "PUT http://192.168.24.1:8080/1/72/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" "tx5248bf1a6ee44e1fa4120-0059bad2cd" "object-server 3134" 0.0040 "-" 3080 0
0.000 | 18777: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 object-server[3134]: 192.168.24.1 - - [14/Sep/2017:19:04:45 +0000] "PUT /1/72/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" 201 - "PUT http://192.168.24.1:8080/v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" "tx5248bf1a6ee44e1fa4120-0059bad2cd" "proxy-server 1582" 0.0247 "-" 3134 0
0.203 | 18778: Sep 14 19:04:45 centos-7-2-node-ovh-gra1-10937570 proxy-server[1582]: 192.168.24.1 192.168.24.1 14/Sep/2017/19/04/45 PUT /v1/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1 HTTP/1.0 201 - python-swiftclient-3.4.0 gAAAAABZutLMajw2... 349 - - tx5248bf1a6ee44e1fa4120-0059bad2cd - 0.0515 - - 1505415885.756985903 1505415885.808475971 0
0.000 | 18779: Sep 14 19:04:47 centos-7-2-node-ovh-gra1-10937570 container-server[3079]: 192.168.24.1 - - [14/Sep/2017:19:04:47 +0000] "PUT /1/928/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" 201 - "PUT http://192.168.24.1:8080/1/72/AUTH_2bc0cdfdc3664800b9a07e4b9e4b0882/ov-ntroller-37no2skefaeo-deployed-server-rlmvbys64vlr/1b283ffa-7936-4e5d-b850-d500af0904b1" "tx9a4176d835324d35baacd-0059bad2cf" "object-server 3132" 0.0009 "-" 3079 0

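Aside on reading the proxy-server lines above: they are space-separated records in what appears to be Swift's default proxy-logging format (client IP, remote address, timestamp, method, path, and so on through transaction id, request time, and policy index); the "RL" that shows up in the source slot of the internal subrequests seems to be the ratelimit middleware identifying itself. A minimal parsing sketch, with the field names assumed from the Swift proxy-logging documentation rather than verified against this deployment's configuration:

    # Split one proxy-server access line into named fields. The field list
    # assumes Swift's default proxy-logging template; a custom template in
    # proxy-server.conf would change the order.
    FIELDS = [
        "client_ip", "remote_addr", "datetime", "method", "path", "protocol",
        "status", "referer", "user_agent", "auth_token", "bytes_recvd",
        "bytes_sent", "client_etag", "transaction_id", "headers",
        "request_time", "source", "log_info", "start_time", "end_time",
        "policy_index",
    ]

    def parse_proxy_line(line):
        # Whitespace split works because none of the default fields
        # contain spaces.
        return dict(zip(FIELDS, line.split()))

    sample = ("192.168.24.1 192.168.24.1 14/Sep/2017/19/02/13 HEAD "
              "/v1/AUTH_test/container HTTP/1.0 204 - "
              "python-swiftclient-3.4.0 token... - - - "
              "tx7419daef9220488599cb6-0059bad235 - 0.0084 - - "
              "1505415733.383479118 1505415733.391885042 0")
    print(parse_proxy_line(sample)["request_time"])   # -> 0.0084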
/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/secure.txt.gz
0.000 | 0743: Sep 14 18:31:05 centos-7-2-node-ovh-gra1-10937570 groupadd[25998]: group added to /etc/gshadow: name=heat
0.000 | 0744: Sep 14 18:31:05 centos-7-2-node-ovh-gra1-10937570 groupadd[25998]: new group: name=heat, GID=187
0.000 | 0745: Sep 14 18:31:05 centos-7-2-node-ovh-gra1-10937570 useradd[26003]: new user: name=heat, UID=187, GID=187, home=/var/lib/heat, shell=/sbin/nologin
0.377 | 0746: Sep 14 18:31:32 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Invalid user admin from 121.14.7.244 port 44085
0.403 | 0747: Sep 14 18:31:32 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: input_userauth_request: invalid user admin [preauth]
0.000 | 0748: Sep 14 18:31:32 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: pam_unix(sshd:auth): check pass; user unknown

0.000 | 0751: Sep 14 18:31:33 centos-7-2-node-ovh-gra1-10937570 groupadd[26142]: group added to /etc/gshadow: name=ironic
0.000 | 0752: Sep 14 18:31:33 centos-7-2-node-ovh-gra1-10937570 groupadd[26142]: new group: name=ironic, GID=991
0.000 | 0753: Sep 14 18:31:33 centos-7-2-node-ovh-gra1-10937570 useradd[26147]: new user: name=ironic, UID=994, GID=991, home=/var/lib/ironic, shell=/sbin/nologin
0.452 | 0754: Sep 14 18:31:35 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Failed password for invalid user admin from 121.14.7.244 port 44085 ssh2
0.000 | 0755: Sep 14 18:31:36 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: pam_unix(sshd:auth): check pass; user unknown
0.452 | 0756: Sep 14 18:31:37 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Failed password for invalid user admin from 121.14.7.244 port 44085 ssh2
0.000 | 0757: Sep 14 18:31:38 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: pam_unix(sshd:auth): check pass; user unknown
0.452 | 0758: Sep 14 18:31:40 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Failed password for invalid user admin from 121.14.7.244 port 44085 ssh2
0.000 | 0759: Sep 14 18:31:42 centos-7-2-node-ovh-gra1-10937570 groupadd[26241]: group added to /etc/group: name=ironic-inspector, GID=990

0.000 | 0766: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 groupadd[26414]: new group: name=docker, GID=1004
0.000 | 0767: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 usermod[26420]: add 'jenkins' to group 'docker'
0.000 | 0768: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 usermod[26420]: add 'jenkins' to shadow group 'docker'
0.452 | 0769: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Failed password for invalid user admin from 121.14.7.244 port 44085 ssh2
0.029 | 0770: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: Connection closed by 121.14.7.244 port 44085 [preauth]
0.000 | 0771: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: PAM 3 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=121.14.7.244
0.600 | 0772: Sep 14 18:32:13 centos-7-2-node-ovh-gra1-10937570 sshd[26117]: PAM service(sshd) ignoring max retries; 4 > 3
0.000 | 0773: Sep 14 18:32:46 centos-7-2-node-ovh-gra1-10937570 useradd[26477]: new group: name=dockerroot, GID=989

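The high-scoring lines in this excerpt (0.377 to 0.600) are a textbook SSH brute-force attempt against the CI node: repeated "Failed password for invalid user admin" from 121.14.7.244 while the job was busy creating service users, ending with PAM noting the retry limit was exceeded (4 > 3). A quick tally sketch for lines like these; the file name is hypothetical, any local copy of the secure log works:

    import re
    from collections import Counter

    failed = Counter()
    with open("secure.txt") as log:               # hypothetical local copy
        for line in log:
            m = re.search(r"Failed password for (?:invalid user )?(\S+) "
                          r"from (\S+) port", line)
            if m:
                failed[(m.group(1), m.group(2))] += 1

    # For the excerpt above this prints [(('admin', '121.14.7.244'), 4)]
    print(failed.most_common())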
/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/heat/heat-engine.log.txt.gz
0.000 | 1276: 2017-09-14 19:02:46.459 31381 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task create running step /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:214
0.000 | 1277: 2017-09-14 19:02:46.460 31381 INFO heat.engine.resource [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] creating TemplateResource "Networks" Stack "overcloud" [73dccde3-562a-40bb-a2d5-d1a1138fa1d0]
0.157 | 1278: 2017-09-14 19:02:46.468 31378 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task stack_task from Stack "overcloud-ServiceNetMap-ulc6rjmyg53u" [ef24d960-bc3c-4b2e-9825-656d0191cab8] starting start /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:182
0.277 | 1279: 2017-09-14 19:02:46.469 31378 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task stack_task from Stack "overcloud-ServiceNetMap-ulc6rjmyg53u" [ef24d960-bc3c-4b2e-9825-656d0191cab8] running step /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:214
0.073 | 1280: 2017-09-14 19:02:46.469 31378 INFO heat.engine.stack [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Stack CREATE IN_PROGRESS (overcloud-ServiceNetMap-ulc6rjmyg53u): Stack CREATE started

0.000 | 1283: 2017-09-14 19:02:46.519 31378 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task create starting start /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:182
0.000 | 1284: 2017-09-14 19:02:46.519 31378 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task create running step /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:214
0.137 | 1285: 2017-09-14 19:02:46.519 31378 INFO heat.engine.resource [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] creating Value "ServiceNetMapValue" Stack "overcloud-ServiceNetMap-ulc6rjmyg53u" [ef24d960-bc3c-4b2e-9825-656d0191cab8]
0.253 | 1286: 2017-09-14 19:02:46.571 31378 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task stack_task from Stack "overcloud-ServiceNetMap-ulc6rjmyg53u" [ef24d960-bc3c-4b2e-9825-656d0191cab8] sleeping _sleep /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:155
0.277 | 1287: 2017-09-14 19:02:46.572 31378 DEBUG heat.engine.scheduler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Task stack_task from Stack "overcloud-ServiceNetMap-ulc6rjmyg53u" [ef24d960-bc3c-4b2e-9825-656d0191cab8] running step /usr/lib/python2.7/site-packages/heat/engine/scheduler.py:214
0.000 | 1288: 2017-09-14 19:02:46.571 31383 DEBUG oslo_policy._cache_handler [req-2a0c9bca-5afa-48b0-bb79-5b7eebd0b09e - admin - default default] Reloading cached file /etc/heat/policy.json read_cached_file /usr/lib/python2.7/site-packages/oslo_policy/_cache_handler.py:40

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/audit/audit.log.txt.gz
0.000 | 0028: type=SERVICE_START msg=audit(1505411931.808:24): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=ip6tables comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'
0.000 | 0029: type=SERVICE_START msg=audit(1505411932.874:25): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=growroot comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'
0.000 | 0030: type=SERVICE_START msg=audit(1505411934.430:26): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=glean@eth0 comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'
0.285 | 0031: type=AVC msg=audit(1505411934.727:27): avc: denied { read } for pid=926 comm="dhclient" name="dhclient-eth0.pid" dev="tmpfs" ino=14316 scontext=system_u:system_r:dhcpc_t:s0 tcontext=system_u:object_r:var_run_t:s0 tclass=file
0.101 | 0032: type=SYSCALL msg=audit(1505411934.727:27): arch=c000003e syscall=2 success=no exit=-13 a0=7ffdc78adf0e a1=80000 a2=1b6 a3=24 items=0 ppid=903 pid=926 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="dhclient" exe="/usr/sbin/dhclient" subj=system_u:system_r:dhcpc_t:s0 key=(null)
0.000 | 0033: type=PROCTITLE msg=audit(1505411934.727:27): proctitle=2F7362696E2F6468636C69656E74002D31002D71002D6366002F6574632F646863702F6468636C69656E742D657468302E636F6E66002D6C66002F7661722F6C69622F6468636C69656E742F6468636C69656E742D2D657468302E6C65617365002D7066002F7661722F72756E2F6468636C69656E742D657468302E706964
0.285 | 0034: type=AVC msg=audit(1505411934.727:28): avc: denied { write } for pid=926 comm="dhclient" name="dhclient-eth0.pid" dev="tmpfs" ino=14316 scontext=system_u:system_r:dhcpc_t:s0 tcontext=system_u:object_r:var_run_t:s0 tclass=file
0.101 | 0035: type=SYSCALL msg=audit(1505411934.727:28): arch=c000003e syscall=2 success=no exit=-13 a0=7ffdc78adf0e a1=80241 a2=1a4 a3=24 items=0 ppid=903 pid=926 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="dhclient" exe="/usr/sbin/dhclient" subj=system_u:system_r:dhcpc_t:s0 key=(null)
0.000 | 0036: type=PROCTITLE msg=audit(1505411934.727:28): proctitle=2F7362696E2F6468636C69656E74002D31002D71002D6366002F6574632F646863702F6468636C69656E742D657468302E636F6E66002D6C66002F7661722F6C69622F6468636C69656E742F6468636C69656E742D2D657468302E6C65617365002D7066002F7661722F72756E2F6468636C69656E742D657468302E706964
0.285 | 0037: type=AVC msg=audit(1505411936.878:29): avc: denied { write } for pid=978 comm="dhclient" name="dhclient-eth0.pid" dev="tmpfs" ino=14316 scontext=system_u:system_r:dhcpc_t:s0 tcontext=system_u:object_r:var_run_t:s0 tclass=file
0.101 | 0038: type=SYSCALL msg=audit(1505411936.878:29): arch=c000003e syscall=2 success=no exit=-13 a0=7ffdc78adf0e a1=80241 a2=1a4 a3=7fd1298e7b50 items=0 ppid=926 pid=978 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="dhclient" exe="/usr/sbin/dhclient" subj=system_u:system_r:dhcpc_t:s0 key=(null)

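In the audit records above, the AVC denials show SELinux refusing dhclient read/write access to its pid file (labeled var_run_t), and the paired SYSCALL records agree: syscall=2 is open(2) on x86_64 and exit=-13 is -EACCES. The PROCTITLE value is just the process's argv, hex-encoded with NUL separators, so it can be decoded directly; a sketch using the hex from record 27:

    # Decode an audit PROCTITLE value: hex-encoded argv, NUL-separated.
    proctitle = (
        "2F7362696E2F6468636C69656E74002D31002D71002D6366"
        "002F6574632F646863702F6468636C69656E742D657468302E636F6E66"
        "002D6C66002F7661722F6C69622F6468636C69656E742F6468636C69656E"
        "742D2D657468302E6C65617365"
        "002D7066002F7661722F72756E2F6468636C69656E742D657468302E706964"
    )
    argv = bytes.fromhex(proctitle).split(b"\x00")
    print(" ".join(a.decode() for a in argv))
    # -> /sbin/dhclient -1 -q -cf /etc/dhcp/dhclient-eth0.conf
    #    -lf /var/lib/dhclient/dhclient--eth0.lease
    #    -pf /var/run/dhclient-eth0.pid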
/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/audit/audit.log.txt.gz
0.000 | 0028: type=SERVICE_START msg=audit(1505411943.112:24): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=unbound-keygen comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'
0.000 | 0029: type=SERVICE_START msg=audit(1505411943.876:25): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=growroot comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'
0.000 | 0030: type=SERVICE_START msg=audit(1505411945.652:26): pid=1 uid=0 auid=4294967295 ses=4294967295 subj=system_u:system_r:init_t:s0 msg='unit=glean@eth0 comm="systemd" exe="/usr/lib/systemd/systemd" hostname=? addr=? terminal=? res=success'
0.285 | 0031: type=AVC msg=audit(1505411946.030:27): avc: denied { read } for pid=929 comm="dhclient" name="dhclient-eth0.pid" dev="tmpfs" ino=12113 scontext=system_u:system_r:dhcpc_t:s0 tcontext=system_u:object_r:var_run_t:s0 tclass=file
0.101 | 0032: type=SYSCALL msg=audit(1505411946.030:27): arch=c000003e syscall=2 success=no exit=-13 a0=7ffdf3930f07 a1=80000 a2=1b6 a3=24 items=0 ppid=906 pid=929 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="dhclient" exe="/usr/sbin/dhclient" subj=system_u:system_r:dhcpc_t:s0 key=(null)
0.000 | 0033: type=PROCTITLE msg=audit(1505411946.030:27): proctitle=2F7362696E2F6468636C69656E74002D31002D71002D6366002F6574632F646863702F6468636C69656E742D657468302E636F6E66002D6C66002F7661722F6C69622F6468636C69656E742F6468636C69656E742D2D657468302E6C65617365002D7066002F7661722F72756E2F6468636C69656E742D657468302E706964
0.285 | 0034: type=AVC msg=audit(1505411946.030:28): avc: denied { write } for pid=929 comm="dhclient" name="dhclient-eth0.pid" dev="tmpfs" ino=12113 scontext=system_u:system_r:dhcpc_t:s0 tcontext=system_u:object_r:var_run_t:s0 tclass=file
0.101 | 0035: type=SYSCALL msg=audit(1505411946.030:28): arch=c000003e syscall=2 success=no exit=-13 a0=7ffdf3930f07 a1=80241 a2=1a4 a3=24 items=0 ppid=906 pid=929 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="dhclient" exe="/usr/sbin/dhclient" subj=system_u:system_r:dhcpc_t:s0 key=(null)
0.000 | 0036: type=PROCTITLE msg=audit(1505411946.030:28): proctitle=2F7362696E2F6468636C69656E74002D31002D71002D6366002F6574632F646863702F6468636C69656E742D657468302E636F6E66002D6C66002F7661722F6C69622F6468636C69656E742F6468636C69656E742D2D657468302E6C65617365002D7066002F7661722F72756E2F6468636C69656E742D657468302E706964
0.285 | 0037: type=AVC msg=audit(1505411948.190:29): avc: denied { write } for pid=981 comm="dhclient" name="dhclient-eth0.pid" dev="tmpfs" ino=12113 scontext=system_u:system_r:dhcpc_t:s0 tcontext=system_u:object_r:var_run_t:s0 tclass=file
0.101 | 0038: type=SYSCALL msg=audit(1505411948.190:29): arch=c000003e syscall=2 success=no exit=-13 a0=7ffdf3930f07 a1=80241 a2=1a4 a3=7f32753dbb50 items=0 ppid=929 pid=981 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="dhclient" exe="/usr/sbin/dhclient" subj=system_u:system_r:dhcpc_t:s0 key=(null)

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/home/jenkins/repo_setup.log.txt.gz
0.061 | 2674: 2017-09-14 18:23:34 | ++ NODEPOOL_UCA_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-cloud-archive
0.072 | 2675: 2017-09-14 18:23:34 | ++ export NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.082 | 2676: 2017-09-14 18:23:34 | ++ NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.266 | 2677: 2017-09-14 18:23:34 | ++ export NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.318 | 2678: 2017-09-14 18:23:34 | ++ NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.063 | 2679: 2017-09-14 18:23:34 | ++ export NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.079 | 2680: 2017-09-14 18:23:34 | ++ NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.232 | 2681: 2017-09-14 18:23:34 | ++ export NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.281 | 2682: 2017-09-14 18:23:34 | ++ NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.063 | 2683: 2017-09-14 18:23:34 | ++ export NODEPOOL_RUGYGEMS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rubygems/

0.000 | 2698: 2017-09-14 18:23:38 | + tripleo_dlrn=https://trunk.rdoproject.org/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 2699: 2017-09-14 18:23:38 | + [[ -z https://trunk.rdoproject.org/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf ]]
0.000 | 2700: 2017-09-14 18:23:38 | + [[ -z https://trunk.rdoproject.org/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4 ]]
0.232 | 2701: 2017-09-14 18:23:38 | + export RDO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.281 | 2702: 2017-09-14 18:23:38 | + RDO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.232 | 2703: 2017-09-14 18:23:38 | + export TRIPLEO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.281 | 2704: 2017-09-14 18:23:38 | + TRIPLEO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 2705: 2017-09-14 18:23:38 | + sudo rm -rf /tmp/repo_role_tmp

0.000 | 2921: 2017-09-14 18:24:01 | async = True
0.000 | 2922: 2017-09-14 18:24:01 | bandwidth = 0
0.000 | 2923: 2017-09-14 18:24:01 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 2924: 2017-09-14 18:24:01 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 2925: 2017-09-14 18:24:01 | cache = 0

0.000 | 2984: 2017-09-14 18:24:01 | async = True
0.000 | 2985: 2017-09-14 18:24:01 | bandwidth = 0
0.000 | 2986: 2017-09-14 18:24:01 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 2987: 2017-09-14 18:24:01 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.000 | 2988: 2017-09-14 18:24:01 | cache = 0

0.000 | 3057: 2017-09-14 18:24:01 | async = True
0.000 | 3058: 2017-09-14 18:24:01 | bandwidth = 0
0.000 | 3059: 2017-09-14 18:24:01 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.231 | 3060: 2017-09-14 18:24:01 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos/centos/7/cloud/x86_64/openstack-pike/
0.000 | 3061: 2017-09-14 18:24:01 | cache = 0

0.000 | 3599: 2017-09-14 18:24:08 | Downloading packages:
0.000 | 3600: 2017-09-14 18:24:08 | Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
0.000 | 3601: 2017-09-14 18:24:10 | --------------------------------------------------------------------------------
0.328 | 3602: 2017-09-14 18:24:10 | Total 571 kB/s | 1.0 MB 00:01
0.000 | 3603: 2017-09-14 18:24:10 | Running transaction check

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/home/jenkins/repo_setup.log.txt.gz
0.000 | 1840: 2017-09-14 18:23:00 | async = True
0.000 | 1841: 2017-09-14 18:23:00 | bandwidth = 0
0.000 | 1842: 2017-09-14 18:23:00 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 1843: 2017-09-14 18:23:00 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 1844: 2017-09-14 18:23:00 | cache = 0

0.000 | 1903: 2017-09-14 18:23:00 | async = True
0.000 | 1904: 2017-09-14 18:23:00 | bandwidth = 0
0.000 | 1905: 2017-09-14 18:23:00 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 1906: 2017-09-14 18:23:00 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/91/ad/91addc87c67c83b59bbe336e8ecfb80f784cdfbc_0d00ef8f
0.000 | 1907: 2017-09-14 18:23:00 | cache = 0

0.000 | 2039: 2017-09-14 18:23:00 | async = True
0.000 | 2040: 2017-09-14 18:23:00 | bandwidth = 0
0.000 | 2041: 2017-09-14 18:23:00 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.231 | 2042: 2017-09-14 18:23:00 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos/centos/7/cloud/x86_64/openstack-pike/
0.000 | 2043: 2017-09-14 18:23:00 | cache = 0

0.061 | 2533: 2017-09-14 18:23:01 | ++ NODEPOOL_UCA_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-cloud-archive
0.072 | 2534: 2017-09-14 18:23:01 | ++ export NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.082 | 2535: 2017-09-14 18:23:01 | ++ NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.266 | 2536: 2017-09-14 18:23:01 | ++ export NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.318 | 2537: 2017-09-14 18:23:01 | ++ NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.063 | 2538: 2017-09-14 18:23:01 | ++ export NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.079 | 2539: 2017-09-14 18:23:01 | ++ NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.232 | 2540: 2017-09-14 18:23:01 | ++ export NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.281 | 2541: 2017-09-14 18:23:01 | ++ NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.063 | 2542: 2017-09-14 18:23:01 | ++ export NODEPOOL_RUGYGEMS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rubygems/

0.000 | 2557: 2017-09-14 18:23:01 | + tripleo_dlrn=https://trunk.rdoproject.org/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 2558: 2017-09-14 18:23:01 | + [[ -z https://trunk.rdoproject.org/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf ]]
0.000 | 2559: 2017-09-14 18:23:01 | + [[ -z https://trunk.rdoproject.org/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4 ]]
0.232 | 2560: 2017-09-14 18:23:01 | + export RDO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.281 | 2561: 2017-09-14 18:23:01 | + RDO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.232 | 2562: 2017-09-14 18:23:01 | + export TRIPLEO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.281 | 2563: 2017-09-14 18:23:01 | + TRIPLEO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 2564: 2017-09-14 18:23:01 | + sudo rm -rf /tmp/repo_role_tmp

0.000 | 2752: 2017-09-14 18:23:06 | async = True
0.000 | 2753: 2017-09-14 18:23:06 | bandwidth = 0
0.000 | 2754: 2017-09-14 18:23:06 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 2755: 2017-09-14 18:23:06 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 2756: 2017-09-14 18:23:06 | cache = 0

0.000 | 2815: 2017-09-14 18:23:06 | async = True
0.000 | 2816: 2017-09-14 18:23:06 | bandwidth = 0
0.000 | 2817: 2017-09-14 18:23:06 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 2818: 2017-09-14 18:23:06 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.000 | 2819: 2017-09-14 18:23:06 | cache = 0

0.000 | 2888: 2017-09-14 18:23:06 | async = True
0.000 | 2889: 2017-09-14 18:23:06 | bandwidth = 0
0.000 | 2890: 2017-09-14 18:23:06 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.231 | 2891: 2017-09-14 18:23:06 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos/centos/7/cloud/x86_64/openstack-pike/
0.000 | 2892: 2017-09-14 18:23:06 | cache = 0

0.000 | 5076: 2017-09-14 18:27:45 | async = True
0.000 | 5077: 2017-09-14 18:27:45 | bandwidth = 0
0.000 | 5078: 2017-09-14 18:27:45 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 5079: 2017-09-14 18:27:45 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 5080: 2017-09-14 18:27:45 | cache = 0

0.000 | 5139: 2017-09-14 18:27:45 | async = True
0.000 | 5140: 2017-09-14 18:27:45 | bandwidth = 0
0.000 | 5141: 2017-09-14 18:27:45 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 5142: 2017-09-14 18:27:45 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.000 | 5143: 2017-09-14 18:27:45 | cache = 0

0.000 | 5212: 2017-09-14 18:27:45 | async = True
0.000 | 5213: 2017-09-14 18:27:45 | bandwidth = 0
0.000 | 5214: 2017-09-14 18:27:45 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.231 | 5215: 2017-09-14 18:27:45 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos/centos/7/cloud/x86_64/openstack-pike/
0.000 | 5216: 2017-09-14 18:27:45 | cache = 0

0.061 | 6021: 2017-09-14 18:27:45 | ++ NODEPOOL_UCA_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-cloud-archive
0.072 | 6022: 2017-09-14 18:27:45 | ++ export NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.082 | 6023: 2017-09-14 18:27:45 | ++ NODEPOOL_MARIADB_MIRROR=http://mirror.gra1.ovh.openstack.org/ubuntu-mariadb
0.266 | 6024: 2017-09-14 18:27:45 | ++ export NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.318 | 6025: 2017-09-14 18:27:45 | ++ NODEPOOL_BUILDLOGS_CENTOS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos
0.063 | 6026: 2017-09-14 18:27:45 | ++ export NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.079 | 6027: 2017-09-14 18:27:45 | ++ NODEPOOL_DOCKER_REGISTRY_PROXY=http://mirror.gra1.ovh.openstack.org:8081/registry-1.docker/
0.232 | 6028: 2017-09-14 18:27:45 | ++ export NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.281 | 6029: 2017-09-14 18:27:45 | ++ NODEPOOL_RDO_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rdo
0.063 | 6030: 2017-09-14 18:27:45 | ++ export NODEPOOL_RUGYGEMS_PROXY=http://mirror.gra1.ovh.openstack.org:8080/rubygems/

0.000 | 6045: 2017-09-14 18:27:45 | + tripleo_dlrn=https://trunk.rdoproject.org/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 6046: 2017-09-14 18:27:45 | + [[ -z https://trunk.rdoproject.org/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf ]]
0.000 | 6047: 2017-09-14 18:27:45 | + [[ -z https://trunk.rdoproject.org/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4 ]]
0.232 | 6048: 2017-09-14 18:27:45 | + export RDO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.281 | 6049: 2017-09-14 18:27:45 | + RDO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.232 | 6050: 2017-09-14 18:27:45 | + export TRIPLEO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.281 | 6051: 2017-09-14 18:27:45 | + TRIPLEO_DLRN_REPO=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 6052: 2017-09-14 18:27:45 | + sudo rm -rf /tmp/repo_role_tmp

0.000 | 6240: 2017-09-14 18:27:50 | async = True
0.000 | 6241: 2017-09-14 18:27:50 | bandwidth = 0
0.000 | 6242: 2017-09-14 18:27:50 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 6243: 2017-09-14 18:27:50 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/f4/42/f442a3aa35981c3d6d7e312599dde2a1b1d202c9_0468cca4
0.000 | 6244: 2017-09-14 18:27:50 | cache = 0

0.000 | 6303: 2017-09-14 18:27:50 | async = True
0.000 | 6304: 2017-09-14 18:27:50 | bandwidth = 0
0.000 | 6305: 2017-09-14 18:27:50 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.396 | 6306: 2017-09-14 18:27:50 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/dd/5c/dd5c859fa1ef2f37c93d0c5a41c023b7073b8e07_bada63cf
0.000 | 6307: 2017-09-14 18:27:50 | cache = 0

0.000 | 6376: 2017-09-14 18:27:50 | async = True
0.000 | 6377: 2017-09-14 18:27:50 | bandwidth = 0
0.000 | 6378: 2017-09-14 18:27:50 | base_persistdir = /var/lib/yum/repos/x86_64/7
0.231 | 6379: 2017-09-14 18:27:50 | baseurl = http://mirror.gra1.ovh.openstack.org:8080/buildlogs.centos/centos/7/cloud/x86_64/openstack-pike/
0.000 | 6380: 2017-09-14 18:27:50 | cache = 0

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/postci.txt.gz
0.000 | 1599: | | "content": "TJcfX9uVjVupqvW2CChdO5Kg5sMtVS_wmq3QY9sxqS4=" |
0.000 | 1600: | | }, |
0.000 | 1601: | | "/etc/keystone/fernet-keys/1": { |
0.558 | 1602: | | "content": "DbgrrwKJNjYlLqe-Eul_4zJeAznXjy31wCViVGiQ_ps=" |
0.000 | 1603: | | } |

0.000 | 1767: | | "content": "0Df2F-KQ_eFhQH2b9-wqtzZRg7c5PRmdU-LCYKlf6mU=" |
0.000 | 1768: | | }, |
0.000 | 1769: | | "/etc/keystone/credential-keys/0": { |
0.558 | 1770: | | "content": "eyJTsxlRlGZ-ZeN-BFDMs80x6L7McVfVVCe8uuONpKY=" |
0.000 | 1771: | | } |

0.000 | 1874: | | "rabbitmq::ssl": false, |
0.000 | 1875: | | "neutron::agents::ml2::ovs::local_ip": "tenant", |
0.000 | 1876: | | "nova::keystone::authtoken::password": "Baq7zRVmDVdgsa7Pgre26EpEw", |
0.224 | 1877: | | "mysql::server::root_password": "5K9QeIioay", |
0.000 | 1878: | | "glance::api::os_region_name": "regionOne", |

0.000 | 3081: | VipMap | e055af6f-d3c0-44f6-9cb3-51b24087d3a1 | OS::TripleO::Network::Ports::NetVipMap | CREATE_COMPLETE | 2017-09-14T19:02:44Z | overcloud |
0.000 | 3082: | allNodesConfig | 879ecbd3-3c5a-4662-bb14-4310e640d321 | OS::TripleO::AllNodes::SoftwareConfig | CREATE_COMPLETE | 2017-09-14T19:02:44Z | overcloud |
0.000 | 3083: | hostsConfig | 640c1106-ac8d-4248-9dbb-d891a7999bd3 | OS::TripleO::Hosts::SoftwareConfig | CREATE_COMPLETE | 2017-09-14T19:02:44Z | overcloud |
0.213 | 3084: | ServiceNetMapValue | overcloud-ServiceNetMap-ulc6rjmyg53u-ServiceNetMapValue-woropbecffn4 | OS::Heat::Value | CREATE_COMPLETE | 2017-09-14T19:02:46Z | overcloud-ServiceNetMap-ulc6rjmyg53u |
0.000 | 3085: | ExternalNetwork | 8529fc41-93f4-438c-92a1-78f839101837 | OS::TripleO::Network::External | CREATE_COMPLETE | 2017-09-14T19:02:47Z | overcloud-Networks-6zuqlc2ch7z4 |

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/bootstrap-subnodes.log.txt.gz
0.000 | 0195: 100 213 100 213 0 0 480 0 --:--:-- --:--:-- --:--:-- 480
0.000 | 0196: * Closing connection 0
0.000 | 0197: [delorean-current]
0.266 | 0198: name=delorean-openstack-nova-91addc87c67c83b59bbe336e8ecfb80f784cdfbc
0.107 | 0199: baseurl=http://mirror.gra1.ovh.openstack.org:8080/rdo/centos7/91/ad/91addc87c67c83b59bbe336e8ecfb80f784cdfbc_0d00ef8f

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/subnode-2/var/log/cluster/corosync.log.txt.gz
0.000 | 0257: Sep 14 19:16:41 [26894] centos-7-2-node-ovh-gra1-10937570-899082 cib: info: cib_process_request: Forwarding cib_modify operation for section status to all (origin=local/attrd/2)
0.000 | 0258: Sep 14 19:16:41 [26899] centos-7-2-node-ovh-gra1-10937570-899082 crmd: info: abort_transition_graph: Transition aborted: Peer Cancelled | source=do_te_invoke:161 complete=true
0.000 | 0259: Sep 14 19:16:41 [26897] centos-7-2-node-ovh-gra1-10937570-899082 attrd: info: attrd_client_refresh: Updating all attributes
0.281 | 0260: Sep 14 19:16:41 [26897] centos-7-2-node-ovh-gra1-10937570-899082 attrd: info: write_attribute: Write out of 'shutdown' delayed: update 2 in progress
0.063 | 0261: Sep 14 19:16:41 [26897] centos-7-2-node-ovh-gra1-10937570-899082 attrd: info: write_attribute: Sent update 3 with 1 changes for terminate, id=<n/a>, set=(null)

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/heat-deploy-times.log.txt.gz
0.000 | 0174: UserData 1.0
0.000 | 0175: ServiceNetMapValue 0.0
0.000 | 0176: ExternalNetwork 0.0
0.317 | 0177: overcloud-ServiceNetMap-ulc6rjmyg53u 0.0
0.000 | 0178: ControllerPostPuppetMaintenanceModeConfig 0.0

/tmp//logs.openstack.org/42/503842/2/check/gate-tripleo-ci-centos-7-scenario003-multinode-oooq-puppet/04f1aea/logs/undercloud/var/log/mistral/engine.log.txt.gz
0.000 | 5499: Loading callback plugin default of type stdout, v2.0 from /usr/lib/python2.7/site-packages/ansible/plugins/callback/__init__.pyc
0.000 | 5500:
0.000 | 5501: PLAYBOOK: playbook.yaml ********************************************************
0.335 | 5502: 1 plays in /tmp/ansible-mistral-actionq4fk5K/playbook.yaml
0.000 | 5503:

0.000 | 5507: Using module file /usr/lib/python2.7/site-packages/ansible/modules/system/setup.py
0.000 | 5508: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5509: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5510: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5511: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.002 | 5518: <149.202.161.193> (0, '/home/jenkins
0.002 | 5518: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.002 | 5518: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.002 | 5518: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.002 | 5518: debug1: auto-mux: Trying existing master\r
0.002 | 5518: debug1: Control socket "/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f" does not exist\r
0.002 | 5518: debug2: resolving "149.202.161.193" port 22\r
0.002 | 5518: debug2: ssh_connect_direct: needpriv 0\r
0.002 | 5518: debug1: Connecting to 149.202.161.193 [149.202.161.193] port 22.\r
0.002 | 5518: debug2: fd 3 setting O_NONBLOCK\r
0.002 | 5518: debug1: fd 3 clearing O_NONBLOCK\r
0.002 | 5518: debug1: Connection established.\r
0.002 | 5518: debug3: timeout: 10000 ms remain after connect\r
0.002 | 5518: debug1: SELinux support enabled\r
0.002 | 5518: Could not create directory \'/home/mistral/.ssh\'.\r
0.002 | 5518: debug1: key_load_public: No such file or directory\r
0.002 | 5518: debug1: identity file /tmp/ansible-mistral-actionq4fk5K/ssh_private_key type -1\r
0.002 | 5518: debug1: key_load_public: No such file or directory\r
0.002 | 5518: debug1: identity file /tmp/ansible-mistral-actionq4fk5K/ssh_private_key-cert type -1\r
0.002 | 5518: debug1: Enabling compatibility mode for protocol 2.0\r
0.002 | 5518: debug1: Local version string SSH-2.0-OpenSSH_7.4\r
0.002 | 5518: debug1: Remote protocol version 2.0, remote software version OpenSSH_7.4\r
0.002 | 5518: debug1: match: OpenSSH_7.4 pat OpenSSH* compat 0x04000000\r
0.002 | 5518: debug2: fd 3 setting O_NONBLOCK\r
0.002 | 5518: debug1: Authenticating to 149.202.161.193:22 as \'jenkins\'\r
0.002 | 5518: debug3: send packet: type 20\r
0.002 | 5518: debug1: SSH2_MSG_KEXINIT sent\r
0.002 | 5518: debug3: receive packet: type 20\r
0.002 | 5518: debug1: SSH2_MSG_KEXINIT received\r
0.002 | 5518: debug2: local client KEXINIT proposal\r
0.002 | 5518: debug2: KEX algorithms: curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha256,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1,ext-info-c\r
0.002 | 5518: debug2: host key algorithms: ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ssh-dss-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,rsa-sha2-512,rsa-sha2-256,ssh-rsa,ssh-dss\r
0.002 | 5518: debug2: ciphers ctos: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc\r
0.002 | 5518: debug2: ciphers stoc: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc\r
0.002 | 5518: debug2: MACs ctos: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 5518: debug2: MACs stoc: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 5518: debug2: compression ctos: zlib@openssh.com,zlib,none\r
0.002 | 5518: debug2: compression stoc: zlib@openssh.com,zlib,none\r
0.002 | 5518: debug2: languages ctos: \r
0.002 | 5518: debug2: languages stoc: \r
0.002 | 5518: debug2: first_kex_follows 0 \r
0.002 | 5518: debug2: reserved 0 \r
0.002 | 5518: debug2: peer server KEXINIT proposal\r
0.002 | 5518: debug2: KEX algorithms: curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha256,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1\r
0.002 | 5518: debug2: host key algorithms: ssh-rsa,rsa-sha2-512,rsa-sha2-256,ecdsa-sha2-nistp256,ssh-ed25519\r
0.002 | 5518: debug2: ciphers ctos: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc,blowfish-cbc,cast128-cbc,3des-cbc\r
0.002 | 5518: debug2: ciphers stoc: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc,blowfish-cbc,cast128-cbc,3des-cbc\r
0.002 | 5518: debug2: MACs ctos: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 5518: debug2: MACs stoc: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 5518: debug2: compression ctos: none,zlib@openssh.com\r
0.002 | 5518: debug2: compression stoc: none,zlib@openssh.com\r
0.002 | 5518: debug2: languages ctos: \r
0.002 | 5518: debug2: languages stoc: \r
0.002 | 5518: debug2: first_kex_follows 0 \r
0.002 | 5518: debug2: reserved 0 \r
0.002 | 5518: debug1: kex: algorithm: curve25519-sha256\r
0.002 | 5518: debug1: kex: host key algorithm: ecdsa-sha2-nistp256\r
0.002 | 5518: debug1: kex: server->client cipher: chacha20-poly1305@openssh.com MAC: <implicit> compression: zlib@openssh.com\r
0.002 | 5518: debug1: kex: client->server cipher: chacha20-poly1305@openssh.com MAC: <implicit> compression: zlib@openssh.com\r
0.002 | 5518: debug1: kex: curve25519-sha256 need=64 dh_need=64\r
0.002 | 5518: debug1: kex: curve25519-sha256 need=64 dh_need=64\r
0.002 | 5518: debug3: send packet: type 30\r
0.002 | 5518: debug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r
0.002 | 5518: debug3: receive packet: type 31\r
0.002 | 5518: debug1: Server host key: ecdsa-sha2-nistp256 SHA256:QjTnLFrvmlQNpkdDJwfGs31onDLyXf9Zre+9r40bp5w\r
0.002 | 5518: Failed to add the host to the list of known hosts (/home/mistral/.ssh/known_hosts).\r
0.002 | 5518: debug3: send packet: type 21\r
0.002 | 5518: debug2: set_newkeys: mode 1\r
0.002 | 5518: debug1: rekey after 134217728 blocks\r
0.002 | 5518: debug1: SSH2_MSG_NEWKEYS sent\r
0.002 | 5518: debug1: expecting SSH2_MSG_NEWKEYS\r
0.002 | 5518: debug3: receive packet: type 21\r
0.002 | 5518: debug1: SSH2_MSG_NEWKEYS received\r
0.002 | 5518: debug2: set_newkeys: mode 0\r
0.002 | 5518: debug1: rekey after 134217728 blocks\r
0.002 | 5518: debug2: key: /tmp/ansible-mistral-actionq4fk5K/ssh_private_key ((nil)), explicit\r
0.002 | 5518: debug3: send packet: type 5\r
0.002 | 5518: debug3: receive packet: type 7\r
0.002 | 5518: debug1: SSH2_MSG_EXT_INFO received\r
0.002 | 5518: debug1: kex_input_ext_info: server-sig-algs=<rsa-sha2-256,rsa-sha2-512>\r
0.002 | 5518: debug3: receive packet: type 6\r
0.002 | 5518: debug2: service_accept: ssh-userauth\r
0.002 | 5518: debug1: SSH2_MSG_SERVICE_ACCEPT received\r
0.002 | 5518: debug3: send packet: type 50\r
0.002 | 5518: debug3: receive packet: type 51\r
0.002 | 5518: debug1: Authentications that can continue: publickey,gssapi-keyex,gssapi-with-mic,password\r
0.002 | 5518: debug3: start over, passed a different list publickey,gssapi-keyex,gssapi-with-mic,password\r
0.002 | 5518: debug3: preferred gssapi-with-mic,gssapi-keyex,hostbased,publickey\r
0.002 | 5518: debug3: authmethod_lookup gssapi-with-mic\r
0.002 | 5518: debug3: remaining preferred: gssapi-keyex,hostbased,publickey\r
0.002 | 5518: debug3: authmethod_is_enabled gssapi-with-mic\r
0.002 | 5518: debug1: Next authentication method: gssapi-with-mic\r
0.002 | 5518: debug1: Unspecified GSS failure. Minor code may provide more information
0.002 | 5518: No Kerberos credentials available (default cache: KEYRING:persistent:991)
0.002 | 5518: \r
0.002 | 5518: debug1: Unspecified GSS failure. Minor code may provide more information
0.002 | 5518: No Kerberos credentials available (default cache: KEYRING:persistent:991)
0.002 | 5518: \r
0.002 | 5518: debug2: we did not send a packet, disable method\r
0.002 | 5518: debug3: authmethod_lookup gssapi-keyex\r
0.002 | 5518: debug3: remaining preferred: hostbased,publickey\r
0.002 | 5518: debug3: authmethod_is_enabled gssapi-keyex\r
0.002 | 5518: debug1: Next authentication method: gssapi-keyex\r
0.002 | 5518: debug1: No valid Key exchange context\r
0.002 | 5518: debug2: we did not send a packet, disable method\r
0.002 | 5518: debug3: authmethod_lookup publickey\r
0.002 | 5518: debug3: remaining preferred: ,publickey\r
0.002 | 5518: debug3: authmethod_is_enabled publickey\r
0.002 | 5518: debug1: Next authentication method: publickey\r
0.002 | 5518: debug1: Trying private key: /tmp/ansible-mistral-actionq4fk5K/ssh_private_key\r
0.002 | 5518: debug3: sign_and_send_pubkey: RSA SHA256:pHIV+tENqFn52aon+j4HlKDu9wf8tPZsYaodkGIL7rk\r
0.002 | 5518: debug3: send packet: type 50\r
0.002 | 5518: debug2: we sent a publickey packet, wait for reply\r
0.002 | 5518: debug3: receive packet: type 52\r
0.002 | 5518: debug1: Enabling compression at level 6.\r
0.002 | 5518: debug1: Authentication succeeded (publickey).\r
0.002 | 5518: Authenticated to 149.202.161.193 ([149.202.161.193]:22).\r
0.002 | 5518: debug1: setting up multiplex master socket\r
0.002 | 5518: debug3: muxserver_listen: temporary control path /tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f.lsFoyTVSvT0J2bdT\r
0.002 | 5518: debug2: fd 4 setting O_NONBLOCK\r
0.002 | 5518: debug3: fd 4 is O_NONBLOCK\r
0.002 | 5518: debug3: fd 4 is O_NONBLOCK\r
0.002 | 5518: debug1: channel 0: new [/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f]\r
0.002 | 5518: debug3: muxserver_listen: mux listener channel 0 fd 4\r
0.002 | 5518: debug2: fd 3 setting TCP_NODELAY\r
0.002 | 5518: debug3: ssh_packet_set_tos: set IP_TOS 0x08\r
0.002 | 5518: debug1: control_persist_detach: backgrounding master process\r
0.002 | 5518: debug2: control_persist_detach: background process is 9308\r
0.002 | 5518: debug2: fd 4 setting O_NONBLOCK\r
0.002 | 5518: debug1: forking to background\r
0.002 | 5518: debug1: Entering interactive session.\r
0.002 | 5518: debug1: pledge: id\r
0.002 | 5518: debug2: set_control_persist_exit_time: schedule exit in 60 seconds\r
0.002 | 5518: debug1: multiplexing control connection\r
0.002 | 5518: debug2: fd 5 setting O_NONBLOCK\r
0.002 | 5518: debug3: fd 5 is O_NONBLOCK\r
0.002 | 5518: debug1: channel 1: new [mux-control]\r
0.002 | 5518: debug3: channel_post_mux_listener: new mux channel 1 fd 5\r
0.002 | 5518: debug3: mux_master_read_cb: channel 1: hello sent\r
0.002 | 5518: debug2: set_control_persist_exit_time: cancel scheduled exit\r
0.002 | 5518: debug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r
0.002 | 5518: debug2: process_mux_master_hello: channel 1 slave version 4\r
0.002 | 5518: debug2: mux_client_hello_exchange: master version 4\r
0.002 | 5518: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.002 | 5518: debug3: mux_client_request_session: entering\r
0.002 | 5518: debug3: mux_client_request_alive: entering\r
0.002 | 5518: debug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r
0.002 | 5518: debug2: process_mux_alive_check: channel 1: alive check\r
0.002 | 5518: debug3: mux_client_request_alive: done pid = 9310\r
0.002 | 5518: debug3: mux_client_request_session: session request sent\r
0.002 | 5518: debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 66\r
0.002 | 5518: debug2: process_mux_new_session: channel 1: request tty 0, X 0, agent 0, subsys 0, term "", cmd "/bin/sh -c \'echo ~ && sleep 0\'", env 0\r
0.002 | 5518: debug3: process_mux_new_session: got fds stdin 6, stdout 7, stderr 8\r
0.002 | 5518: debug2: fd 7 setting O_NONBLOCK\r
0.002 | 5518: debug2: fd 8 setting O_NONBLOCK\r
0.002 | 5518: debug1: channel 2: new [client-session]\r
0.002 | 5518: debug2: process_mux_new_session: channel_new: 2 linked to control channel 1\r
0.002 | 5518: debug2: channel 2: send open\r
0.002 | 5518: debug3: send packet: type 90\r
0.002 | 5518: debug3: receive packet: type 80\r
0.002 | 5518: debug1: client_input_global_request: rtype hostkeys-00@openssh.com want_reply 0\r
0.002 | 5518: debug3: receive packet: type 91\r
0.002 | 5518: debug2: callback start\r
0.002 | 5518: debug2: client_session2_setup: id 2\r
0.002 | 5518: debug1: Sending command: /bin/sh -c \'echo ~ && sleep 0\'\r
0.002 | 5518: debug2: channel 2: request exec confirm 1\r
0.002 | 5518: debug3: send packet: type 98\r
0.002 | 5518: debug3: mux_session_confirm: sending success reply\r
0.002 | 5518: debug2: callback done\r
0.002 | 5518: debug2: channel 2: open confirm rwindow 0 rmax 32768\r
0.002 | 5518: debug1: mux_client_request_session: master session id: 2\r
0.002 | 5518: debug2: channel 2: rcvd adjust 2097152\r
0.002 | 5518: debug3: receive packet: type 99\r
0.002 | 5518: debug2: channel_input_status_confirm: type 99 id 2\r
0.002 | 5518: debug2: exec request accepted on channel 2\r
0.002 | 5518: debug3: receive packet: type 98\r
0.002 | 5518: debug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r
0.002 | 5518: debug3: mux_exit_message: channel 2: exit message, exitval 0\r
0.002 | 5518: debug3: receive packet: type 98\r
0.002 | 5518: debug1: client_input_channel_req: channel 2 rtype eow@openssh.com reply 0\r
0.002 | 5518: debug2: channel 2: rcvd eow\r
0.002 | 5518: debug2: channel 2: close_read\r
0.002 | 5518: debug2: channel 2: input open -> closed\r
0.002 | 5518: debug3: receive packet: type 96\r
0.002 | 5518: debug2: channel 2: rcvd eof\r
0.002 | 5518: debug2: channel 2: output open -> drain\r
0.002 | 5518: debug2: channel 2: obuf empty\r
0.002 | 5518: debug2: channel 2: close_write\r
0.002 | 5518: debug2: channel 2: output drain -> closed\r
0.002 | 5518: debug3: receive packet: type 97\r
0.002 | 5518: debug2: channel 2: rcvd close\r
0.002 | 5518: debug3: channel 2: will not send data after close\r
0.002 | 5518: debug2: channel 2: send close\r
0.002 | 5518: debug3: send packet: type 97\r
0.002 | 5518: debug2: channel 2: is dead\r
0.002 | 5518: debug2: channel 2: gc: notify user\r
0.002 | 5518: debug3: mux_master_session_cleanup_cb: entering for channel 2\r
0.002 | 5518: debug2: channel 1: rcvd close\r
0.002 | 5518: debug2: channel 1: output open -> drain\r
0.002 | 5518: debug2: channel 1: close_read\r
0.002 | 5518: debug2: channel 1: input open -> closed\r
0.002 | 5518: debug2: channel 2: gc: user detached\r
0.002 | 5518: debug2: channel 2: is dead\r
0.002 | 5518: debug2: channel 2: garbage collecting\r
0.002 | 5518: debug1: channel 2: free: client-session, nchannels 3\r
0.002 | 5518: debug3: channel 2: status: The following connections are open:\r
0.002 | 5518: #1 mux-control (t16 r-1 i3/0 o1/16 fd 5/5 cc -1)\r
0.002 | 5518: #2 client-session (t4 r0 i3/0 o3/0 fd -1/-1 cc -1)\r
0.002 | 5518: \r
0.002 | 5518: debug2: channel 1: obuf empty\r
0.002 | 5518: debug2: channel 1: close_write\r
0.002 | 5518: debug2: channel 1: output drain -> closed\r
0.002 | 5518: debug2: channel 1: is dead (local)\r
0.002 | 5518: debug2: channel 1: gc: notify user\r
0.002 | 5518: debug3: mux_master_control_cleanup_cb: entering for channel 1\r
0.002 | 5518: debug2: channel 1: gc: user detached\r
0.002 | 5518: debug2: channel 1: is dead (local)\r
0.002 | 5518: debug2: channel 1: garbage collecting\r
0.002 | 5518: debug1: channel 1: free: mux-control, nchannels 2\r
0.002 | 5518: debug3: channel 1: status: The following connections are open:\r
0.002 | 5518: #1 mux-control (t16 r-1 i3/0 o3/0 fd 5/5 cc -1)\r
0.002 | 5518: \r
0.002 | 5518: debug2: set_control_persist_exit_time: schedule exit in 60 seconds\r
0.002 | 5518: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.002 | 5518: debug2: Received exit status from master 0\r
0.002 | 5518: ')
0.000 | 5519: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5520: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5521: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5522: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5529: <149.202.161.193> (0, 'ansible-tmp-1505415905.05-163701451985434=/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434
0.000 | 5529: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5529: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5529: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5529: debug1: auto-mux: Trying existing master\r
0.000 | 5529: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5529: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5529: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5529: debug3: mux_client_request_session: entering\r
0.000 | 5529: debug3: mux_client_request_alive: entering\r
0.000 | 5529: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5529: debug3: mux_client_request_session: session request sent\r
0.000 | 5529: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5529: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5529: debug2: Received exit status from master 0\r
0.000 | 5529: ')
0.000 | 5530: <149.202.161.193> PUT /tmp/tmp4nQKPf TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py
0.000 | 5531: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5532: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5533: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5540: <149.202.161.193> (0, 'sftp> put /tmp/tmp4nQKPf /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py
0.000 | 5540: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5540: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5540: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5540: debug1: auto-mux: Trying existing master\r
0.000 | 5540: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5540: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5540: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5540: debug3: mux_client_request_session: entering\r
0.000 | 5540: debug3: mux_client_request_alive: entering\r
0.000 | 5540: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5540: debug3: mux_client_request_session: session request sent\r
0.000 | 5540: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5540: debug2: Remote version: 3\r
0.000 | 5540: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5540: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5540: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5540: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5540: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5540: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5540: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5540: debug3: Looking up /tmp/tmp4nQKPf\r
0.000 | 5540: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5540: debug3: Received stat reply T:101 I:2\r
0.000 | 5540: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5540: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py\r
0.000 | 5540: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 5540: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5540: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 5540: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:32768\r
0.000 | 5540: debug3: Sent message SSH2_FXP_WRITE I:6 O:65536 S:32768\r
0.000 | 5540: debug3: Sent message SSH2_FXP_WRITE I:7 O:98304 S:5484\r
0.000 | 5540: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5540: debug3: In write loop, ack for 5 32768 bytes at 32768\r
0.000 | 5540: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5540: debug3: In write loop, ack for 6 32768 bytes at 65536\r
0.000 | 5540: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5540: debug3: In write loop, ack for 7 5484 bytes at 98304\r
0.000 | 5540: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5540: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5540: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5540: debug2: Received exit status from master 0\r
0.000 | 5540: ')
0.000 | 5541: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5542: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5543: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5544: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5551: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5551: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5551: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5551: debug1: auto-mux: Trying existing master\r
0.000 | 5551: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5551: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5551: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5551: debug3: mux_client_request_session: entering\r
0.000 | 5551: debug3: mux_client_request_alive: entering\r
0.000 | 5551: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5551: debug3: mux_client_request_session: session request sent\r
0.000 | 5551: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5551: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5551: debug2: Received exit status from master 0\r
0.000 | 5551: ')
0.000 | 5552: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5553: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5554: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5555: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5559: <149.202.161.193> SSH: PlayContext set ssh_extra_args: ()
0.066 | 5560: <149.202.161.193> SSH: found only ControlPersist; added ControlPath: (-o)(ControlPath=/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f)
0.044 | 5561: <149.202.161.193> SSH: EXEC ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o 'IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key"' -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o User=jenkins -o ConnectTimeout=10 '-o StrictHostKeyChecking=no' -o ControlPath=/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f -tt 149.202.161.193 '/bin/sh -c '"'"'sudo -H -S -n -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-teymqwmnyccclmdxyoahyempwuibugiz; /usr/bin/python /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py; rm -rf "/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/" > /dev/null 2>&1'"'"'"'"'"'"'"'"' && sleep 0'"'"''
0.233 | 5562: <149.202.161.193> (0, '\r
0.233 | 5562: {"invocation": {"module_args": {"filter": "*", "gather_subset": ["all"], "fact_path": "/etc/ansible/facts.d", "gather_timeout": 10}}, "changed": false, "ansible_facts": {"facter_operatingsystem": "CentOS", "facter_selinux_current_mode": "enforcing", "ansible_real_group_id": 0, "facter_hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "module_setup": true, "facter_uptime_hours": 1, "ansible_distribution_version": "7.4.1708", "facter_sshfp_rsa": "SSHFP 1 1 89d0d78f9560c406110696027a21b1b8ab57c131\
0.233 | 5562: SSHFP 1 2 4f99546e76a088bbcd63d73e78c6735a073b997a1546488b925460f2eda8ed04", "ansible_env": {"USERNAME": "root", "LANG": "en_US.UTF-8", "TERM": "unknown", "SHELL": "/bin/bash", "SUDO_COMMAND": "/bin/sh -c echo BECOME-SUCCESS-teymqwmnyccclmdxyoahyempwuibugiz; /usr/bin/python /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py; rm -rf \\"/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/\\" > /dev/null 2>&1", "SHLVL": "1", "SUDO_UID": "1000", "SUDO_GID": "1000", "PWD": "/home/jenkins", "LOGNAME": "root", "USER": "root", "PATH": "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin/", "MAIL": "/var/mail/jenkins", "SUDO_USER": "jenkins", "LS_COLORS": "", "HOME": "/root", "_": "/usr/bin/python"}, "facter_swapsize_mb": "8192.00", "facter_lsbmajdistrelease": "7", "facter_gid": "root", "ansible_userspace_bits": "64", "ansible_architecture": "x86_64", "ansible_default_ipv4": {"macaddress": "fa:16:3e:8f:1c:3d", "network": "149.202.161.193", "mtu": 1500, "broadcast": "149.202.161.193", "alias": "eth0", "netmask": "255.255.255.255", "address": "149.202.161.193", "interface": "eth0", "type": "ether", "gateway": "149.202.160.1"}, "ansible_swapfree_mb": 8191, "facter_netmask_eth0": "255.255.255.255", "facter_uuid": "50A0291F-48F8-4E97-8222-92D27C9EFE59", "ansible_cmdline": {"no_timer_check": true, "nomodeset": true, "BOOT_IMAGE": "/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64", "vga": "normal", "console": "ttyS0,115200", "ro": true, "root": "LABEL=cloudimg-rootfs", "nofb": true}, "ansible_selinux": {"status": "enabled", "policyvers": 28, "type": "targeted", "mode": "enforcing", "config_mode": "enforcing"}, "facter_bios_vendor": "SeaBIOS", "ansible_ovs_system": {"macaddress": "fe:44:06:15:b0:4d", "features": {}, "mtu": 1500, "device": "ovs-system", "promisc": true, "active": false, "type": "ether"}, "ansible_userspace_architecture": "x86_64", "facter_lsbdistid": "CentOS", "facter_osfamily": "RedHat", "ansible_pkg_mgr": "yum", "facter_filesystems": "ext2,ext3,ext4,iso9660", "ansible_memory_mb": {"real": {"total": 7792, "used": 3726, "free": 4066}, "swap": {"cached": 0, "total": 8191, "free": 8191, "used": 0}, "nocache": {"used": 879, "free": 6913}}, "ansible_distribution": "CentOS", "ansible_user_dir": "/root", "facter_puppetversion": "4.8.2", "facter_lsbdistrelease": "7.4.1708", "ansible_all_ipv6_addresses": ["fe80::a41d:13ff:fece:75d0", "fe80::8c0e:9eff:fe4c:d45", "fe80::f816:3eff:fe8f:1c3d"], "facter_operatingsystemrelease": "7.4.1708", "ansible_uptime_seconds": 3969, "ansible_kernel": "3.10.0-693.2.2.el7.x86_64", "facter_bios_version": "2:1.10.2-58953eb7", "ansible_system_capabilities_enforced": "True", "ansible_python": {"executable": "/usr/bin/python", "version": {"micro": 5, "major": 2, "releaselevel": "final", "serial": 0, "minor": 7}, "type": "CPython", "has_sslcontext": true, "version_info": [2, 7, 5, "final", 0]}, "ansible_user_shell": "/bin/bash", "facter_processor3": "Intel Core Processor (Haswell, no TSX)", "facter_selinux_config_policy": "targeted", "facter_architecture": "x86_64", "facter_lsbminordistrelease": "4", "ansible_product_serial": "00782d65-d0de-e411-8000-001e67caf4f2", "facter_netmask_br_ex": "255.255.255.0", "facter_blockdevice_vda_size": 85899345920, "facter_fqdn": "centos-7-2-node-ovh-gra1-10937570-899082", "ansible_fips": false, "facter_system_uptime": {"seconds": 3967, "hours": 1, "uptime": "1:06 hours", "days": 0}, "ansible_user_id": "root", "facter_ipaddress_lo": "127.0.0.1", "facter_os": {"release": {"major": "7", "full": 
"7.4.1708", "minor": "4"}, "lsb": {"distdescription": "CentOS Linux release 7.4.1708 (Core) ", "majdistrelease": "7", "distrelease": "7.4.1708", "release": ":core-4.1-amd64:core-4.1-noarch", "distid": "CentOS", "minordistrelease": "4", "distcodename": "Core"}, "name": "CentOS", "family": "RedHat"}, "facter_operatingsystemmajrelease": "7", "facter_manufacturer": "OpenStack Foundation", "facter_mtu_ovs_system": 1500, "facter_augeasversion": "1.4.0", "facter_memorysize": "7.61 GB", "ansible_vxlan_sys_4789": {"macaddress": "a6:1d:13:ce:75:d0", "features": {}, "mtu": 65470, "device": "vxlan_sys_4789", "promisc": true, "ipv6": [{"scope": "link", "prefix": "64", "address": "fe80::a41d:13ff:fece:75d0"}], "active": true, "type": "ether"}, "facter_path": "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin/", "ansible_processor_vcpus": 8, "facter_network_eth0": "149.202.161.193", "ansible_processor": ["GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)"], "facter_is_virtual": true, "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIx21Jc8Uo0qj7s3h4dP5sJfV3ywMwOCRb1LfZFDp6PpPwrOcmO18h/JoezZ/NODahprEFRzN1y1uMElgteuPk4=", "ansible_mounts": [{"uuid": "2d930d32-4be2-4440-8fec-05114b535f83", "size_total": 80472600576, "mount": "/", "size_available": 58693193728, "fstype": "ext4", "device": "/dev/vda1", "options": "rw,seclabel,relatime,data=ordered"}, {"uuid": "2017-09-14-19-58-45-00", "size_total": 456704, "mount": "/mnt/config", "size_available": 0, "fstype": "iso9660", "device": "/dev/sr0", "options": "ro,relatime,mode=0700"}], "ansible_system_vendor": "OpenStack Foundation", "facter_selinux_policyversion": "28", "facter_sshed25519key": "AAAAC3NzaC1lZDI1NTE5AAAAIMiDcCDeVqcgiZ5Kq6zKs/G85DVDZZvN0jlcrTkb+8Jd", "ansible_virtualization_role": "guest", "ansible_swaptotal_mb": 8191, "facter_selinux_config_mode": "enforcing", "facter_selinux": true, "facter_partitions": {"vda1": {"size": "167770079", "mount": "/", "filesystem": "ext4", "uuid": "2d930d32-4be2-4440-8fec-05114b535f83", "label": "cloudimg-rootfs"}}, "facter_rubyversion": "2.0.0", "ansible_distribution_major_version": "7", "facter_lsbdistdescription": "CentOS Linux release 7.4.1708 (Core) ", "ansible_lsb": {"release": "7.4.1708", "major_release": "7", "codename": "Core", "id": "CentOS", "description": "CentOS Linux release 7.4.1708 (Core)"}, "facter_memoryfree": "6.71 GB", "facter_memorysize_mb": "7792.93", "ansible_default_ipv6": {}, "facter_kernelmajversion": "3.10", "ansible_machine": "x86_64", "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDM1gU/qTArTZ7nQdQ6ZvL7Q6wNxBJclWLUg+TIPX9jLS7jg0enYcdH4C6ZVqjiceYllLo272fbl8kLWOW/YhLp1vIuAz2VVfv70zHy04SyVpeY4utMFccnpZVaiK8uABQkO8PZD0HalDY5BMN9cr6jq/0Jd/ZYVcYPsT3aleSN3CMdAdFMA9DqGDdBKp9UcGaXJ3nt3l+H+XzhyyKOAVu/r48VoUPekU5mkYPVQm/JJnj8yvqRsIalgXJo1uN0zieqgzY5jWV6UjUta0WC27ebwr8TJ/9zZOBE20A1QQmU8bDMvyPu5IajK9DP048rCGoSOP/zKBRGyS7Pvxd8raDx", "facter_ipaddress": "192.168.24.3", "ansible_user_gecos": "root", "facter_kernelrelease": "3.10.0-693.2.2.el7.x86_64", "ansible_br_ex": {"macaddress": "8e:0e:9e:4c:0d:45", "features": {}, "mtu": 1450, "device": 
"br-ex", "promisc": true, "ipv4": {"broadcast": "global", "netmask": "255.255.255.0", "network": "192.168.24.0", "address": "192.168.24.3"}, "ipv6": [{"scope": "link", "prefix": "64", "address": "fe80::8c0e:9eff:fe4c:d45"}], "active": true, "type": "ether"}, "ansible_processor_threads_per_core": 1, "facter_serialnumber": "00782d65-d0de-e411-8000-001e67caf4f2", "ansible_eth0": {"macaddress": "fa:16:3e:8f:1c:3d", "features": {}, "pciid": "virtio0", "module": "virtio_net", "mtu": 1500, "device": "eth0", "promisc": false, "ipv4": {"broadcast": "149.202.161.193", "netmask": "255.255.255.255", "network": "149.202.161.193", "address": "149.202.161.193"}, "ipv6": [{"scope": "link", "prefix": "64", "address": "fe80::f816:3eff:fe8f:1c3d"}], "active": true, "type": "ether"}, "ansible_system": "Linux", "facter_mtu_lo": 65536, "ansible_all_ipv4_addresses": ["192.168.24.3", "149.202.161.193"], "ansible_python_version": "2.7.5", "facter_kernel": "Linux", "facter_sshecdsakey": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIx21Jc8Uo0qj7s3h4dP5sJfV3ywMwOCRb1LfZFDp6PpPwrOcmO18h/JoezZ/NODahprEFRzN1y1uMElgteuPk4=", "ansible_product_version": "2014.2.4", "ansible_service_mgr": "systemd", "facter_uptime_seconds": 3967, "facter_uptime_days": 0, "ansible_memtotal_mb": 7792, "facter_processor7": "Intel Core Processor (Haswell, no TSX)", "facter_processor6": "Intel Core Processor (Haswell, no TSX)", "facter_processor5": "Intel Core Processor (Haswell, no TSX)", "facter_processor4": "Intel Core Processor (Haswell, no TSX)", "facter_bios_release_date": "04/01/2014", "facter_processor2": "Intel Core Processor (Haswell, no TSX)", "facter_processor1": "Intel Core Processor (Haswell, no TSX)", "facter_processor0": "Intel Core Processor (Haswell, no TSX)", "facter_rubysitedir": "/usr/local/share/ruby/site_ruby/", "ansible_real_user_id": 0, "facter_macaddress": "8e:0e:9e:4c:0d:45", "ansible_dns": {"nameservers": ["127.0.0.1"]}, "ansible_effective_group_id": 0, "ansible_form_factor": "Other", "facter_macaddress_vxlan_sys_4789": "a6:1d:13:ce:75:d0", "facter_productname": "OpenStack Nova", "ansible_lo": {"features": {}, "mtu": 65536, "device": "lo", "promisc": false, "ipv4": {"broadcast": "host", "netmask": "255.0.0.0", "network": "127.0.0.0", "address": "127.0.0.1"}, "ipv6": [{"scope": "host", "prefix": "128", "address": "::1"}], "active": true, "type": "loopback"}, "facter_swapsize": "8.00 GB", "facter_blockdevices": "sr0,vda", "facter_macaddress_ovs_system": "fe:44:06:15:b0:4d", "facter_facterversion": "2.4.4", "ansible_gather_subset": ["hardware", "network", "virtual"], "ansible_apparmor": {"status": "disabled"}, "facter_interfaces": "br_ex,eth0,lo,ovs_system,vxlan_sys_4789", "facter_network_lo": "127.0.0.0", "facter_processorcount": 8, "facter_netmask_lo": "255.0.0.0", "facter_swapfree": "8.00 GB", "ansible_memfree_mb": 4066, "facter_mtu_eth0": 1500, "facter_sshrsakey": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDM1gU/qTArTZ7nQdQ6ZvL7Q6wNxBJclWLUg+TIPX9jLS7jg0enYcdH4C6ZVqjiceYllLo272fbl8kLWOW/YhLp1vIuAz2VVfv70zHy04SyVpeY4utMFccnpZVaiK8uABQkO8PZD0HalDY5BMN9cr6jq/0Jd/ZYVcYPsT3aleSN3CMdAdFMA9DqGDdBKp9UcGaXJ3nt3l+H+XzhyyKOAVu/r48VoUPekU5mkYPVQm/JJnj8yvqRsIalgXJo1uN0zieqgzY5jWV6UjUta0WC27ebwr8TJ/9zZOBE20A1QQmU8bDMvyPu5IajK9DP048rCGoSOP/zKBRGyS7Pvxd8raDx", "ansible_product_name": "OpenStack Nova", "facter_kernelversion": "3.10.0", "facter_processors": {"models": ["Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", 
"Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)"], "count": 8, "physicalcount": 8}, "facter_physicalprocessorcount": 8, "ansible_processor_count": 8, "facter_uniqueid": "ca95c1a1", "ansible_hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "facter_timezone": "UTC", "facter_ipaddress_br_ex": "192.168.24.3", "facter_sshfp_ecdsa": "SSHFP 3 1 973e61eac44b8a6d326eb3b9765626e35af7cec4\
0.233 | 5562: SSHFP 3 2 4234e72c5aef9a540da647432707c6b37d689c32f25dff59adefbdaf8d1ba79c", "ansible_interfaces": ["lo", "ovs-system", "br-ex", "vxlan_sys_4789", "eth0"], "ansible_machine_id": "aa9b364793ba47a28b44d1f9ac674146", "facter_ipaddress_eth0": "149.202.161.193", "facter_virtual": "kvm", "ansible_fqdn": "centos-7-2-node-ovh-gra1-10937570-899082", "ansible_user_gid": 0, "facter_ps": "ps -ef", "facter_netmask": "255.255.255.0", "ansible_nodename": "centos-7-2-node-ovh-gra1-10937570-899082", "facter_mtu_vxlan_sys_4789": 65470, "facter_rubyplatform": "x86_64-linux", "facter_hardwareisa": "x86_64", "ansible_lvm": {"lvs": {}, "vgs": {}}, "ansible_product_uuid": "50A0291F-48F8-4E97-8222-92D27C9EFE59", "ansible_system_capabilities": ["cap_chown", "cap_dac_override", "cap_dac_read_search", "cap_fowner", "cap_fsetid", "cap_kill", "cap_setgid", "cap_setuid", "cap_setpcap", "cap_linux_immutable", "cap_net_bind_service", "cap_net_broadcast", "cap_net_admin", "cap_net_raw", "cap_ipc_lock", "cap_ipc_owner", "cap_sys_module", "cap_sys_rawio", "cap_sys_chroot", "cap_sys_ptrace", "cap_sys_pacct", "cap_sys_admin", "cap_sys_boot", "cap_sys_nice", "cap_sys_resource", "cap_sys_time", "cap_sys_tty_config", "cap_mknod", "cap_lease", "cap_audit_write", "cap_audit_control", "cap_setfcap", "cap_mac_override", "cap_mac_admin", "cap_syslog", "35", "36+ep"], "facter_mtu_br_ex": 1450, "ansible_domain": "", "facter_blockdevice_vda_vendor": "0x1af4", "ansible_date_time": {"weekday_number": "4", "iso8601_basic_short": "20170914T190509", "tz": "UTC", "weeknumber": "37", "hour": "19", "year": "2017", "minute": "05", "tz_offset": "+0000", "month": "09", "epoch": "1505415909", "iso8601_micro": "2017-09-14T19:05:09.491160Z", "weekday": "Thursday", "time": "19:05:09", "date": "2017-09-14", "iso8601": "2017-09-14T19:05:09Z", "day": "14", "iso8601_basic": "20170914T190509491070", "second": "09"}, "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIMiDcCDeVqcgiZ5Kq6zKs/G85DVDZZvN0jlcrTkb+8Jd", "ansible_processor_cores": 1, "facter_macaddress_br_ex": "8e:0e:9e:4c:0d:45", "facter_macaddress_eth0": "fa:16:3e:8f:1c:3d", "ansible_virtualization_type": "openstack", "facter_ec2_metadata": {"instance-type": "ssd-osFoundation-3", "local-ipv4": "149.202.161.193", "reservation-id": "r-rj0lyd87", "local-hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "placement": {"availability-zone": "nova"}, "ami-launch-index": "0", "public-hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "ramdisk-id": "None", "public-keys": {"0": {"openssh-key": ["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLsTZJ8hXTmzjKxYh/7V07mIy8xl2HL+9BaUlt6A6TMsL3LSvaVQNSgmXX5g0XfPWSCKmkZb1O28q49jQI2n7n7+sHkxn0dJDxj1N2oNrzNY7pDuPrdtCijczLFdievygXNhXNkQ2WIqHXDquN/jfLLJ9L0jxtxtsUMbiL2xxZEZcaf/K5MqyPhscpqiVNE1MjE4xgPbIbv8gCKtPpYIIrktOMb4JbV7rhOp5DcSP5gXtLhOF5fbBpZ+szqrTVUcBX0oTYr3iRfOje9WPsTZIk9vBfBtF416mCNxMSRc7KhSW727AnUu85hS0xiP0MRAf69KemG1OE1pW+LtDIAEYp mordred@camelot", "", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvKYcWK1T7e3PKSFiqb03EYktnoxVASpPoq2rJw2JvhsP0JfS+lKrPzpUQv7L4JCuQMsPNtZ8LnwVEft39k58Kh8XMebSfaqPYAZS5zCNvQUQIhP9myOevBZf4CDeG+gmssqRFcWEwIllfDuIzKBQGVbomR+Y5QuW0HczIbkoOYI6iyf2jB6xg+bmzR2HViofNrSa62CYmHS6dO04Z95J27w6jGWpEOTBjEQvnb9sdBc4EzaBVmxCpa2EilB1u0th7/DvuH0yP4T+X8G8UjW1gZCTOVw06fqlBCST4KjdWw1F/AuOCT7048klbf4H+mCTaEcPzzu3Fkv8ckMWtS/Z9Q== jeblair@operational-necessity", "", "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCnfoVhOTkrY7uoebL8PoHXb0Fg4jJqGCbwkxUdNUdheIdbnfyjuRG3iL8WZnzf7nzWnD+IGo6kkAo8BkNMK9L0P0Y+5IjI8NH49KU22tQ1umij4EIf5tzLh4gsqkJmy6QLrlbf10m6UF4rLFQhKzOd4b2H2K6KbP00CIymvbW3BwvNDODM4xRE2uao387qfvXZBUkB0PpRD+7fWPoN58gpFUm407Eba3WwX5PCD+1DD+RVBsG8maIDXerQ7lvFLoSuyMswv1TfkvCj0ZFhSFbfTd2ZysCu6eryFfeixR7NY9SNcp9YTqG6LrxGA7Ci6wz+hycFHXlDrlBgfFJDe5At clark@work", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3KnRBTH5QPpKjf4RWu4akzYt2gwp796cMkFl5vu8e7G/cHuh4979FeNJXMVP6F3rvZB+yXDHLCU5LBVLq0K+1GbAZT/hH38hpMOIvniwKIquvI6C/drkVPHO6YmVlapw/NI530PGnT/TAqCOycHBO5eF1bYsaqV1yZqvs9v7UZc6J4LukoLZwpmyWZ5P3ltAiiy8+FGq3SLCKWDMmv/Bjz4zTsaNbSWThJi0BydINjC1/0ze5Tyc/XgW1sDuxmmXJxgQp4EvLpronqb2hT60iA52kj8lrmoCIryRpgnbaRA7BrxKF8zIr0ZALHijxEUeWHhFJDIVRGUf0Ef0nrmBv fungi-openstack-2015", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHGuIVB/WxBd7k1R8x2FyfqT6KxRnoM7lE5RE8gvBk2r8cQeH5k1c+P5JrBvWpmqXv4satoivYOBiIb7JXEgIxx62YUx/JQ0J7k3w+av6h4iFe2OhOtEOjMF5F8/wO8a/95OeTZPzBZlUfA3hx754kuw3Q/aBKQUOHWxJOIedGyVHeJc7XiFj3RXIufFuUfng9+p4Z3q6d2/WpuKqs00WI0CLF17PkU4i8P9CraJR1dmsWW6zoxMT2G+DwMFI7ZMS3xrVBRuLwrLlbylVLW2kOJ0JeyjHnRh7X1kR7KG3cGOOjA1YQ0e+mXvremcO3/3o6Iop/N1AtqVuYCKlZc7Y9 slukjanov@mirantis.com", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9x1rhTVOEQEanrN+ecycaDtAbbh3kr41Rxx7galtLq0JwftjsZqv2Vwl9c8ARmm8HiHcLwDoaZB9gvs6teMScCB+5a1fcohiycJBl2olNFRzkGapDaTvl74aLXQBWaV84D8tUavEl26zcgwrv9WLUsy9pnHoo5K0BzbK7vT2g3VictCphveC2vdjCDeptocWvt4zxCmAY6O7QMKeUjKMlvuy+zCohJcR4BbDnw8EriFAmCeQZcAgfLTyeAvjo384NNIFWyhCwvbCLvpgTplMCp896DWLlXu9eaGUCNjT/sZM8zafAXbfc6OKYFQ5iANAiJktWwKaUaphJkbSVWT5 elizabeth@r2d2", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3onVLOZiiGpQWTCIV0QwHmc3Jvqyl7UaJxIu7D49OQcLHqVZsozI9pSiCdTnWyAaM+E+5wD9yVcSTqMWqn2AZmZSwQ+Fh6KnCgPZ/o63+iCZPGL0RNk20M1iNh5dvdStDnn+j2fpeV/JONF0tBn07QvNL2eF4BwtbTG9Zhl186QNsXjXDghrSO3Etl6DSfcUhxyvMoA2LnclWWD5hLmiRhcBm+PIxveVsr4B+o0k1HV5SUOvJMWtbEC37AH5I818O4fNOob6CnOFaCsbA9oUDzB5rqxutPZb9SmNJpNoLqYqDgyppM0yeql0Kn97tUt7H4j5xHrWoGnJ4IXfuDc0AMmmy4fpcLGkNf7zcBftKS6iz/3AlOXjlp5WZvKxngJj9HIir2SE/qV4Lxw9936BzvAcQyw5+bEsLQJwi+LPZxEqLC6oklkX9dg/+1yBFHsz6mulA0b4Eq7VF9omRzrhhN4iPpU5KQYPRNz7yRYckXDxYnp2lz6yHgSYh2/lqMc+UqmCL9EAWcDw3jsgvJ6kH/YUVUojiRHD9QLqlhOusu1wrTfojjwF05mqkXKmH+LH8f8AJAlMdYg0c2WLlrcxnwCkLLxzU5cYmKcZ41LuLtQR3ik+EKjYzBXXyCEzFm6qQEbR2akpXyxvONgrf7pijrgNOi0GeatUt0bUQcAONYw== jhesketh@infra", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTDia7zLp6NB/DdzhGW/4MDgaQ1yemfF+fGFctrSbBZzP2Aj3RUlBh4Mut3bTIqp/PKNMXVZQbvig5nqF3sB87ZPvmk+7WluFFcQN1RIZnvkYXjF64C+G5PkEZOQW9nqEeElSCV2lXgK98FPrGtK6HgQlYxH5RJa6cufRwYLXLsAwfKRcS3P5oRU2KDORNm6uBfUuX0TyPgtEjYsjCWcffoW+E8kvZbx1DKxF4+u0mWSdkg0P40aAY10mHACtJ4hnu7xNa5Z9Oru1rA1KWL5NHISgy9t5zC1/0jWfYi+tqToBgUCyB8stWgNpHh+QJrpS8CoCDzQLBar0ynnOxBfHH2+s9xJapQNi6ZOC3khWkoxUJn2Gs9FXqow3zGSmEuEKbbUvaGC58U4S0xFcZzF+sOzjRJtw66wE2pQN5Pj/Qw09w6gt05g4nxoxkRVCwMLdnyoIY1oFmywJX3xC1Utu2oCNfgZSn78rqVkE9e11LczPNGvYjl6xQo1r254E0w3QBgo+LaTK5FBRCAbJ76n0IBJ8SZe9foPWjKTGlbCevM6KO8lm58/0m0EfMf9457ZM9KhyXwYvnb+iR7huGC+pwgGemJ4D6vjeE9EUNGSq6igg+v+cl1DHOxVb0s0Tx2T6DMh3usB4C1uoNCR303cmzrNZ94KLXRICQArSClQI7OQ== nibz@hertz", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSR2NmJC8PSanHUpKJuaMmohG80COO2IPkE3Mxhr7US8P1B3p1c6lOrT6M1txRzBY8FlbxfOinGtutP+ADCB2taXfpO8UiaG9eOqojAT/PeP2Y2ov72rVMSWupLozUv2uAR5yyFVFHOjKPYGAa01aJtfzfJujSak8dM0ifFeFwgp/8RBGEfC7atq+45TdrfAURRcEgcOLiF5Aq6fprCOwpllnrH6VoId9YS7u/5xF2/zBjr9PuOP7jEgCaL/+FNqu7jgj87aG5jiZPlweb7GTLJON9H6eFpyfpoJE0sZ1yR9Q+e9FAqQIA44Zi748qKBlFKbLxzoC4mc0SbNUAleEL yolanda@infra", "", "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1CW5E87v8o7O8B5fe7j1uaPCToRdaBukjH2HzQZ+DSGTIPjirLpp5ZXPuyNnmtRMzwld6mlHYlevVEwuZTNyQwS7ut5o0LjyW6yoEcvPq0xMEZLxaso5dZAtzNgf3FzbtaUYBnkhSwX7c24lf8wPGAl7TC3yO0dePQh2lXVdaBiGB9ybVeQr+kwJIxleUE4puuQ+ONJE2D+hHjoQ/huUMpb996pb/YzkjkAxqHguMid0c1taelyW8n17nEDoWvlV9Qqbo8cerhgURo1OBt2zENLjQQ0kOkPxJx4qx3652e0kbkr11y50r9BMs418mnJdWselMxkSqQNZ+XotoH5Dwn+3K2a6Wv4OX3Dqb9SF/JTD7lA/tIkNfxgsRlzfEQ01rK1+g7Je10EnDCLEzHpFjvZ5q4EEMcYqY+osLFpHAOWGLMx+3eY4pz/xEzRP/x3sjGU09uNOZ3oCWUfSkE4xebnnWtxwWZKyFmv3GHtaqJn2UvpAbODPEYyYcOS3XV3zd233W3C09YYnFUyZbGLXpD05Yet5fZfGTnveMRn5/9LZai+dBPwoMWUJdX4yPnGXgOG8zk0u1nWfcNJfYg+xajSUDiMKjDhlkuFK/GXNYuINe42s1TxzL7pJ4X4UhqLiopeJvPg/U5xdCV5pxVKf1MVenrGe2pfwf1Yr2WMv5w== rcarrillocruz@infra", "", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOjz+dkwRWTJcW9Gt3iGHSzRBsvVlTAK6G2oH3+0D41 iwienand+osinfra@redhat.com", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdzEzB2KpNLTTFJGLCNMY53sja37PXFzHHdjWEGaZtaTcuCn/ufV9ql5yhS5/414u9swoHM71H00+nT4uSWcXc2tTRXYWslaiwU47DOtQsD//CvGgIFBNO1EinWhYa5uTSfxI+Z/x4PBu7XFq5wi/JCfJ+iHIWsvXn8U44r1csURcZU0GMPAVG1MO+s3p1W7daVqF9RR7UuwCECb3hdPN1N/M4s6myBiuRXCeDND98dKLf8b342hw+pWvQ3g/OCLcVlYPWT4fy1YGQT8hT+jA2XPfwCtu/k7HKAGH3E8UcnBtY/RI9ibciIFe+Ro7q8t+tp5SgjGLq1NnE4Yp5rpsh david@koala", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCuP0CZE8AYnbm8gxecCxKeRw0wHRyryd+FKmNNsdr0d3UvfCbqNzLigrqEBZsKpofi3M4qCWNpKRyfhnjPynLTQjP1vnX9AbL9UGoiHxScfvh3skntTYMs9ezJRd0rMJJZO76FPo8bJLDlwxAQl8m/nuj3HfYiO5hYE7P+a3rhsJh4nEfBb7xh+Q5yM0PWObkkBl6IRiBYjlcsXNZHgTA5kNuihUk5bHqAw54sHh05DhpgOITpTw4LFbh4Ew2NKq49dEb2xbTuAyAr2DHNOGgIwKEZpwtKZEIGEuiLbb4DQRsfivrvyOjnK2NFjQzGyNOHfsOldWHRQwUKUs8nrxKdXvqcrfMnSVaibeYK2TRL+6jd9kc5SIhWI3XLm7HbX7uXMD7/JQrkL25Rcs6nndDCH72DJLz+ynA/T5umMbNBQ9tybL5z73IOpfShRGjQYego22CxDOy7e/5OEMHNoksbFb1S02viM9O2puS7LDqqfT9JIbbPqCrbRi/zOXo0f4EXo6xKUAmd8qlV+6f/p57/qFihzQDaRFVlFEH3k7qwsw7PYGUTwkPaThe6xyZN6D5jqxCZU3aSYu+FGb0oYo+M5IxOm0Cb4NNsvvkRPxWtwSayfFGu6+m/+/RyA3GBcAMev7AuyKN+K2vGMsLagHOx4i+5ZAcUwGzLeXAENNum3w== pabelanger@redhat.com"]}}, "ami-id": "ami-00021567", "kernel-id": "None", "public-ipv4": null, "block-device-mapping": {"ami": "vda", "root": "/dev/vda"}, "ami-manifest-path": "FIXME", "security-groups": null, "instance-action": "none", "instance-id": "i-006a62e0"}, "facter_memoryfree_mb": "6873.67", "ansible_distribution_release": "Core", "ansible_os_family": "RedHat", "facter_lsbdistcodename": "Core", "ansible_effective_user_id": 0, "facter_sshfp_ed25519": "SSHFP 4 1 a54448a9f87a2db5e04b69de1cd2d6266b1bec6e\
0.233 | 5562: SSHFP 4 2 dde561c94266ebc1e9307e2faf9a049f2599db641ffcac93c55ab4b71025ceb8", "facter_network_br_ex": "192.168.24.0", "facter_lsbrelease": ":core-4.1-amd64:core-4.1-noarch", "ansible_devices": {"vda": {"scheduler_mode": "", "rotational": "1", "vendor": "0x1af4", "sectors": "167772160", "sas_device_handle": null, "sas_address": null, "host": "SCSI storage controller: Red Hat, Inc Virtio block device", "sectorsize": "512", "removable": "0", "support_discard": "0", "model": null, "partitions": {"vda1": {"sectorsize": 512, "uuid": "2d930d32-4be2-4440-8fec-05114b535f83", "sectors": "167770079", "start": "2048", "holders": [], "size": "80.00 GB"}}, "holders": [], "size": "80.00 GB"}, "sr0": {"scheduler_mode": "cfq", "rotational": "1", "vendor": "QEMU", "sectors": "892", "sas_device_handle": null, "sas_address": null, "host": "IDE interface: Intel Corporation 82371SB PIIX3 IDE [Natoma/Triton II]", "sectorsize": "2048", "removable": "1", "support_discard": "0", "model": "QEMU DVD-ROM", "partitions": {}, "holders": [], "size": "1.74 MB"}}, "ansible_user_uid": 0, "ansible_bios_date": "04/01/2014", "facter_selinux_enforced": true, "facter_uptime": "1:06 hours", "facter_blockdevice_sr0_vendor": "QEMU", "ansible_bios_version": "2:1.10.2-58953eb7", "facter_blockdevice_sr0_size": 456704, "facter_swapfree_mb": "8192.00", "facter_hardwaremodel": "x86_64", "facter_id": "root", "facter_type": "Other", "facter_blockdevice_sr0_model": "QEMU DVD-ROM"}}\r
0.233 | 5562: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.233 | 5562: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.233 | 5562: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.233 | 5562: debug1: auto-mux: Trying existing master\r
0.233 | 5562: debug2: fd 3 setting O_NONBLOCK\r
0.233 | 5562: debug2: mux_client_hello_exchange: master version 4\r
0.233 | 5562: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.233 | 5562: debug3: mux_client_request_session: entering\r
0.233 | 5562: debug3: mux_client_request_alive: entering\r
0.233 | 5562: debug3: mux_client_request_alive: done pid = 9310\r
0.233 | 5562: debug3: mux_client_request_session: session request sent\r
0.233 | 5562: debug1: mux_client_request_session: master session id: 2\r
0.233 | 5562: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.233 | 5562: debug2: Received exit status from master 0\r
0.233 | 5562: Shared connection to 149.202.161.193 closed.\r
0.233 | 5562: ')
0.000 | 5563: ok: [149.202.161.193]
0.000 | 5564: META: ran handlers
0.000 | 5565:
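
Everything up to "ok: [149.202.161.193]" is the implicit fact-gathering step: the setup module is uploaded over SFTP in 32 KiB chunks (the SSH2_FXP_WRITE lines), executed under sudo, and its ansible_facts/facter_* JSON comes back on stdout. The task paths below point at /tmp/ansible-mistral-actionq4fk5K/playbook.yaml, a playbook generated by the Mistral action; that playbook is not part of this log, so the following is only a guessed minimal play that would reproduce this stage:

    # hypothetical reconstruction -- the real Mistral-generated playbook is not in the log;
    # its two tasks are sketched further down, after their respective log blocks
    - hosts: all
      become: true         # matches the "sudo -H -S -n -u root" wrapper in the EXEC line
      gather_facts: true   # runs the setup module that produced the JSON above
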
0.000 | 5566: TASK [create user tripleo-admin] ***********************************************
0.416 | 5567: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:3
0.000 | 5568: Using module file /usr/lib/python2.7/site-packages/ansible/modules/system/user.py
0.000 | 5569: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5570: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5571: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5572: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5579: <149.202.161.193> (0, '/home/jenkins
0.000 | 5579: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5579: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5579: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5579: debug1: auto-mux: Trying existing master\r
0.000 | 5579: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5579: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5579: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5579: debug3: mux_client_request_session: entering\r
0.000 | 5579: debug3: mux_client_request_alive: entering\r
0.000 | 5579: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5579: debug3: mux_client_request_session: session request sent\r
0.000 | 5579: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5579: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5579: debug2: Received exit status from master 0\r
0.000 | 5579: ')
0.000 | 5580: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5581: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5582: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5583: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5590: <149.202.161.193> (0, 'ansible-tmp-1505415909.92-41229317469196=/home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196
0.000 | 5590: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5590: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5590: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5590: debug1: auto-mux: Trying existing master\r
0.000 | 5590: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5590: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5590: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5590: debug3: mux_client_request_session: entering\r
0.000 | 5590: debug3: mux_client_request_alive: entering\r
0.000 | 5590: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5590: debug3: mux_client_request_session: session request sent\r
0.000 | 5590: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5590: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5590: debug2: Received exit status from master 0\r
0.000 | 5590: ')
0.000 | 5591: <149.202.161.193> PUT /tmp/tmp4VSvIE TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196/user.py
0.000 | 5592: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5593: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5594: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5601: <149.202.161.193> (0, 'sftp> put /tmp/tmp4VSvIE /home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196/user.py
0.000 | 5601: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5601: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5601: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5601: debug1: auto-mux: Trying existing master\r
0.000 | 5601: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5601: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5601: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5601: debug3: mux_client_request_session: entering\r
0.000 | 5601: debug3: mux_client_request_alive: entering\r
0.000 | 5601: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5601: debug3: mux_client_request_session: session request sent\r
0.000 | 5601: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5601: debug2: Remote version: 3\r
0.000 | 5601: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5601: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5601: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5601: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5601: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5601: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5601: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5601: debug3: Looking up /tmp/tmp4VSvIE\r
0.000 | 5601: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5601: debug3: Received stat reply T:101 I:2\r
0.000 | 5601: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5601: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196/user.py\r
0.000 | 5601: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 5601: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5601: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 5601: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:32768\r
0.000 | 5601: debug3: Sent message SSH2_FXP_WRITE I:6 O:65536 S:5697\r
0.000 | 5601: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5601: debug3: In write loop, ack for 5 32768 bytes at 32768\r
0.000 | 5601: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5601: debug3: In write loop, ack for 6 5697 bytes at 65536\r
0.000 | 5601: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5601: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5601: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5601: debug2: Received exit status from master 0\r
0.000 | 5601: ')
0.000 | 5602: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5603: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5604: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5605: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5612: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5612: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5612: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5612: debug1: auto-mux: Trying existing master\r
0.000 | 5612: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5612: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5612: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5612: debug3: mux_client_request_session: entering\r
0.000 | 5612: debug3: mux_client_request_alive: entering\r
0.000 | 5612: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5612: debug3: mux_client_request_session: session request sent\r
0.000 | 5612: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5612: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5612: debug2: Received exit status from master 0\r
0.000 | 5612: ')
0.000 | 5613: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5614: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5615: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5616: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5666: }
0.000 | 5667:
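
Most of the user module's result JSON is not reproduced here (log lines 5617-5665); only its closing brace appears as context. The module file and task path are logged, but not the task's arguments, so the following sketch of playbook.yaml line 3 is a hedged guess:

    # hypothetical task for playbook.yaml:3 -- the user module is confirmed by the
    # "Using module file .../system/user.py" line above; its arguments are a guess
    - name: create user tripleo-admin
      become: true
      user:
        name: tripleo-admin
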
0.000 | 5668: TASK [grant admin rights to user tripleo-admin] ********************************
0.416 | 5669: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:5
0.000 | 5670: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/stat.py
0.000 | 5671: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5672: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5673: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5674: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5681: <149.202.161.193> (0, '/home/jenkins
0.000 | 5681: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5681: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5681: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5681: debug1: auto-mux: Trying existing master\r
0.000 | 5681: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5681: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5681: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5681: debug3: mux_client_request_session: entering\r
0.000 | 5681: debug3: mux_client_request_alive: entering\r
0.000 | 5681: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5681: debug3: mux_client_request_session: session request sent\r
0.000 | 5681: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5681: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5681: debug2: Received exit status from master 0\r
0.000 | 5681: ')
0.000 | 5682: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5683: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5684: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5685: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5692: <149.202.161.193> (0, 'ansible-tmp-1505415910.55-235869433965926=/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926
0.000 | 5692: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5692: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5692: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5692: debug1: auto-mux: Trying existing master\r
0.000 | 5692: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5692: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5692: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5692: debug3: mux_client_request_session: entering\r
0.000 | 5692: debug3: mux_client_request_alive: entering\r
0.000 | 5692: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5692: debug3: mux_client_request_session: session request sent\r
0.000 | 5692: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5692: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5692: debug2: Received exit status from master 0\r
0.000 | 5692: ')
0.000 | 5693: <149.202.161.193> PUT /tmp/tmpnszOvS TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926/stat.py
0.000 | 5694: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5695: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5696: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5703: <149.202.161.193> (0, 'sftp> put /tmp/tmpnszOvS /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926/stat.py
0.000 | 5703: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5703: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5703: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5703: debug1: auto-mux: Trying existing master\r
0.000 | 5703: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5703: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5703: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5703: debug3: mux_client_request_session: entering\r
0.000 | 5703: debug3: mux_client_request_alive: entering\r
0.000 | 5703: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5703: debug3: mux_client_request_session: session request sent\r
0.000 | 5703: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5703: debug2: Remote version: 3\r
0.000 | 5703: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5703: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5703: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5703: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5703: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5703: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5703: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5703: debug3: Looking up /tmp/tmpnszOvS\r
0.000 | 5703: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5703: debug3: Received stat reply T:101 I:2\r
0.000 | 5703: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5703: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926/stat.py\r
0.000 | 5703: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 5703: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5703: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 5703: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:27907\r
0.000 | 5703: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5703: debug3: In write loop, ack for 5 27907 bytes at 32768\r
0.000 | 5703: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5703: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5703: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5703: debug2: Received exit status from master 0\r
0.000 | 5703: ')
0.000 | 5704: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5705: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5706: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5707: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5714: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5714: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5714: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5714: debug1: auto-mux: Trying existing master\r
0.000 | 5714: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5714: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5714: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5714: debug3: mux_client_request_session: entering\r
0.000 | 5714: debug3: mux_client_request_alive: entering\r
0.000 | 5714: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5714: debug3: mux_client_request_session: session request sent\r
0.000 | 5714: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5714: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5714: debug2: Received exit status from master 0\r
0.000 | 5714: ')
0.000 | 5715: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5716: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5717: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5718: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5725: <149.202.161.193> (0, '\r
0.000 | 5725: {"invocation": {"module_args": {"checksum_algorithm": "sha1", "get_checksum": true, "path": "/etc/sudoers.d/tripleo-admin", "checksum_algo": "sha1", "follow": false, "get_md5": false, "get_mime": true, "get_attributes": true}}, "stat": {"exists": false}, "changed": false}\r
0.000 | 5725: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5725: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5725: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5725: debug1: auto-mux: Trying existing master\r
0.000 | 5725: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5725: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5725: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5725: debug3: mux_client_request_session: entering\r
0.000 | 5725: debug3: mux_client_request_alive: entering\r
0.000 | 5725: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5725: debug3: mux_client_request_session: session request sent\r
0.000 | 5725: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5725: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5725: debug2: Received exit status from master 0\r
0.000 | 5725: Shared connection to 149.202.161.193 closed.\r
0.000 | 5725: ')
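
The stat call above is the copy action plugin's remote existence and checksum check on its destination: /etc/sudoers.d/tripleo-admin does not exist yet ("stat": {"exists": false}), so the play proceeds to upload a 37-byte rendered "source" file and then copy.py itself (log lines 5748-5781 below). The actual sudoers content is not in the log; a conventional NOPASSWD drop-in, as sketched here, happens to be exactly 37 bytes, matching the logged SSH2_FXP_WRITE size:

    # hypothetical task for playbook.yaml:5 -- the stat.py/copy.py sequence is logged,
    # the content itself is a guess ("tripleo-admin ALL=(ALL) NOPASSWD:ALL\n" is
    # 37 bytes, the same size as the SFTP write for the "source" file)
    - name: grant admin rights to user tripleo-admin
      become: true
      copy:
        dest: /etc/sudoers.d/tripleo-admin
        content: "tripleo-admin ALL=(ALL) NOPASSWD:ALL\n"
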
0.000 | 5726: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5727: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5728: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5729: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5736: <149.202.161.193> (0, '/home/jenkins
0.000 | 5736: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5736: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5736: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5736: debug1: auto-mux: Trying existing master\r
0.000 | 5736: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5736: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5736: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5736: debug3: mux_client_request_session: entering\r
0.000 | 5736: debug3: mux_client_request_alive: entering\r
0.000 | 5736: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5736: debug3: mux_client_request_session: session request sent\r
0.000 | 5736: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5736: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5736: debug2: Received exit status from master 0\r
0.000 | 5736: ')
0.000 | 5737: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5738: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5739: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5740: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5747: <149.202.161.193> (0, 'ansible-tmp-1505415910.91-263263336394362=/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362
0.000 | 5747: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5747: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5747: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5747: debug1: auto-mux: Trying existing master\r
0.000 | 5747: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5747: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5747: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5747: debug3: mux_client_request_session: entering\r
0.000 | 5747: debug3: mux_client_request_alive: entering\r
0.000 | 5747: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5747: debug3: mux_client_request_session: session request sent\r
0.000 | 5747: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5747: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5747: debug2: Received exit status from master 0\r
0.000 | 5747: ')
0.000 | 5748: <149.202.161.193> PUT /tmp/tmplExnaD TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/source
0.000 | 5749: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5750: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5751: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5758: <149.202.161.193> (0, 'sftp> put /tmp/tmplExnaD /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/source
0.000 | 5758: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5758: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5758: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5758: debug1: auto-mux: Trying existing master\r
0.000 | 5758: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5758: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5758: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5758: debug3: mux_client_request_session: entering\r
0.000 | 5758: debug3: mux_client_request_alive: entering\r
0.000 | 5758: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5758: debug3: mux_client_request_session: session request sent\r
0.000 | 5758: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5758: debug2: Remote version: 3\r
0.000 | 5758: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5758: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5758: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5758: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5758: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5758: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5758: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5758: debug3: Looking up /tmp/tmplExnaD\r
0.000 | 5758: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5758: debug3: Received stat reply T:101 I:2\r
0.000 | 5758: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5758: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/source\r
0.000 | 5758: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:37\r
0.000 | 5758: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5758: debug3: In write loop, ack for 4 37 bytes at 0\r
0.000 | 5758: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5758: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5758: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5758: debug2: Received exit status from master 0\r
0.000 | 5758: ')
0.000 | 5759: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5760: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5761: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5762: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5770: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/copy.py
0.000 | 5771: <149.202.161.193> PUT /tmp/tmpQnOWmw TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/copy.py
0.000 | 5772: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5773: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5774: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5781: <149.202.161.193> (0, 'sftp> put /tmp/tmpQnOWmw /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/copy.py
0.000 | 5781: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5781: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5781: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5781: debug1: auto-mux: Trying existing master\r
0.000 | 5781: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5781: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5781: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5781: debug3: mux_client_request_session: entering\r
0.000 | 5781: debug3: mux_client_request_alive: entering\r
0.000 | 5781: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5781: debug3: mux_client_request_session: session request sent\r
0.000 | 5781: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5781: debug2: Remote version: 3\r
0.000 | 5781: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5781: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5781: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5781: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5781: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5781: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5781: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5781: debug3: Looking up /tmp/tmpQnOWmw\r
0.000 | 5781: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5781: debug3: Received stat reply T:101 I:2\r
0.000 | 5781: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5781: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/copy.py\r
0.000 | 5781: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 5781: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5781: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 5781: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28018\r
0.000 | 5781: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5781: debug3: In write loop, ack for 5 28018 bytes at 32768\r
0.000 | 5781: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5781: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5781: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5781: debug2: Received exit status from master 0\r
0.000 | 5781: ')
0.000 | 5782: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5783: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5784: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5785: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5792: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5792: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5792: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5792: debug1: auto-mux: Trying existing master\r
0.000 | 5792: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5792: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5792: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5792: debug3: mux_client_request_session: entering\r
0.000 | 5792: debug3: mux_client_request_alive: entering\r
0.000 | 5792: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5792: debug3: mux_client_request_session: session request sent\r
0.000 | 5792: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5792: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5792: debug2: Received exit status from master 0\r
0.000 | 5792: ')
0.000 | 5793: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5794: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5795: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5796: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5843: }
0.000 | 5844:
0.000 | 5845: TASK [ensure .ssh dir exists for user tripleo-admin] ***************************
0.416 | 5846: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:9
0.000 | 5847: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/file.py
0.000 | 5848: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5849: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5850: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5851: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5858: <149.202.161.193> (0, '/home/jenkins
0.000 | 5858: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5858: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5858: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5858: debug1: auto-mux: Trying existing master\r
0.000 | 5858: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5858: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5858: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5858: debug3: mux_client_request_session: entering\r
0.000 | 5858: debug3: mux_client_request_alive: entering\r
0.000 | 5858: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5858: debug3: mux_client_request_session: session request sent\r
0.000 | 5858: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5858: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5858: debug2: Received exit status from master 0\r
0.000 | 5858: ')
0.000 | 5859: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5860: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5861: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5862: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5869: <149.202.161.193> (0, 'ansible-tmp-1505415911.71-211122090089966=/home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966
0.000 | 5869: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5869: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5869: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5869: debug1: auto-mux: Trying existing master\r
0.000 | 5869: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5869: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5869: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5869: debug3: mux_client_request_session: entering\r
0.000 | 5869: debug3: mux_client_request_alive: entering\r
0.000 | 5869: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5869: debug3: mux_client_request_session: session request sent\r
0.000 | 5869: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5869: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5869: debug2: Received exit status from master 0\r
0.000 | 5869: ')
0.000 | 5870: <149.202.161.193> PUT /tmp/tmpTZUrAq TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966/file.py
0.000 | 5871: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5872: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5873: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5880: <149.202.161.193> (0, 'sftp> put /tmp/tmpTZUrAq /home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966/file.py
0.000 | 5880: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5880: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5880: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5880: debug1: auto-mux: Trying existing master\r
0.000 | 5880: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5880: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5880: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5880: debug3: mux_client_request_session: entering\r
0.000 | 5880: debug3: mux_client_request_alive: entering\r
0.000 | 5880: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5880: debug3: mux_client_request_session: session request sent\r
0.000 | 5880: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5880: debug2: Remote version: 3\r
0.000 | 5880: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5880: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5880: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5880: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5880: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5880: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5880: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5880: debug3: Looking up /tmp/tmpTZUrAq\r
0.000 | 5880: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5880: debug3: Received stat reply T:101 I:2\r
0.000 | 5880: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5880: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966/file.py\r
0.000 | 5880: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 5880: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5880: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 5880: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28880\r
0.000 | 5880: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5880: debug3: In write loop, ack for 5 28880 bytes at 32768\r
0.000 | 5880: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5880: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5880: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5880: debug2: Received exit status from master 0\r
0.000 | 5880: ')
0.000 | 5881: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5882: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5883: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5884: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5891: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5891: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5891: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5891: debug1: auto-mux: Trying existing master\r
0.000 | 5891: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5891: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5891: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5891: debug3: mux_client_request_session: entering\r
0.000 | 5891: debug3: mux_client_request_alive: entering\r
0.000 | 5891: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5891: debug3: mux_client_request_session: session request sent\r
0.000 | 5891: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5891: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5891: debug2: Received exit status from master 0\r
0.000 | 5891: ')
0.000 | 5892: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5893: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5894: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5895: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5958: }
0.000 | 5959:
0.000 | 5960: TASK [ensure authorized_keys file exists for user tripleo-admin] ***************
0.416 | 5961: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:12
0.000 | 5962: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/file.py
0.000 | 5963: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5964: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5965: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5966: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5973: <149.202.161.193> (0, '/home/jenkins
0.000 | 5973: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5973: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5973: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5973: debug1: auto-mux: Trying existing master\r
0.000 | 5973: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5973: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5973: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5973: debug3: mux_client_request_session: entering\r
0.000 | 5973: debug3: mux_client_request_alive: entering\r
0.000 | 5973: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5973: debug3: mux_client_request_session: session request sent\r
0.000 | 5973: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5973: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5973: debug2: Received exit status from master 0\r
0.000 | 5973: ')
0.000 | 5974: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5975: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5976: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5977: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5984: <149.202.161.193> (0, 'ansible-tmp-1505415912.11-62209096158516=/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516
0.000 | 5984: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5984: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5984: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5984: debug1: auto-mux: Trying existing master\r
0.000 | 5984: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5984: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5984: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5984: debug3: mux_client_request_session: entering\r
0.000 | 5984: debug3: mux_client_request_alive: entering\r
0.000 | 5984: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5984: debug3: mux_client_request_session: session request sent\r
0.000 | 5984: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5984: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5984: debug2: Received exit status from master 0\r
0.000 | 5984: ')
0.000 | 5985: <149.202.161.193> PUT /tmp/tmpRPnr2l TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516/file.py
0.000 | 5986: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5987: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5988: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 5995: <149.202.161.193> (0, 'sftp> put /tmp/tmpRPnr2l /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516/file.py
0.000 | 5995: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 5995: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 5995: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 5995: debug1: auto-mux: Trying existing master\r
0.000 | 5995: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 5995: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 5995: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 5995: debug3: mux_client_request_session: entering\r
0.000 | 5995: debug3: mux_client_request_alive: entering\r
0.000 | 5995: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 5995: debug3: mux_client_request_session: session request sent\r
0.000 | 5995: debug1: mux_client_request_session: master session id: 2\r
0.000 | 5995: debug2: Remote version: 3\r
0.000 | 5995: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 5995: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 5995: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 5995: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 5995: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 5995: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 5995: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 5995: debug3: Looking up /tmp/tmpRPnr2l\r
0.000 | 5995: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 5995: debug3: Received stat reply T:101 I:2\r
0.000 | 5995: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 5995: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516/file.py\r
0.000 | 5995: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 5995: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5995: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 5995: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28892\r
0.000 | 5995: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5995: debug3: In write loop, ack for 5 28892 bytes at 32768\r
0.000 | 5995: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 5995: debug3: SSH2_FXP_STATUS 0\r
0.000 | 5995: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 5995: debug2: Received exit status from master 0\r
0.000 | 5995: ')
0.000 | 5996: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 5997: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 5998: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 5999: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6006: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6006: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6006: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6006: debug1: auto-mux: Trying existing master\r
0.000 | 6006: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6006: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6006: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6006: debug3: mux_client_request_session: entering\r
0.000 | 6006: debug3: mux_client_request_alive: entering\r
0.000 | 6006: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6006: debug3: mux_client_request_session: session request sent\r
0.000 | 6006: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6006: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6006: debug2: Received exit status from master 0\r
0.000 | 6006: ')
0.000 | 6007: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6008: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6009: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6010: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6073: }
0.000 | 6074:
0.000 | 6075: TASK [authorize TripleO Mistral key for user tripleo-admin] ********************
0.416 | 6076: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:15
0.000 | 6077: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/lineinfile.py
0.000 | 6078: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6079: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6080: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6081: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6088: <149.202.161.193> (0, '/home/jenkins
0.000 | 6088: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6088: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6088: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6088: debug1: auto-mux: Trying existing master\r
0.000 | 6088: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6088: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6088: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6088: debug3: mux_client_request_session: entering\r
0.000 | 6088: debug3: mux_client_request_alive: entering\r
0.000 | 6088: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6088: debug3: mux_client_request_session: session request sent\r
0.000 | 6088: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6088: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6088: debug2: Received exit status from master 0\r
0.000 | 6088: ')
0.000 | 6089: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6090: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6091: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6092: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6099: <149.202.161.193> (0, 'ansible-tmp-1505415912.64-64968773191150=/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150
0.000 | 6099: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6099: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6099: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6099: debug1: auto-mux: Trying existing master\r
0.000 | 6099: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6099: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6099: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6099: debug3: mux_client_request_session: entering\r
0.000 | 6099: debug3: mux_client_request_alive: entering\r
0.000 | 6099: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6099: debug3: mux_client_request_session: session request sent\r
0.000 | 6099: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6099: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6099: debug2: Received exit status from master 0\r
0.000 | 6099: ')
0.000 | 6100: <149.202.161.193> PUT /tmp/tmpUb4rld TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150/lineinfile.py
0.000 | 6101: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6102: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6103: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6110: <149.202.161.193> (0, 'sftp> put /tmp/tmpUb4rld /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150/lineinfile.py
0.000 | 6110: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6110: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6110: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6110: debug1: auto-mux: Trying existing master\r
0.000 | 6110: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6110: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6110: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6110: debug3: mux_client_request_session: entering\r
0.000 | 6110: debug3: mux_client_request_alive: entering\r
0.000 | 6110: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6110: debug3: mux_client_request_session: session request sent\r
0.000 | 6110: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6110: debug2: Remote version: 3\r
0.000 | 6110: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6110: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6110: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6110: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6110: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6110: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6110: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6110: debug3: Looking up /tmp/tmpUb4rld\r
0.000 | 6110: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6110: debug3: Received stat reply T:101 I:2\r
0.000 | 6110: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6110: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150/lineinfile.py\r
0.000 | 6110: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6110: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6110: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6110: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28744\r
0.000 | 6110: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6110: debug3: In write loop, ack for 5 28744 bytes at 32768\r
0.000 | 6110: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6110: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6110: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6110: debug2: Received exit status from master 0\r
0.000 | 6110: ')
0.000 | 6111: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6112: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6113: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6114: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6121: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6121: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6121: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6121: debug1: auto-mux: Trying existing master\r
0.000 | 6121: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6121: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6121: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6121: debug3: mux_client_request_session: entering\r
0.000 | 6121: debug3: mux_client_request_alive: entering\r
0.000 | 6121: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6121: debug3: mux_client_request_session: session request sent\r
0.000 | 6121: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6121: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6121: debug2: Received exit status from master 0\r
0.000 | 6121: ')
0.000 | 6122: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6123: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6124: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6125: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6189: Loading callback plugin default of type stdout, v2.0 from /usr/lib/python2.7/site-packages/ansible/plugins/callback/__init__.pyc
0.000 | 6190:
0.000 | 6191: PLAYBOOK: playbook.yaml ********************************************************
0.335 | 6192: 1 plays in /tmp/ansible-mistral-actionq4fk5K/playbook.yaml
0.000 | 6193:

0.000 | 6197: Using module file /usr/lib/python2.7/site-packages/ansible/modules/system/setup.py
0.000 | 6198: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6199: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6200: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6201: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.002 | 6208: <149.202.161.193> (0, '/home/jenkins
0.002 | 6208: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.002 | 6208: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.002 | 6208: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.002 | 6208: debug1: auto-mux: Trying existing master\r
0.002 | 6208: debug1: Control socket "/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f" does not exist\r
0.002 | 6208: debug2: resolving "149.202.161.193" port 22\r
0.002 | 6208: debug2: ssh_connect_direct: needpriv 0\r
0.002 | 6208: debug1: Connecting to 149.202.161.193 [149.202.161.193] port 22.\r
0.002 | 6208: debug2: fd 3 setting O_NONBLOCK\r
0.002 | 6208: debug1: fd 3 clearing O_NONBLOCK\r
0.002 | 6208: debug1: Connection established.\r
0.002 | 6208: debug3: timeout: 10000 ms remain after connect\r
0.002 | 6208: debug1: SELinux support enabled\r
0.002 | 6208: Could not create directory \'/home/mistral/.ssh\'.\r
0.002 | 6208: debug1: key_load_public: No such file or directory\r
0.002 | 6208: debug1: identity file /tmp/ansible-mistral-actionq4fk5K/ssh_private_key type -1\r
0.002 | 6208: debug1: key_load_public: No such file or directory\r
0.002 | 6208: debug1: identity file /tmp/ansible-mistral-actionq4fk5K/ssh_private_key-cert type -1\r
0.002 | 6208: debug1: Enabling compatibility mode for protocol 2.0\r
0.002 | 6208: debug1: Local version string SSH-2.0-OpenSSH_7.4\r
0.002 | 6208: debug1: Remote protocol version 2.0, remote software version OpenSSH_7.4\r
0.002 | 6208: debug1: match: OpenSSH_7.4 pat OpenSSH* compat 0x04000000\r
0.002 | 6208: debug2: fd 3 setting O_NONBLOCK\r
0.002 | 6208: debug1: Authenticating to 149.202.161.193:22 as \'jenkins\'\r
0.002 | 6208: debug3: send packet: type 20\r
0.002 | 6208: debug1: SSH2_MSG_KEXINIT sent\r
0.002 | 6208: debug3: receive packet: type 20\r
0.002 | 6208: debug1: SSH2_MSG_KEXINIT received\r
0.002 | 6208: debug2: local client KEXINIT proposal\r
0.002 | 6208: debug2: KEX algorithms: curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha256,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1,ext-info-c\r
0.002 | 6208: debug2: host key algorithms: ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ssh-dss-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,rsa-sha2-512,rsa-sha2-256,ssh-rsa,ssh-dss\r
0.002 | 6208: debug2: ciphers ctos: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc\r
0.002 | 6208: debug2: ciphers stoc: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc\r
0.002 | 6208: debug2: MACs ctos: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 6208: debug2: MACs stoc: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 6208: debug2: compression ctos: zlib@openssh.com,zlib,none\r
0.002 | 6208: debug2: compression stoc: zlib@openssh.com,zlib,none\r
0.002 | 6208: debug2: languages ctos: \r
0.002 | 6208: debug2: languages stoc: \r
0.002 | 6208: debug2: first_kex_follows 0 \r
0.002 | 6208: debug2: reserved 0 \r
0.002 | 6208: debug2: peer server KEXINIT proposal\r
0.002 | 6208: debug2: KEX algorithms: curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha256,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1\r
0.002 | 6208: debug2: host key algorithms: ssh-rsa,rsa-sha2-512,rsa-sha2-256,ecdsa-sha2-nistp256,ssh-ed25519\r
0.002 | 6208: debug2: ciphers ctos: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc,blowfish-cbc,cast128-cbc,3des-cbc\r
0.002 | 6208: debug2: ciphers stoc: chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc,blowfish-cbc,cast128-cbc,3des-cbc\r
0.002 | 6208: debug2: MACs ctos: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 6208: debug2: MACs stoc: umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1\r
0.002 | 6208: debug2: compression ctos: none,zlib@openssh.com\r
0.002 | 6208: debug2: compression stoc: none,zlib@openssh.com\r
0.002 | 6208: debug2: languages ctos: \r
0.002 | 6208: debug2: languages stoc: \r
0.002 | 6208: debug2: first_kex_follows 0 \r
0.002 | 6208: debug2: reserved 0 \r
0.002 | 6208: debug1: kex: algorithm: curve25519-sha256\r
0.002 | 6208: debug1: kex: host key algorithm: ecdsa-sha2-nistp256\r
0.002 | 6208: debug1: kex: server->client cipher: chacha20-poly1305@openssh.com MAC: <implicit> compression: zlib@openssh.com\r
0.002 | 6208: debug1: kex: client->server cipher: chacha20-poly1305@openssh.com MAC: <implicit> compression: zlib@openssh.com\r
0.002 | 6208: debug1: kex: curve25519-sha256 need=64 dh_need=64\r
0.002 | 6208: debug1: kex: curve25519-sha256 need=64 dh_need=64\r
0.002 | 6208: debug3: send packet: type 30\r
0.002 | 6208: debug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r
0.002 | 6208: debug3: receive packet: type 31\r
0.002 | 6208: debug1: Server host key: ecdsa-sha2-nistp256 SHA256:QjTnLFrvmlQNpkdDJwfGs31onDLyXf9Zre+9r40bp5w\r
0.002 | 6208: Failed to add the host to the list of known hosts (/home/mistral/.ssh/known_hosts).\r
0.002 | 6208: debug3: send packet: type 21\r
0.002 | 6208: debug2: set_newkeys: mode 1\r
0.002 | 6208: debug1: rekey after 134217728 blocks\r
0.002 | 6208: debug1: SSH2_MSG_NEWKEYS sent\r
0.002 | 6208: debug1: expecting SSH2_MSG_NEWKEYS\r
0.002 | 6208: debug3: receive packet: type 21\r
0.002 | 6208: debug1: SSH2_MSG_NEWKEYS received\r
0.002 | 6208: debug2: set_newkeys: mode 0\r
0.002 | 6208: debug1: rekey after 134217728 blocks\r
0.002 | 6208: debug2: key: /tmp/ansible-mistral-actionq4fk5K/ssh_private_key ((nil)), explicit\r
0.002 | 6208: debug3: send packet: type 5\r
0.002 | 6208: debug3: receive packet: type 7\r
0.002 | 6208: debug1: SSH2_MSG_EXT_INFO received\r
0.002 | 6208: debug1: kex_input_ext_info: server-sig-algs=<rsa-sha2-256,rsa-sha2-512>\r
0.002 | 6208: debug3: receive packet: type 6\r
0.002 | 6208: debug2: service_accept: ssh-userauth\r
0.002 | 6208: debug1: SSH2_MSG_SERVICE_ACCEPT received\r
0.002 | 6208: debug3: send packet: type 50\r
0.002 | 6208: debug3: receive packet: type 51\r
0.002 | 6208: debug1: Authentications that can continue: publickey,gssapi-keyex,gssapi-with-mic,password\r
0.002 | 6208: debug3: start over, passed a different list publickey,gssapi-keyex,gssapi-with-mic,password\r
0.002 | 6208: debug3: preferred gssapi-with-mic,gssapi-keyex,hostbased,publickey\r
0.002 | 6208: debug3: authmethod_lookup gssapi-with-mic\r
0.002 | 6208: debug3: remaining preferred: gssapi-keyex,hostbased,publickey\r
0.002 | 6208: debug3: authmethod_is_enabled gssapi-with-mic\r
0.002 | 6208: debug1: Next authentication method: gssapi-with-mic\r
0.002 | 6208: debug1: Unspecified GSS failure. Minor code may provide more information
0.002 | 6208: No Kerberos credentials available (default cache: KEYRING:persistent:991)
0.002 | 6208: \r
0.002 | 6208: debug1: Unspecified GSS failure. Minor code may provide more information
0.002 | 6208: No Kerberos credentials available (default cache: KEYRING:persistent:991)
0.002 | 6208: \r
0.002 | 6208: debug2: we did not send a packet, disable method\r
0.002 | 6208: debug3: authmethod_lookup gssapi-keyex\r
0.002 | 6208: debug3: remaining preferred: hostbased,publickey\r
0.002 | 6208: debug3: authmethod_is_enabled gssapi-keyex\r
0.002 | 6208: debug1: Next authentication method: gssapi-keyex\r
0.002 | 6208: debug1: No valid Key exchange context\r
0.002 | 6208: debug2: we did not send a packet, disable method\r
0.002 | 6208: debug3: authmethod_lookup publickey\r
0.002 | 6208: debug3: remaining preferred: ,publickey\r
0.002 | 6208: debug3: authmethod_is_enabled publickey\r
0.002 | 6208: debug1: Next authentication method: publickey\r
0.002 | 6208: debug1: Trying private key: /tmp/ansible-mistral-actionq4fk5K/ssh_private_key\r
0.002 | 6208: debug3: sign_and_send_pubkey: RSA SHA256:pHIV+tENqFn52aon+j4HlKDu9wf8tPZsYaodkGIL7rk\r
0.002 | 6208: debug3: send packet: type 50\r
0.002 | 6208: debug2: we sent a publickey packet, wait for reply\r
0.002 | 6208: debug3: receive packet: type 52\r
0.002 | 6208: debug1: Enabling compression at level 6.\r
0.002 | 6208: debug1: Authentication succeeded (publickey).\r
0.002 | 6208: Authenticated to 149.202.161.193 ([149.202.161.193]:22).\r
0.002 | 6208: debug1: setting up multiplex master socket\r
0.002 | 6208: debug3: muxserver_listen: temporary control path /tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f.lsFoyTVSvT0J2bdT\r
0.002 | 6208: debug2: fd 4 setting O_NONBLOCK\r
0.002 | 6208: debug3: fd 4 is O_NONBLOCK\r
0.002 | 6208: debug3: fd 4 is O_NONBLOCK\r
0.002 | 6208: debug1: channel 0: new [/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f]\r
0.002 | 6208: debug3: muxserver_listen: mux listener channel 0 fd 4\r
0.002 | 6208: debug2: fd 3 setting TCP_NODELAY\r
0.002 | 6208: debug3: ssh_packet_set_tos: set IP_TOS 0x08\r
0.002 | 6208: debug1: control_persist_detach: backgrounding master process\r
0.002 | 6208: debug2: control_persist_detach: background process is 9308\r
0.002 | 6208: debug2: fd 4 setting O_NONBLOCK\r
0.002 | 6208: debug1: forking to background\r
0.002 | 6208: debug1: Entering interactive session.\r
0.002 | 6208: debug1: pledge: id\r
0.002 | 6208: debug2: set_control_persist_exit_time: schedule exit in 60 seconds\r
0.002 | 6208: debug1: multiplexing control connection\r
0.002 | 6208: debug2: fd 5 setting O_NONBLOCK\r
0.002 | 6208: debug3: fd 5 is O_NONBLOCK\r
0.002 | 6208: debug1: channel 1: new [mux-control]\r
0.002 | 6208: debug3: channel_post_mux_listener: new mux channel 1 fd 5\r
0.002 | 6208: debug3: mux_master_read_cb: channel 1: hello sent\r
0.002 | 6208: debug2: set_control_persist_exit_time: cancel scheduled exit\r
0.002 | 6208: debug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r
0.002 | 6208: debug2: process_mux_master_hello: channel 1 slave version 4\r
0.002 | 6208: debug2: mux_client_hello_exchange: master version 4\r
0.002 | 6208: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.002 | 6208: debug3: mux_client_request_session: entering\r
0.002 | 6208: debug3: mux_client_request_alive: entering\r
0.002 | 6208: debug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r
0.002 | 6208: debug2: process_mux_alive_check: channel 1: alive check\r
0.002 | 6208: debug3: mux_client_request_alive: done pid = 9310\r
0.002 | 6208: debug3: mux_client_request_session: session request sent\r
0.002 | 6208: debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 66\r
0.002 | 6208: debug2: process_mux_new_session: channel 1: request tty 0, X 0, agent 0, subsys 0, term "", cmd "/bin/sh -c \'echo ~ && sleep 0\'", env 0\r
0.002 | 6208: debug3: process_mux_new_session: got fds stdin 6, stdout 7, stderr 8\r
0.002 | 6208: debug2: fd 7 setting O_NONBLOCK\r
0.002 | 6208: debug2: fd 8 setting O_NONBLOCK\r
0.002 | 6208: debug1: channel 2: new [client-session]\r
0.002 | 6208: debug2: process_mux_new_session: channel_new: 2 linked to control channel 1\r
0.002 | 6208: debug2: channel 2: send open\r
0.002 | 6208: debug3: send packet: type 90\r
0.002 | 6208: debug3: receive packet: type 80\r
0.002 | 6208: debug1: client_input_global_request: rtype hostkeys-00@openssh.com want_reply 0\r
0.002 | 6208: debug3: receive packet: type 91\r
0.002 | 6208: debug2: callback start\r
0.002 | 6208: debug2: client_session2_setup: id 2\r
0.002 | 6208: debug1: Sending command: /bin/sh -c \'echo ~ && sleep 0\'\r
0.002 | 6208: debug2: channel 2: request exec confirm 1\r
0.002 | 6208: debug3: send packet: type 98\r
0.002 | 6208: debug3: mux_session_confirm: sending success reply\r
0.002 | 6208: debug2: callback done\r
0.002 | 6208: debug2: channel 2: open confirm rwindow 0 rmax 32768\r
0.002 | 6208: debug1: mux_client_request_session: master session id: 2\r
0.002 | 6208: debug2: channel 2: rcvd adjust 2097152\r
0.002 | 6208: debug3: receive packet: type 99\r
0.002 | 6208: debug2: channel_input_status_confirm: type 99 id 2\r
0.002 | 6208: debug2: exec request accepted on channel 2\r
0.002 | 6208: debug3: receive packet: type 98\r
0.002 | 6208: debug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r
0.002 | 6208: debug3: mux_exit_message: channel 2: exit message, exitval 0\r
0.002 | 6208: debug3: receive packet: type 98\r
0.002 | 6208: debug1: client_input_channel_req: channel 2 rtype eow@openssh.com reply 0\r
0.002 | 6208: debug2: channel 2: rcvd eow\r
0.002 | 6208: debug2: channel 2: close_read\r
0.002 | 6208: debug2: channel 2: input open -> closed\r
0.002 | 6208: debug3: receive packet: type 96\r
0.002 | 6208: debug2: channel 2: rcvd eof\r
0.002 | 6208: debug2: channel 2: output open -> drain\r
0.002 | 6208: debug2: channel 2: obuf empty\r
0.002 | 6208: debug2: channel 2: close_write\r
0.002 | 6208: debug2: channel 2: output drain -> closed\r
0.002 | 6208: debug3: receive packet: type 97\r
0.002 | 6208: debug2: channel 2: rcvd close\r
0.002 | 6208: debug3: channel 2: will not send data after close\r
0.002 | 6208: debug2: channel 2: send close\r
0.002 | 6208: debug3: send packet: type 97\r
0.002 | 6208: debug2: channel 2: is dead\r
0.002 | 6208: debug2: channel 2: gc: notify user\r
0.002 | 6208: debug3: mux_master_session_cleanup_cb: entering for channel 2\r
0.002 | 6208: debug2: channel 1: rcvd close\r
0.002 | 6208: debug2: channel 1: output open -> drain\r
0.002 | 6208: debug2: channel 1: close_read\r
0.002 | 6208: debug2: channel 1: input open -> closed\r
0.002 | 6208: debug2: channel 2: gc: user detached\r
0.002 | 6208: debug2: channel 2: is dead\r
0.002 | 6208: debug2: channel 2: garbage collecting\r
0.002 | 6208: debug1: channel 2: free: client-session, nchannels 3\r
0.002 | 6208: debug3: channel 2: status: The following connections are open:\r
0.002 | 6208: #1 mux-control (t16 r-1 i3/0 o1/16 fd 5/5 cc -1)\r
0.002 | 6208: #2 client-session (t4 r0 i3/0 o3/0 fd -1/-1 cc -1)\r
0.002 | 6208: \r
0.002 | 6208: debug2: channel 1: obuf empty\r
0.002 | 6208: debug2: channel 1: close_write\r
0.002 | 6208: debug2: channel 1: output drain -> closed\r
0.002 | 6208: debug2: channel 1: is dead (local)\r
0.002 | 6208: debug2: channel 1: gc: notify user\r
0.002 | 6208: debug3: mux_master_control_cleanup_cb: entering for channel 1\r
0.002 | 6208: debug2: channel 1: gc: user detached\r
0.002 | 6208: debug2: channel 1: is dead (local)\r
0.002 | 6208: debug2: channel 1: garbage collecting\r
0.002 | 6208: debug1: channel 1: free: mux-control, nchannels 2\r
0.002 | 6208: debug3: channel 1: status: The following connections are open:\r
0.002 | 6208: #1 mux-control (t16 r-1 i3/0 o3/0 fd 5/5 cc -1)\r
0.002 | 6208: \r
0.002 | 6208: debug2: set_control_persist_exit_time: schedule exit in 60 seconds\r
0.002 | 6208: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.002 | 6208: debug2: Received exit status from master 0\r
0.002 | 6208: ')
0.000 | 6209: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6210: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6211: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6212: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6219: <149.202.161.193> (0, 'ansible-tmp-1505415905.05-163701451985434=/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434
0.000 | 6219: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6219: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6219: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6219: debug1: auto-mux: Trying existing master\r
0.000 | 6219: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6219: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6219: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6219: debug3: mux_client_request_session: entering\r
0.000 | 6219: debug3: mux_client_request_alive: entering\r
0.000 | 6219: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6219: debug3: mux_client_request_session: session request sent\r
0.000 | 6219: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6219: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6219: debug2: Received exit status from master 0\r
0.000 | 6219: ')
0.000 | 6220: <149.202.161.193> PUT /tmp/tmp4nQKPf TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py
0.000 | 6221: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6222: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6223: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6230: <149.202.161.193> (0, 'sftp> put /tmp/tmp4nQKPf /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py
0.000 | 6230: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6230: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6230: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6230: debug1: auto-mux: Trying existing master\r
0.000 | 6230: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6230: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6230: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6230: debug3: mux_client_request_session: entering\r
0.000 | 6230: debug3: mux_client_request_alive: entering\r
0.000 | 6230: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6230: debug3: mux_client_request_session: session request sent\r
0.000 | 6230: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6230: debug2: Remote version: 3\r
0.000 | 6230: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6230: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6230: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6230: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6230: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6230: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6230: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6230: debug3: Looking up /tmp/tmp4nQKPf\r
0.000 | 6230: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6230: debug3: Received stat reply T:101 I:2\r
0.000 | 6230: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6230: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py\r
0.000 | 6230: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6230: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6230: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6230: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:32768\r
0.000 | 6230: debug3: Sent message SSH2_FXP_WRITE I:6 O:65536 S:32768\r
0.000 | 6230: debug3: Sent message SSH2_FXP_WRITE I:7 O:98304 S:5484\r
0.000 | 6230: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6230: debug3: In write loop, ack for 5 32768 bytes at 32768\r
0.000 | 6230: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6230: debug3: In write loop, ack for 6 32768 bytes at 65536\r
0.000 | 6230: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6230: debug3: In write loop, ack for 7 5484 bytes at 98304\r
0.000 | 6230: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6230: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6230: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6230: debug2: Received exit status from master 0\r
0.000 | 6230: ')
0.000 | 6231: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6232: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6233: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6234: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6241: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6241: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6241: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6241: debug1: auto-mux: Trying existing master\r
0.000 | 6241: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6241: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6241: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6241: debug3: mux_client_request_session: entering\r
0.000 | 6241: debug3: mux_client_request_alive: entering\r
0.000 | 6241: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6241: debug3: mux_client_request_session: session request sent\r
0.000 | 6241: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6241: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6241: debug2: Received exit status from master 0\r
0.000 | 6241: ')
0.000 | 6242: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6243: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6244: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6245: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6249: <149.202.161.193> SSH: PlayContext set ssh_extra_args: ()
0.066 | 6250: <149.202.161.193> SSH: found only ControlPersist; added ControlPath: (-o)(ControlPath=/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f)
0.044 | 6251: <149.202.161.193> SSH: EXEC ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o 'IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key"' -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o User=jenkins -o ConnectTimeout=10 '-o StrictHostKeyChecking=no' -o ControlPath=/tmp/ansible-mistral-actionq4fk5K/.ansible/cp/e5ef5ddf9f -tt 149.202.161.193 '/bin/sh -c '"'"'sudo -H -S -n -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-teymqwmnyccclmdxyoahyempwuibugiz; /usr/bin/python /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py; rm -rf "/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/" > /dev/null 2>&1'"'"'"'"'"'"'"'"' && sleep 0'"'"''
0.233 | 6252: <149.202.161.193> (0, '\r
0.233 | 6252: {"invocation": {"module_args": {"filter": "*", "gather_subset": ["all"], "fact_path": "/etc/ansible/facts.d", "gather_timeout": 10}}, "changed": false, "ansible_facts": {"facter_operatingsystem": "CentOS", "facter_selinux_current_mode": "enforcing", "ansible_real_group_id": 0, "facter_hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "module_setup": true, "facter_uptime_hours": 1, "ansible_distribution_version": "7.4.1708", "facter_sshfp_rsa": "SSHFP 1 1 89d0d78f9560c406110696027a21b1b8ab57c131\
0.233 | 6252: SSHFP 1 2 4f99546e76a088bbcd63d73e78c6735a073b997a1546488b925460f2eda8ed04", "ansible_env": {"USERNAME": "root", "LANG": "en_US.UTF-8", "TERM": "unknown", "SHELL": "/bin/bash", "SUDO_COMMAND": "/bin/sh -c echo BECOME-SUCCESS-teymqwmnyccclmdxyoahyempwuibugiz; /usr/bin/python /home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/setup.py; rm -rf \\"/home/jenkins/.ansible/tmp/ansible-tmp-1505415905.05-163701451985434/\\" > /dev/null 2>&1", "SHLVL": "1", "SUDO_UID": "1000", "SUDO_GID": "1000", "PWD": "/home/jenkins", "LOGNAME": "root", "USER": "root", "PATH": "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin/", "MAIL": "/var/mail/jenkins", "SUDO_USER": "jenkins", "LS_COLORS": "", "HOME": "/root", "_": "/usr/bin/python"}, "facter_swapsize_mb": "8192.00", "facter_lsbmajdistrelease": "7", "facter_gid": "root", "ansible_userspace_bits": "64", "ansible_architecture": "x86_64", "ansible_default_ipv4": {"macaddress": "fa:16:3e:8f:1c:3d", "network": "149.202.161.193", "mtu": 1500, "broadcast": "149.202.161.193", "alias": "eth0", "netmask": "255.255.255.255", "address": "149.202.161.193", "interface": "eth0", "type": "ether", "gateway": "149.202.160.1"}, "ansible_swapfree_mb": 8191, "facter_netmask_eth0": "255.255.255.255", "facter_uuid": "50A0291F-48F8-4E97-8222-92D27C9EFE59", "ansible_cmdline": {"no_timer_check": true, "nomodeset": true, "BOOT_IMAGE": "/boot/vmlinuz-3.10.0-693.2.2.el7.x86_64", "vga": "normal", "console": "ttyS0,115200", "ro": true, "root": "LABEL=cloudimg-rootfs", "nofb": true}, "ansible_selinux": {"status": "enabled", "policyvers": 28, "type": "targeted", "mode": "enforcing", "config_mode": "enforcing"}, "facter_bios_vendor": "SeaBIOS", "ansible_ovs_system": {"macaddress": "fe:44:06:15:b0:4d", "features": {}, "mtu": 1500, "device": "ovs-system", "promisc": true, "active": false, "type": "ether"}, "ansible_userspace_architecture": "x86_64", "facter_lsbdistid": "CentOS", "facter_osfamily": "RedHat", "ansible_pkg_mgr": "yum", "facter_filesystems": "ext2,ext3,ext4,iso9660", "ansible_memory_mb": {"real": {"total": 7792, "used": 3726, "free": 4066}, "swap": {"cached": 0, "total": 8191, "free": 8191, "used": 0}, "nocache": {"used": 879, "free": 6913}}, "ansible_distribution": "CentOS", "ansible_user_dir": "/root", "facter_puppetversion": "4.8.2", "facter_lsbdistrelease": "7.4.1708", "ansible_all_ipv6_addresses": ["fe80::a41d:13ff:fece:75d0", "fe80::8c0e:9eff:fe4c:d45", "fe80::f816:3eff:fe8f:1c3d"], "facter_operatingsystemrelease": "7.4.1708", "ansible_uptime_seconds": 3969, "ansible_kernel": "3.10.0-693.2.2.el7.x86_64", "facter_bios_version": "2:1.10.2-58953eb7", "ansible_system_capabilities_enforced": "True", "ansible_python": {"executable": "/usr/bin/python", "version": {"micro": 5, "major": 2, "releaselevel": "final", "serial": 0, "minor": 7}, "type": "CPython", "has_sslcontext": true, "version_info": [2, 7, 5, "final", 0]}, "ansible_user_shell": "/bin/bash", "facter_processor3": "Intel Core Processor (Haswell, no TSX)", "facter_selinux_config_policy": "targeted", "facter_architecture": "x86_64", "facter_lsbminordistrelease": "4", "ansible_product_serial": "00782d65-d0de-e411-8000-001e67caf4f2", "facter_netmask_br_ex": "255.255.255.0", "facter_blockdevice_vda_size": 85899345920, "facter_fqdn": "centos-7-2-node-ovh-gra1-10937570-899082", "ansible_fips": false, "facter_system_uptime": {"seconds": 3967, "hours": 1, "uptime": "1:06 hours", "days": 0}, "ansible_user_id": "root", "facter_ipaddress_lo": "127.0.0.1", "facter_os": {"release": {"major": "7", "full": 
"7.4.1708", "minor": "4"}, "lsb": {"distdescription": "CentOS Linux release 7.4.1708 (Core) ", "majdistrelease": "7", "distrelease": "7.4.1708", "release": ":core-4.1-amd64:core-4.1-noarch", "distid": "CentOS", "minordistrelease": "4", "distcodename": "Core"}, "name": "CentOS", "family": "RedHat"}, "facter_operatingsystemmajrelease": "7", "facter_manufacturer": "OpenStack Foundation", "facter_mtu_ovs_system": 1500, "facter_augeasversion": "1.4.0", "facter_memorysize": "7.61 GB", "ansible_vxlan_sys_4789": {"macaddress": "a6:1d:13:ce:75:d0", "features": {}, "mtu": 65470, "device": "vxlan_sys_4789", "promisc": true, "ipv6": [{"scope": "link", "prefix": "64", "address": "fe80::a41d:13ff:fece:75d0"}], "active": true, "type": "ether"}, "facter_path": "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin/", "ansible_processor_vcpus": 8, "facter_network_eth0": "149.202.161.193", "ansible_processor": ["GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)", "GenuineIntel", "Intel Core Processor (Haswell, no TSX)"], "facter_is_virtual": true, "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIx21Jc8Uo0qj7s3h4dP5sJfV3ywMwOCRb1LfZFDp6PpPwrOcmO18h/JoezZ/NODahprEFRzN1y1uMElgteuPk4=", "ansible_mounts": [{"uuid": "2d930d32-4be2-4440-8fec-05114b535f83", "size_total": 80472600576, "mount": "/", "size_available": 58693193728, "fstype": "ext4", "device": "/dev/vda1", "options": "rw,seclabel,relatime,data=ordered"}, {"uuid": "2017-09-14-19-58-45-00", "size_total": 456704, "mount": "/mnt/config", "size_available": 0, "fstype": "iso9660", "device": "/dev/sr0", "options": "ro,relatime,mode=0700"}], "ansible_system_vendor": "OpenStack Foundation", "facter_selinux_policyversion": "28", "facter_sshed25519key": "AAAAC3NzaC1lZDI1NTE5AAAAIMiDcCDeVqcgiZ5Kq6zKs/G85DVDZZvN0jlcrTkb+8Jd", "ansible_virtualization_role": "guest", "ansible_swaptotal_mb": 8191, "facter_selinux_config_mode": "enforcing", "facter_selinux": true, "facter_partitions": {"vda1": {"size": "167770079", "mount": "/", "filesystem": "ext4", "uuid": "2d930d32-4be2-4440-8fec-05114b535f83", "label": "cloudimg-rootfs"}}, "facter_rubyversion": "2.0.0", "ansible_distribution_major_version": "7", "facter_lsbdistdescription": "CentOS Linux release 7.4.1708 (Core) ", "ansible_lsb": {"release": "7.4.1708", "major_release": "7", "codename": "Core", "id": "CentOS", "description": "CentOS Linux release 7.4.1708 (Core)"}, "facter_memoryfree": "6.71 GB", "facter_memorysize_mb": "7792.93", "ansible_default_ipv6": {}, "facter_kernelmajversion": "3.10", "ansible_machine": "x86_64", "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDM1gU/qTArTZ7nQdQ6ZvL7Q6wNxBJclWLUg+TIPX9jLS7jg0enYcdH4C6ZVqjiceYllLo272fbl8kLWOW/YhLp1vIuAz2VVfv70zHy04SyVpeY4utMFccnpZVaiK8uABQkO8PZD0HalDY5BMN9cr6jq/0Jd/ZYVcYPsT3aleSN3CMdAdFMA9DqGDdBKp9UcGaXJ3nt3l+H+XzhyyKOAVu/r48VoUPekU5mkYPVQm/JJnj8yvqRsIalgXJo1uN0zieqgzY5jWV6UjUta0WC27ebwr8TJ/9zZOBE20A1QQmU8bDMvyPu5IajK9DP048rCGoSOP/zKBRGyS7Pvxd8raDx", "facter_ipaddress": "192.168.24.3", "ansible_user_gecos": "root", "facter_kernelrelease": "3.10.0-693.2.2.el7.x86_64", "ansible_br_ex": {"macaddress": "8e:0e:9e:4c:0d:45", "features": {}, "mtu": 1450, "device": 
"br-ex", "promisc": true, "ipv4": {"broadcast": "global", "netmask": "255.255.255.0", "network": "192.168.24.0", "address": "192.168.24.3"}, "ipv6": [{"scope": "link", "prefix": "64", "address": "fe80::8c0e:9eff:fe4c:d45"}], "active": true, "type": "ether"}, "ansible_processor_threads_per_core": 1, "facter_serialnumber": "00782d65-d0de-e411-8000-001e67caf4f2", "ansible_eth0": {"macaddress": "fa:16:3e:8f:1c:3d", "features": {}, "pciid": "virtio0", "module": "virtio_net", "mtu": 1500, "device": "eth0", "promisc": false, "ipv4": {"broadcast": "149.202.161.193", "netmask": "255.255.255.255", "network": "149.202.161.193", "address": "149.202.161.193"}, "ipv6": [{"scope": "link", "prefix": "64", "address": "fe80::f816:3eff:fe8f:1c3d"}], "active": true, "type": "ether"}, "ansible_system": "Linux", "facter_mtu_lo": 65536, "ansible_all_ipv4_addresses": ["192.168.24.3", "149.202.161.193"], "ansible_python_version": "2.7.5", "facter_kernel": "Linux", "facter_sshecdsakey": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIx21Jc8Uo0qj7s3h4dP5sJfV3ywMwOCRb1LfZFDp6PpPwrOcmO18h/JoezZ/NODahprEFRzN1y1uMElgteuPk4=", "ansible_product_version": "2014.2.4", "ansible_service_mgr": "systemd", "facter_uptime_seconds": 3967, "facter_uptime_days": 0, "ansible_memtotal_mb": 7792, "facter_processor7": "Intel Core Processor (Haswell, no TSX)", "facter_processor6": "Intel Core Processor (Haswell, no TSX)", "facter_processor5": "Intel Core Processor (Haswell, no TSX)", "facter_processor4": "Intel Core Processor (Haswell, no TSX)", "facter_bios_release_date": "04/01/2014", "facter_processor2": "Intel Core Processor (Haswell, no TSX)", "facter_processor1": "Intel Core Processor (Haswell, no TSX)", "facter_processor0": "Intel Core Processor (Haswell, no TSX)", "facter_rubysitedir": "/usr/local/share/ruby/site_ruby/", "ansible_real_user_id": 0, "facter_macaddress": "8e:0e:9e:4c:0d:45", "ansible_dns": {"nameservers": ["127.0.0.1"]}, "ansible_effective_group_id": 0, "ansible_form_factor": "Other", "facter_macaddress_vxlan_sys_4789": "a6:1d:13:ce:75:d0", "facter_productname": "OpenStack Nova", "ansible_lo": {"features": {}, "mtu": 65536, "device": "lo", "promisc": false, "ipv4": {"broadcast": "host", "netmask": "255.0.0.0", "network": "127.0.0.0", "address": "127.0.0.1"}, "ipv6": [{"scope": "host", "prefix": "128", "address": "::1"}], "active": true, "type": "loopback"}, "facter_swapsize": "8.00 GB", "facter_blockdevices": "sr0,vda", "facter_macaddress_ovs_system": "fe:44:06:15:b0:4d", "facter_facterversion": "2.4.4", "ansible_gather_subset": ["hardware", "network", "virtual"], "ansible_apparmor": {"status": "disabled"}, "facter_interfaces": "br_ex,eth0,lo,ovs_system,vxlan_sys_4789", "facter_network_lo": "127.0.0.0", "facter_processorcount": 8, "facter_netmask_lo": "255.0.0.0", "facter_swapfree": "8.00 GB", "ansible_memfree_mb": 4066, "facter_mtu_eth0": 1500, "facter_sshrsakey": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDM1gU/qTArTZ7nQdQ6ZvL7Q6wNxBJclWLUg+TIPX9jLS7jg0enYcdH4C6ZVqjiceYllLo272fbl8kLWOW/YhLp1vIuAz2VVfv70zHy04SyVpeY4utMFccnpZVaiK8uABQkO8PZD0HalDY5BMN9cr6jq/0Jd/ZYVcYPsT3aleSN3CMdAdFMA9DqGDdBKp9UcGaXJ3nt3l+H+XzhyyKOAVu/r48VoUPekU5mkYPVQm/JJnj8yvqRsIalgXJo1uN0zieqgzY5jWV6UjUta0WC27ebwr8TJ/9zZOBE20A1QQmU8bDMvyPu5IajK9DP048rCGoSOP/zKBRGyS7Pvxd8raDx", "ansible_product_name": "OpenStack Nova", "facter_kernelversion": "3.10.0", "facter_processors": {"models": ["Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", 
"Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)", "Intel Core Processor (Haswell, no TSX)"], "count": 8, "physicalcount": 8}, "facter_physicalprocessorcount": 8, "ansible_processor_count": 8, "facter_uniqueid": "ca95c1a1", "ansible_hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "facter_timezone": "UTC", "facter_ipaddress_br_ex": "192.168.24.3", "facter_sshfp_ecdsa": "SSHFP 3 1 973e61eac44b8a6d326eb3b9765626e35af7cec4\
0.233 | 6252: SSHFP 3 2 4234e72c5aef9a540da647432707c6b37d689c32f25dff59adefbdaf8d1ba79c", "ansible_interfaces": ["lo", "ovs-system", "br-ex", "vxlan_sys_4789", "eth0"], "ansible_machine_id": "aa9b364793ba47a28b44d1f9ac674146", "facter_ipaddress_eth0": "149.202.161.193", "facter_virtual": "kvm", "ansible_fqdn": "centos-7-2-node-ovh-gra1-10937570-899082", "ansible_user_gid": 0, "facter_ps": "ps -ef", "facter_netmask": "255.255.255.0", "ansible_nodename": "centos-7-2-node-ovh-gra1-10937570-899082", "facter_mtu_vxlan_sys_4789": 65470, "facter_rubyplatform": "x86_64-linux", "facter_hardwareisa": "x86_64", "ansible_lvm": {"lvs": {}, "vgs": {}}, "ansible_product_uuid": "50A0291F-48F8-4E97-8222-92D27C9EFE59", "ansible_system_capabilities": ["cap_chown", "cap_dac_override", "cap_dac_read_search", "cap_fowner", "cap_fsetid", "cap_kill", "cap_setgid", "cap_setuid", "cap_setpcap", "cap_linux_immutable", "cap_net_bind_service", "cap_net_broadcast", "cap_net_admin", "cap_net_raw", "cap_ipc_lock", "cap_ipc_owner", "cap_sys_module", "cap_sys_rawio", "cap_sys_chroot", "cap_sys_ptrace", "cap_sys_pacct", "cap_sys_admin", "cap_sys_boot", "cap_sys_nice", "cap_sys_resource", "cap_sys_time", "cap_sys_tty_config", "cap_mknod", "cap_lease", "cap_audit_write", "cap_audit_control", "cap_setfcap", "cap_mac_override", "cap_mac_admin", "cap_syslog", "35", "36+ep"], "facter_mtu_br_ex": 1450, "ansible_domain": "", "facter_blockdevice_vda_vendor": "0x1af4", "ansible_date_time": {"weekday_number": "4", "iso8601_basic_short": "20170914T190509", "tz": "UTC", "weeknumber": "37", "hour": "19", "year": "2017", "minute": "05", "tz_offset": "+0000", "month": "09", "epoch": "1505415909", "iso8601_micro": "2017-09-14T19:05:09.491160Z", "weekday": "Thursday", "time": "19:05:09", "date": "2017-09-14", "iso8601": "2017-09-14T19:05:09Z", "day": "14", "iso8601_basic": "20170914T190509491070", "second": "09"}, "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIMiDcCDeVqcgiZ5Kq6zKs/G85DVDZZvN0jlcrTkb+8Jd", "ansible_processor_cores": 1, "facter_macaddress_br_ex": "8e:0e:9e:4c:0d:45", "facter_macaddress_eth0": "fa:16:3e:8f:1c:3d", "ansible_virtualization_type": "openstack", "facter_ec2_metadata": {"instance-type": "ssd-osFoundation-3", "local-ipv4": "149.202.161.193", "reservation-id": "r-rj0lyd87", "local-hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "placement": {"availability-zone": "nova"}, "ami-launch-index": "0", "public-hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "hostname": "centos-7-2-node-ovh-gra1-10937570-899082", "ramdisk-id": "None", "public-keys": {"0": {"openssh-key": ["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLsTZJ8hXTmzjKxYh/7V07mIy8xl2HL+9BaUlt6A6TMsL3LSvaVQNSgmXX5g0XfPWSCKmkZb1O28q49jQI2n7n7+sHkxn0dJDxj1N2oNrzNY7pDuPrdtCijczLFdievygXNhXNkQ2WIqHXDquN/jfLLJ9L0jxtxtsUMbiL2xxZEZcaf/K5MqyPhscpqiVNE1MjE4xgPbIbv8gCKtPpYIIrktOMb4JbV7rhOp5DcSP5gXtLhOF5fbBpZ+szqrTVUcBX0oTYr3iRfOje9WPsTZIk9vBfBtF416mCNxMSRc7KhSW727AnUu85hS0xiP0MRAf69KemG1OE1pW+LtDIAEYp mordred@camelot", "", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvKYcWK1T7e3PKSFiqb03EYktnoxVASpPoq2rJw2JvhsP0JfS+lKrPzpUQv7L4JCuQMsPNtZ8LnwVEft39k58Kh8XMebSfaqPYAZS5zCNvQUQIhP9myOevBZf4CDeG+gmssqRFcWEwIllfDuIzKBQGVbomR+Y5QuW0HczIbkoOYI6iyf2jB6xg+bmzR2HViofNrSa62CYmHS6dO04Z95J27w6jGWpEOTBjEQvnb9sdBc4EzaBVmxCpa2EilB1u0th7/DvuH0yP4T+X8G8UjW1gZCTOVw06fqlBCST4KjdWw1F/AuOCT7048klbf4H+mCTaEcPzzu3Fkv8ckMWtS/Z9Q== jeblair@operational-necessity", "", "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCnfoVhOTkrY7uoebL8PoHXb0Fg4jJqGCbwkxUdNUdheIdbnfyjuRG3iL8WZnzf7nzWnD+IGo6kkAo8BkNMK9L0P0Y+5IjI8NH49KU22tQ1umij4EIf5tzLh4gsqkJmy6QLrlbf10m6UF4rLFQhKzOd4b2H2K6KbP00CIymvbW3BwvNDODM4xRE2uao387qfvXZBUkB0PpRD+7fWPoN58gpFUm407Eba3WwX5PCD+1DD+RVBsG8maIDXerQ7lvFLoSuyMswv1TfkvCj0ZFhSFbfTd2ZysCu6eryFfeixR7NY9SNcp9YTqG6LrxGA7Ci6wz+hycFHXlDrlBgfFJDe5At clark@work", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3KnRBTH5QPpKjf4RWu4akzYt2gwp796cMkFl5vu8e7G/cHuh4979FeNJXMVP6F3rvZB+yXDHLCU5LBVLq0K+1GbAZT/hH38hpMOIvniwKIquvI6C/drkVPHO6YmVlapw/NI530PGnT/TAqCOycHBO5eF1bYsaqV1yZqvs9v7UZc6J4LukoLZwpmyWZ5P3ltAiiy8+FGq3SLCKWDMmv/Bjz4zTsaNbSWThJi0BydINjC1/0ze5Tyc/XgW1sDuxmmXJxgQp4EvLpronqb2hT60iA52kj8lrmoCIryRpgnbaRA7BrxKF8zIr0ZALHijxEUeWHhFJDIVRGUf0Ef0nrmBv fungi-openstack-2015", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHGuIVB/WxBd7k1R8x2FyfqT6KxRnoM7lE5RE8gvBk2r8cQeH5k1c+P5JrBvWpmqXv4satoivYOBiIb7JXEgIxx62YUx/JQ0J7k3w+av6h4iFe2OhOtEOjMF5F8/wO8a/95OeTZPzBZlUfA3hx754kuw3Q/aBKQUOHWxJOIedGyVHeJc7XiFj3RXIufFuUfng9+p4Z3q6d2/WpuKqs00WI0CLF17PkU4i8P9CraJR1dmsWW6zoxMT2G+DwMFI7ZMS3xrVBRuLwrLlbylVLW2kOJ0JeyjHnRh7X1kR7KG3cGOOjA1YQ0e+mXvremcO3/3o6Iop/N1AtqVuYCKlZc7Y9 slukjanov@mirantis.com", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL9x1rhTVOEQEanrN+ecycaDtAbbh3kr41Rxx7galtLq0JwftjsZqv2Vwl9c8ARmm8HiHcLwDoaZB9gvs6teMScCB+5a1fcohiycJBl2olNFRzkGapDaTvl74aLXQBWaV84D8tUavEl26zcgwrv9WLUsy9pnHoo5K0BzbK7vT2g3VictCphveC2vdjCDeptocWvt4zxCmAY6O7QMKeUjKMlvuy+zCohJcR4BbDnw8EriFAmCeQZcAgfLTyeAvjo384NNIFWyhCwvbCLvpgTplMCp896DWLlXu9eaGUCNjT/sZM8zafAXbfc6OKYFQ5iANAiJktWwKaUaphJkbSVWT5 elizabeth@r2d2", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3onVLOZiiGpQWTCIV0QwHmc3Jvqyl7UaJxIu7D49OQcLHqVZsozI9pSiCdTnWyAaM+E+5wD9yVcSTqMWqn2AZmZSwQ+Fh6KnCgPZ/o63+iCZPGL0RNk20M1iNh5dvdStDnn+j2fpeV/JONF0tBn07QvNL2eF4BwtbTG9Zhl186QNsXjXDghrSO3Etl6DSfcUhxyvMoA2LnclWWD5hLmiRhcBm+PIxveVsr4B+o0k1HV5SUOvJMWtbEC37AH5I818O4fNOob6CnOFaCsbA9oUDzB5rqxutPZb9SmNJpNoLqYqDgyppM0yeql0Kn97tUt7H4j5xHrWoGnJ4IXfuDc0AMmmy4fpcLGkNf7zcBftKS6iz/3AlOXjlp5WZvKxngJj9HIir2SE/qV4Lxw9936BzvAcQyw5+bEsLQJwi+LPZxEqLC6oklkX9dg/+1yBFHsz6mulA0b4Eq7VF9omRzrhhN4iPpU5KQYPRNz7yRYckXDxYnp2lz6yHgSYh2/lqMc+UqmCL9EAWcDw3jsgvJ6kH/YUVUojiRHD9QLqlhOusu1wrTfojjwF05mqkXKmH+LH8f8AJAlMdYg0c2WLlrcxnwCkLLxzU5cYmKcZ41LuLtQR3ik+EKjYzBXXyCEzFm6qQEbR2akpXyxvONgrf7pijrgNOi0GeatUt0bUQcAONYw== jhesketh@infra", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTDia7zLp6NB/DdzhGW/4MDgaQ1yemfF+fGFctrSbBZzP2Aj3RUlBh4Mut3bTIqp/PKNMXVZQbvig5nqF3sB87ZPvmk+7WluFFcQN1RIZnvkYXjF64C+G5PkEZOQW9nqEeElSCV2lXgK98FPrGtK6HgQlYxH5RJa6cufRwYLXLsAwfKRcS3P5oRU2KDORNm6uBfUuX0TyPgtEjYsjCWcffoW+E8kvZbx1DKxF4+u0mWSdkg0P40aAY10mHACtJ4hnu7xNa5Z9Oru1rA1KWL5NHISgy9t5zC1/0jWfYi+tqToBgUCyB8stWgNpHh+QJrpS8CoCDzQLBar0ynnOxBfHH2+s9xJapQNi6ZOC3khWkoxUJn2Gs9FXqow3zGSmEuEKbbUvaGC58U4S0xFcZzF+sOzjRJtw66wE2pQN5Pj/Qw09w6gt05g4nxoxkRVCwMLdnyoIY1oFmywJX3xC1Utu2oCNfgZSn78rqVkE9e11LczPNGvYjl6xQo1r254E0w3QBgo+LaTK5FBRCAbJ76n0IBJ8SZe9foPWjKTGlbCevM6KO8lm58/0m0EfMf9457ZM9KhyXwYvnb+iR7huGC+pwgGemJ4D6vjeE9EUNGSq6igg+v+cl1DHOxVb0s0Tx2T6DMh3usB4C1uoNCR303cmzrNZ94KLXRICQArSClQI7OQ== nibz@hertz", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSR2NmJC8PSanHUpKJuaMmohG80COO2IPkE3Mxhr7US8P1B3p1c6lOrT6M1txRzBY8FlbxfOinGtutP+ADCB2taXfpO8UiaG9eOqojAT/PeP2Y2ov72rVMSWupLozUv2uAR5yyFVFHOjKPYGAa01aJtfzfJujSak8dM0ifFeFwgp/8RBGEfC7atq+45TdrfAURRcEgcOLiF5Aq6fprCOwpllnrH6VoId9YS7u/5xF2/zBjr9PuOP7jEgCaL/+FNqu7jgj87aG5jiZPlweb7GTLJON9H6eFpyfpoJE0sZ1yR9Q+e9FAqQIA44Zi748qKBlFKbLxzoC4mc0SbNUAleEL yolanda@infra", "", "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1CW5E87v8o7O8B5fe7j1uaPCToRdaBukjH2HzQZ+DSGTIPjirLpp5ZXPuyNnmtRMzwld6mlHYlevVEwuZTNyQwS7ut5o0LjyW6yoEcvPq0xMEZLxaso5dZAtzNgf3FzbtaUYBnkhSwX7c24lf8wPGAl7TC3yO0dePQh2lXVdaBiGB9ybVeQr+kwJIxleUE4puuQ+ONJE2D+hHjoQ/huUMpb996pb/YzkjkAxqHguMid0c1taelyW8n17nEDoWvlV9Qqbo8cerhgURo1OBt2zENLjQQ0kOkPxJx4qx3652e0kbkr11y50r9BMs418mnJdWselMxkSqQNZ+XotoH5Dwn+3K2a6Wv4OX3Dqb9SF/JTD7lA/tIkNfxgsRlzfEQ01rK1+g7Je10EnDCLEzHpFjvZ5q4EEMcYqY+osLFpHAOWGLMx+3eY4pz/xEzRP/x3sjGU09uNOZ3oCWUfSkE4xebnnWtxwWZKyFmv3GHtaqJn2UvpAbODPEYyYcOS3XV3zd233W3C09YYnFUyZbGLXpD05Yet5fZfGTnveMRn5/9LZai+dBPwoMWUJdX4yPnGXgOG8zk0u1nWfcNJfYg+xajSUDiMKjDhlkuFK/GXNYuINe42s1TxzL7pJ4X4UhqLiopeJvPg/U5xdCV5pxVKf1MVenrGe2pfwf1Yr2WMv5w== rcarrillocruz@infra", "", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOjz+dkwRWTJcW9Gt3iGHSzRBsvVlTAK6G2oH3+0D41 iwienand+osinfra@redhat.com", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdzEzB2KpNLTTFJGLCNMY53sja37PXFzHHdjWEGaZtaTcuCn/ufV9ql5yhS5/414u9swoHM71H00+nT4uSWcXc2tTRXYWslaiwU47DOtQsD//CvGgIFBNO1EinWhYa5uTSfxI+Z/x4PBu7XFq5wi/JCfJ+iHIWsvXn8U44r1csURcZU0GMPAVG1MO+s3p1W7daVqF9RR7UuwCECb3hdPN1N/M4s6myBiuRXCeDND98dKLf8b342hw+pWvQ3g/OCLcVlYPWT4fy1YGQT8hT+jA2XPfwCtu/k7HKAGH3E8UcnBtY/RI9ibciIFe+Ro7q8t+tp5SgjGLq1NnE4Yp5rpsh david@koala", "", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCuP0CZE8AYnbm8gxecCxKeRw0wHRyryd+FKmNNsdr0d3UvfCbqNzLigrqEBZsKpofi3M4qCWNpKRyfhnjPynLTQjP1vnX9AbL9UGoiHxScfvh3skntTYMs9ezJRd0rMJJZO76FPo8bJLDlwxAQl8m/nuj3HfYiO5hYE7P+a3rhsJh4nEfBb7xh+Q5yM0PWObkkBl6IRiBYjlcsXNZHgTA5kNuihUk5bHqAw54sHh05DhpgOITpTw4LFbh4Ew2NKq49dEb2xbTuAyAr2DHNOGgIwKEZpwtKZEIGEuiLbb4DQRsfivrvyOjnK2NFjQzGyNOHfsOldWHRQwUKUs8nrxKdXvqcrfMnSVaibeYK2TRL+6jd9kc5SIhWI3XLm7HbX7uXMD7/JQrkL25Rcs6nndDCH72DJLz+ynA/T5umMbNBQ9tybL5z73IOpfShRGjQYego22CxDOy7e/5OEMHNoksbFb1S02viM9O2puS7LDqqfT9JIbbPqCrbRi/zOXo0f4EXo6xKUAmd8qlV+6f/p57/qFihzQDaRFVlFEH3k7qwsw7PYGUTwkPaThe6xyZN6D5jqxCZU3aSYu+FGb0oYo+M5IxOm0Cb4NNsvvkRPxWtwSayfFGu6+m/+/RyA3GBcAMev7AuyKN+K2vGMsLagHOx4i+5ZAcUwGzLeXAENNum3w== pabelanger@redhat.com"]}}, "ami-id": "ami-00021567", "kernel-id": "None", "public-ipv4": null, "block-device-mapping": {"ami": "vda", "root": "/dev/vda"}, "ami-manifest-path": "FIXME", "security-groups": null, "instance-action": "none", "instance-id": "i-006a62e0"}, "facter_memoryfree_mb": "6873.67", "ansible_distribution_release": "Core", "ansible_os_family": "RedHat", "facter_lsbdistcodename": "Core", "ansible_effective_user_id": 0, "facter_sshfp_ed25519": "SSHFP 4 1 a54448a9f87a2db5e04b69de1cd2d6266b1bec6e\
0.233 | 6252: SSHFP 4 2 dde561c94266ebc1e9307e2faf9a049f2599db641ffcac93c55ab4b71025ceb8", "facter_network_br_ex": "192.168.24.0", "facter_lsbrelease": ":core-4.1-amd64:core-4.1-noarch", "ansible_devices": {"vda": {"scheduler_mode": "", "rotational": "1", "vendor": "0x1af4", "sectors": "167772160", "sas_device_handle": null, "sas_address": null, "host": "SCSI storage controller: Red Hat, Inc Virtio block device", "sectorsize": "512", "removable": "0", "support_discard": "0", "model": null, "partitions": {"vda1": {"sectorsize": 512, "uuid": "2d930d32-4be2-4440-8fec-05114b535f83", "sectors": "167770079", "start": "2048", "holders": [], "size": "80.00 GB"}}, "holders": [], "size": "80.00 GB"}, "sr0": {"scheduler_mode": "cfq", "rotational": "1", "vendor": "QEMU", "sectors": "892", "sas_device_handle": null, "sas_address": null, "host": "IDE interface: Intel Corporation 82371SB PIIX3 IDE [Natoma/Triton II]", "sectorsize": "2048", "removable": "1", "support_discard": "0", "model": "QEMU DVD-ROM", "partitions": {}, "holders": [], "size": "1.74 MB"}}, "ansible_user_uid": 0, "ansible_bios_date": "04/01/2014", "facter_selinux_enforced": true, "facter_uptime": "1:06 hours", "facter_blockdevice_sr0_vendor": "QEMU", "ansible_bios_version": "2:1.10.2-58953eb7", "facter_blockdevice_sr0_size": 456704, "facter_swapfree_mb": "8192.00", "facter_hardwaremodel": "x86_64", "facter_id": "root", "facter_type": "Other", "facter_blockdevice_sr0_model": "QEMU DVD-ROM"}}\r
0.233 | 6252: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.233 | 6252: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.233 | 6252: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.233 | 6252: debug1: auto-mux: Trying existing master\r
0.233 | 6252: debug2: fd 3 setting O_NONBLOCK\r
0.233 | 6252: debug2: mux_client_hello_exchange: master version 4\r
0.233 | 6252: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.233 | 6252: debug3: mux_client_request_session: entering\r
0.233 | 6252: debug3: mux_client_request_alive: entering\r
0.233 | 6252: debug3: mux_client_request_alive: done pid = 9310\r
0.233 | 6252: debug3: mux_client_request_session: session request sent\r
0.233 | 6252: debug1: mux_client_request_session: master session id: 2\r
0.233 | 6252: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.233 | 6252: debug2: Received exit status from master 0\r
0.233 | 6252: Shared connection to 149.202.161.193 closed.\r
0.233 | 6252: ')
0.000 | 6253: ok: [149.202.161.193]
0.000 | 6254: META: ran handlers
0.000 | 6255:
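
Fact gathering returns "ok" above, and the play moves on to its real work: a short throwaway playbook that Mistral wrote to /tmp/ansible-mistral-actionq4fk5K/playbook.yaml. The playbook itself is not captured in this log; the sketch below is a hypothetical reconstruction based only on what the log does show, namely the three task names, their line offsets (playbook.yaml:3, :5 and :9) and the module files Ansible loads for them (user.py, stat.py/copy.py, file.py). Every argument is an assumption.

# Hypothetical reconstruction of /tmp/ansible-mistral-actionq4fk5K/playbook.yaml;
# task names and modules are taken from this log, all arguments are assumptions.
- hosts: all
  become: true
  tasks:
    - name: create user tripleo-admin                      # playbook.yaml:3 -> user.py
      user:
        name: tripleo-admin

    - name: grant admin rights to user tripleo-admin       # playbook.yaml:5 -> stat.py + copy.py
      copy:
        dest: /etc/sudoers.d/tripleo-admin                 # path visible in the stat call below
        content: "tripleo-admin ALL=(ALL) NOPASSWD:ALL\n"  # assumed; the log only shows a 37-byte upload
        mode: "0440"

    - name: ensure .ssh dir exists for user tripleo-admin  # playbook.yaml:9 -> file.py
      file:
        path: /home/tripleo-admin/.ssh                     # assumed home directory
        state: directory
        owner: tripleo-admin
        mode: "0700"
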
0.000 | 6256: TASK [create user tripleo-admin] ***********************************************
0.416 | 6257: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:3
0.000 | 6258: Using module file /usr/lib/python2.7/site-packages/ansible/modules/system/user.py
0.000 | 6259: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6260: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6261: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6262: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6269: <149.202.161.193> (0, '/home/jenkins
0.000 | 6269: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6269: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6269: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6269: debug1: auto-mux: Trying existing master\r
0.000 | 6269: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6269: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6269: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6269: debug3: mux_client_request_session: entering\r
0.000 | 6269: debug3: mux_client_request_alive: entering\r
0.000 | 6269: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6269: debug3: mux_client_request_session: session request sent\r
0.000 | 6269: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6269: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6269: debug2: Received exit status from master 0\r
0.000 | 6269: ')
0.000 | 6270: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6271: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6272: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6273: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6280: <149.202.161.193> (0, 'ansible-tmp-1505415909.92-41229317469196=/home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196
0.000 | 6280: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6280: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6280: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6280: debug1: auto-mux: Trying existing master\r
0.000 | 6280: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6280: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6280: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6280: debug3: mux_client_request_session: entering\r
0.000 | 6280: debug3: mux_client_request_alive: entering\r
0.000 | 6280: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6280: debug3: mux_client_request_session: session request sent\r
0.000 | 6280: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6280: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6280: debug2: Received exit status from master 0\r
0.000 | 6280: ')
0.000 | 6281: <149.202.161.193> PUT /tmp/tmp4VSvIE TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196/user.py
0.000 | 6282: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6283: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6284: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6291: <149.202.161.193> (0, 'sftp> put /tmp/tmp4VSvIE /home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196/user.py
0.000 | 6291: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6291: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6291: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6291: debug1: auto-mux: Trying existing master\r
0.000 | 6291: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6291: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6291: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6291: debug3: mux_client_request_session: entering\r
0.000 | 6291: debug3: mux_client_request_alive: entering\r
0.000 | 6291: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6291: debug3: mux_client_request_session: session request sent\r
0.000 | 6291: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6291: debug2: Remote version: 3\r
0.000 | 6291: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6291: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6291: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6291: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6291: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6291: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6291: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6291: debug3: Looking up /tmp/tmp4VSvIE\r
0.000 | 6291: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6291: debug3: Received stat reply T:101 I:2\r
0.000 | 6291: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6291: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415909.92-41229317469196/user.py\r
0.000 | 6291: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6291: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6291: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6291: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:32768\r
0.000 | 6291: debug3: Sent message SSH2_FXP_WRITE I:6 O:65536 S:5697\r
0.000 | 6291: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6291: debug3: In write loop, ack for 5 32768 bytes at 32768\r
0.000 | 6291: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6291: debug3: In write loop, ack for 6 5697 bytes at 65536\r
0.000 | 6291: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6291: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6291: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6291: debug2: Received exit status from master 0\r
0.000 | 6291: ')
0.000 | 6292: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6293: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6294: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6295: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6302: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6302: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6302: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6302: debug1: auto-mux: Trying existing master\r
0.000 | 6302: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6302: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6302: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6302: debug3: mux_client_request_session: entering\r
0.000 | 6302: debug3: mux_client_request_alive: entering\r
0.000 | 6302: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6302: debug3: mux_client_request_session: session request sent\r
0.000 | 6302: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6302: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6302: debug2: Received exit status from master 0\r
0.000 | 6302: ')
0.000 | 6303: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6304: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6305: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6306: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6356: }
0.000 | 6357:
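
On the wire, a single copy task produces the exchange that follows: Ansible's action plugin first runs stat.py against /etc/sudoers.d/tripleo-admin (the result further down reports "exists": false), then uploads a 37-byte "source" file plus the copy.py module to write it into place. That 37-byte payload would be consistent with a one-line NOPASSWD sudoers rule like the one sketched above, though the log never shows its actual contents.
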
0.000 | 6358: TASK [grant admin rights to user tripleo-admin] ********************************
0.416 | 6359: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:5
0.000 | 6360: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/stat.py
0.000 | 6361: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6362: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6363: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6364: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6371: <149.202.161.193> (0, '/home/jenkins
0.000 | 6371: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6371: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6371: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6371: debug1: auto-mux: Trying existing master\r
0.000 | 6371: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6371: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6371: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6371: debug3: mux_client_request_session: entering\r
0.000 | 6371: debug3: mux_client_request_alive: entering\r
0.000 | 6371: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6371: debug3: mux_client_request_session: session request sent\r
0.000 | 6371: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6371: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6371: debug2: Received exit status from master 0\r
0.000 | 6371: ')
0.000 | 6372: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6373: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6374: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6375: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6382: <149.202.161.193> (0, 'ansible-tmp-1505415910.55-235869433965926=/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926
0.000 | 6382: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6382: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6382: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6382: debug1: auto-mux: Trying existing master\r
0.000 | 6382: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6382: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6382: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6382: debug3: mux_client_request_session: entering\r
0.000 | 6382: debug3: mux_client_request_alive: entering\r
0.000 | 6382: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6382: debug3: mux_client_request_session: session request sent\r
0.000 | 6382: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6382: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6382: debug2: Received exit status from master 0\r
0.000 | 6382: ')
0.000 | 6383: <149.202.161.193> PUT /tmp/tmpnszOvS TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926/stat.py
0.000 | 6384: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6385: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6386: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6393: <149.202.161.193> (0, 'sftp> put /tmp/tmpnszOvS /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926/stat.py
0.000 | 6393: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6393: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6393: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6393: debug1: auto-mux: Trying existing master\r
0.000 | 6393: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6393: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6393: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6393: debug3: mux_client_request_session: entering\r
0.000 | 6393: debug3: mux_client_request_alive: entering\r
0.000 | 6393: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6393: debug3: mux_client_request_session: session request sent\r
0.000 | 6393: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6393: debug2: Remote version: 3\r
0.000 | 6393: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6393: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6393: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6393: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6393: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6393: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6393: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6393: debug3: Looking up /tmp/tmpnszOvS\r
0.000 | 6393: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6393: debug3: Received stat reply T:101 I:2\r
0.000 | 6393: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6393: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.55-235869433965926/stat.py\r
0.000 | 6393: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6393: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6393: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6393: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:27907\r
0.000 | 6393: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6393: debug3: In write loop, ack for 5 27907 bytes at 32768\r
0.000 | 6393: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6393: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6393: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6393: debug2: Received exit status from master 0\r
0.000 | 6393: ')
0.000 | 6394: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6395: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6396: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6397: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6404: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6404: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6404: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6404: debug1: auto-mux: Trying existing master\r
0.000 | 6404: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6404: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6404: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6404: debug3: mux_client_request_session: entering\r
0.000 | 6404: debug3: mux_client_request_alive: entering\r
0.000 | 6404: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6404: debug3: mux_client_request_session: session request sent\r
0.000 | 6404: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6404: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6404: debug2: Received exit status from master 0\r
0.000 | 6404: ')
0.000 | 6405: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6406: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6407: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6408: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6415: <149.202.161.193> (0, '\r
0.000 | 6415: {"invocation": {"module_args": {"checksum_algorithm": "sha1", "get_checksum": true, "path": "/etc/sudoers.d/tripleo-admin", "checksum_algo": "sha1", "follow": false, "get_md5": false, "get_mime": true, "get_attributes": true}}, "stat": {"exists": false}, "changed": false}\r
0.000 | 6415: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6415: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6415: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6415: debug1: auto-mux: Trying existing master\r
0.000 | 6415: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6415: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6415: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6415: debug3: mux_client_request_session: entering\r
0.000 | 6415: debug3: mux_client_request_alive: entering\r
0.000 | 6415: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6415: debug3: mux_client_request_session: session request sent\r
0.000 | 6415: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6415: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6415: debug2: Received exit status from master 0\r
0.000 | 6415: Shared connection to 149.202.161.193 closed.\r
0.000 | 6415: ')
0.000 | 6416: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6417: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6418: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6419: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6426: <149.202.161.193> (0, '/home/jenkins
0.000 | 6426: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6426: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6426: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6426: debug1: auto-mux: Trying existing master\r
0.000 | 6426: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6426: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6426: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6426: debug3: mux_client_request_session: entering\r
0.000 | 6426: debug3: mux_client_request_alive: entering\r
0.000 | 6426: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6426: debug3: mux_client_request_session: session request sent\r
0.000 | 6426: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6426: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6426: debug2: Received exit status from master 0\r
0.000 | 6426: ')
0.000 | 6427: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6428: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6429: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6430: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6437: <149.202.161.193> (0, 'ansible-tmp-1505415910.91-263263336394362=/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362
0.000 | 6437: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6437: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6437: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6437: debug1: auto-mux: Trying existing master\r
0.000 | 6437: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6437: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6437: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6437: debug3: mux_client_request_session: entering\r
0.000 | 6437: debug3: mux_client_request_alive: entering\r
0.000 | 6437: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6437: debug3: mux_client_request_session: session request sent\r
0.000 | 6437: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6437: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6437: debug2: Received exit status from master 0\r
0.000 | 6437: ')
0.000 | 6438: <149.202.161.193> PUT /tmp/tmplExnaD TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/source
0.000 | 6439: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6440: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6441: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6448: <149.202.161.193> (0, 'sftp> put /tmp/tmplExnaD /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/source
0.000 | 6448: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6448: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6448: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6448: debug1: auto-mux: Trying existing master\r
0.000 | 6448: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6448: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6448: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6448: debug3: mux_client_request_session: entering\r
0.000 | 6448: debug3: mux_client_request_alive: entering\r
0.000 | 6448: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6448: debug3: mux_client_request_session: session request sent\r
0.000 | 6448: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6448: debug2: Remote version: 3\r
0.000 | 6448: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6448: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6448: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6448: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6448: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6448: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6448: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6448: debug3: Looking up /tmp/tmplExnaD\r
0.000 | 6448: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6448: debug3: Received stat reply T:101 I:2\r
0.000 | 6448: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6448: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/source\r
0.000 | 6448: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:37\r
0.000 | 6448: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6448: debug3: In write loop, ack for 4 37 bytes at 0\r
0.000 | 6448: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6448: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6448: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6448: debug2: Received exit status from master 0\r
0.000 | 6448: ')
0.000 | 6449: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6450: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6451: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6452: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6460: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/copy.py
0.000 | 6461: <149.202.161.193> PUT /tmp/tmpQnOWmw TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/copy.py
0.000 | 6462: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6463: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6464: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6471: <149.202.161.193> (0, 'sftp> put /tmp/tmpQnOWmw /home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/copy.py
0.000 | 6471: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6471: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6471: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6471: debug1: auto-mux: Trying existing master\r
0.000 | 6471: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6471: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6471: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6471: debug3: mux_client_request_session: entering\r
0.000 | 6471: debug3: mux_client_request_alive: entering\r
0.000 | 6471: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6471: debug3: mux_client_request_session: session request sent\r
0.000 | 6471: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6471: debug2: Remote version: 3\r
0.000 | 6471: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6471: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6471: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6471: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6471: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6471: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6471: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6471: debug3: Looking up /tmp/tmpQnOWmw\r
0.000 | 6471: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6471: debug3: Received stat reply T:101 I:2\r
0.000 | 6471: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6471: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415910.91-263263336394362/copy.py\r
0.000 | 6471: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6471: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6471: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6471: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28018\r
0.000 | 6471: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6471: debug3: In write loop, ack for 5 28018 bytes at 32768\r
0.000 | 6471: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6471: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6471: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6471: debug2: Received exit status from master 0\r
0.000 | 6471: ')
0.000 | 6472: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6473: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6474: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6475: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6482: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6482: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6482: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6482: debug1: auto-mux: Trying existing master\r
0.000 | 6482: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6482: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6482: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6482: debug3: mux_client_request_session: entering\r
0.000 | 6482: debug3: mux_client_request_alive: entering\r
0.000 | 6482: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6482: debug3: mux_client_request_session: session request sent\r
0.000 | 6482: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6482: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6482: debug2: Received exit status from master 0\r
0.000 | 6482: ')
0.000 | 6483: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6484: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6485: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6486: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6533: }
0.000 | 6534:
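
The last task repeats the same upload-and-execute pattern with file.py, presumably creating the .ssh directory for the freshly created tripleo-admin user as in the sketch above; the lines below show the temp-dir creation and module upload for it.
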
0.000 | 6535: TASK [ensure .ssh dir exists for user tripleo-admin] ***************************
0.416 | 6536: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:9
0.000 | 6537: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/file.py
0.000 | 6538: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6539: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6540: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6541: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6548: <149.202.161.193> (0, '/home/jenkins
0.000 | 6548: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6548: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6548: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6548: debug1: auto-mux: Trying existing master\r
0.000 | 6548: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6548: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6548: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6548: debug3: mux_client_request_session: entering\r
0.000 | 6548: debug3: mux_client_request_alive: entering\r
0.000 | 6548: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6548: debug3: mux_client_request_session: session request sent\r
0.000 | 6548: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6548: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6548: debug2: Received exit status from master 0\r
0.000 | 6548: ')
0.000 | 6549: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6550: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6551: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6552: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6559: <149.202.161.193> (0, 'ansible-tmp-1505415911.71-211122090089966=/home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966
0.000 | 6559: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6559: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6559: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6559: debug1: auto-mux: Trying existing master\r
0.000 | 6559: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6559: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6559: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6559: debug3: mux_client_request_session: entering\r
0.000 | 6559: debug3: mux_client_request_alive: entering\r
0.000 | 6559: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6559: debug3: mux_client_request_session: session request sent\r
0.000 | 6559: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6559: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6559: debug2: Received exit status from master 0\r
0.000 | 6559: ')
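
The chunk above shows the remote side creating Ansible's per-invocation scratch directory and echoing its resolved path back, which is how the controller learns where to upload the module. The ansible-tmp-1505415911.71-211122090089966 name is an epoch timestamp with two decimals plus a random suffix, so concurrent tasks on the same host cannot collide. A sketch that reproduces the observable naming only (the random range is an assumption; Ansible's real generator lives in its shell plugins):

    import random
    import time

    # Reproduce the observable ansible-tmp-<epoch>-<random> format from
    # the log; the random range here is an assumption.
    def remote_tmp_name():
        return "ansible-tmp-%.2f-%d" % (time.time(),
                                        random.randint(0, 2 ** 48))

    print(remote_tmp_name())
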
0.000 | 6560: <149.202.161.193> PUT /tmp/tmpTZUrAq TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966/file.py
0.000 | 6561: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6562: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6563: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6570: <149.202.161.193> (0, 'sftp> put /tmp/tmpTZUrAq /home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966/file.py
0.000 | 6570: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6570: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6570: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6570: debug1: auto-mux: Trying existing master\r
0.000 | 6570: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6570: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6570: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6570: debug3: mux_client_request_session: entering\r
0.000 | 6570: debug3: mux_client_request_alive: entering\r
0.000 | 6570: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6570: debug3: mux_client_request_session: session request sent\r
0.000 | 6570: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6570: debug2: Remote version: 3\r
0.000 | 6570: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6570: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6570: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6570: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6570: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6570: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6570: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6570: debug3: Looking up /tmp/tmpTZUrAq\r
0.000 | 6570: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6570: debug3: Received stat reply T:101 I:2\r
0.000 | 6570: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6570: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415911.71-211122090089966/file.py\r
0.000 | 6570: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6570: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6570: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6570: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28880\r
0.000 | 6570: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6570: debug3: In write loop, ack for 5 28880 bytes at 32768\r
0.000 | 6570: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6570: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6570: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6570: debug2: Received exit status from master 0\r
0.000 | 6570: ')
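
This PUT chunk is the module upload itself, done through a one-shot sftp session multiplexed over the existing master. The "debug1: Couldn't stat remote file: No such file or directory" line is expected rather than a failure: sftp stats the destination before the first write. The two acknowledged SSH2_FXP_WRITE messages (32768 and 28880 bytes) mean file.py arrived as roughly 60 KiB in two chunks, and exit status 0 confirms success. For comparison, the same transfer done standalone with paramiko (paramiko is an assumption used for a compact sketch; Ansible itself drives OpenSSH's sftp binary, as the "sftp> put" echo shows):

    import paramiko

    # Standalone equivalent of the PUT above. Ansible actually invokes the
    # OpenSSH sftp binary; paramiko is only an illustrative substitute.
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("149.202.161.193", username="jenkins",
                   key_filename="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
    sftp = client.open_sftp()
    sftp.put("/tmp/tmpTZUrAq",
             "/home/jenkins/.ansible/tmp/"
             "ansible-tmp-1505415911.71-211122090089966/file.py")
    sftp.close()
    client.close()
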
0.000 | 6571: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6572: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6573: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6574: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6581: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6581: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6581: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6581: debug1: auto-mux: Trying existing master\r
0.000 | 6581: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6581: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6581: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6581: debug3: mux_client_request_session: entering\r
0.000 | 6581: debug3: mux_client_request_alive: entering\r
0.000 | 6581: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6581: debug3: mux_client_request_session: session request sent\r
0.000 | 6581: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6581: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6581: debug2: Received exit status from master 0\r
0.000 | 6581: ')
0.000 | 6582: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6583: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6584: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6585: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6648: }
0.000 | 6649:
0.000 | 6650: TASK [ensure authorized_keys file exists for user tripleo-admin] ***************
0.416 | 6651: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:12
0.000 | 6652: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/file.py
0.000 | 6653: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6654: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6655: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6656: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)
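
Same shape as the previous task and the same file module, but a different target: per the task name, playbook.yaml:12 makes sure tripleo-admin's authorized_keys file exists before anything is written to it. Conceptually a touch plus permissions (again, path and mode are assumptions):

    import os

    # Conceptual "touch" equivalent of the authorized_keys file task.
    # Path and mode are assumptions; the log omits the task arguments.
    def ensure_file(path, mode=0o600):
        changed = not os.path.exists(path)
        open(path, "a").close()   # create if missing, never truncate
        os.chmod(path, mode)
        return changed

    ensure_file("/home/tripleo-admin/.ssh/authorized_keys")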

0.000 | 6663: <149.202.161.193> (0, '/home/jenkins
0.000 | 6663: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6663: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6663: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6663: debug1: auto-mux: Trying existing master\r
0.000 | 6663: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6663: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6663: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6663: debug3: mux_client_request_session: entering\r
0.000 | 6663: debug3: mux_client_request_alive: entering\r
0.000 | 6663: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6663: debug3: mux_client_request_session: session request sent\r
0.000 | 6663: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6663: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6663: debug2: Received exit status from master 0\r
0.000 | 6663: ')
0.000 | 6664: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6665: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6666: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6667: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6674: <149.202.161.193> (0, 'ansible-tmp-1505415912.11-62209096158516=/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516
0.000 | 6674: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6674: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6674: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6674: debug1: auto-mux: Trying existing master\r
0.000 | 6674: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6674: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6674: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6674: debug3: mux_client_request_session: entering\r
0.000 | 6674: debug3: mux_client_request_alive: entering\r
0.000 | 6674: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6674: debug3: mux_client_request_session: session request sent\r
0.000 | 6674: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6674: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6674: debug2: Received exit status from master 0\r
0.000 | 6674: ')
0.000 | 6675: <149.202.161.193> PUT /tmp/tmpRPnr2l TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516/file.py
0.000 | 6676: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6677: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6678: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6685: <149.202.161.193> (0, 'sftp> put /tmp/tmpRPnr2l /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516/file.py
0.000 | 6685: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6685: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6685: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6685: debug1: auto-mux: Trying existing master\r
0.000 | 6685: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6685: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6685: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6685: debug3: mux_client_request_session: entering\r
0.000 | 6685: debug3: mux_client_request_alive: entering\r
0.000 | 6685: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6685: debug3: mux_client_request_session: session request sent\r
0.000 | 6685: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6685: debug2: Remote version: 3\r
0.000 | 6685: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6685: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6685: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6685: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6685: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6685: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6685: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6685: debug3: Looking up /tmp/tmpRPnr2l\r
0.000 | 6685: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6685: debug3: Received stat reply T:101 I:2\r
0.000 | 6685: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6685: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.11-62209096158516/file.py\r
0.000 | 6685: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6685: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6685: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6685: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28892\r
0.000 | 6685: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6685: debug3: In write loop, ack for 5 28892 bytes at 32768\r
0.000 | 6685: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6685: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6685: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6685: debug2: Received exit status from master 0\r
0.000 | 6685: ')
0.000 | 6686: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6687: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6688: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6689: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6696: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6696: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6696: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6696: debug1: auto-mux: Trying existing master\r
0.000 | 6696: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6696: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6696: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6696: debug3: mux_client_request_session: entering\r
0.000 | 6696: debug3: mux_client_request_alive: entering\r
0.000 | 6696: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6696: debug3: mux_client_request_session: session request sent\r
0.000 | 6696: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6696: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6696: debug2: Received exit status from master 0\r
0.000 | 6696: ')
0.000 | 6697: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6698: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6699: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6700: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6763: }
0.000 | 6764:
0.000 | 6765: TASK [authorize TripleO Mistral key for user tripleo-admin] ********************
0.416 | 6766: task path: /tmp/ansible-mistral-actionq4fk5K/playbook.yaml:15
0.000 | 6767: Using module file /usr/lib/python2.7/site-packages/ansible/modules/files/lineinfile.py
0.000 | 6768: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6769: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6770: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6771: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)
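
The last of the three tasks switches modules: playbook.yaml:15 uses lineinfile rather than file, and this is the step that actually grants access. lineinfile appends the Mistral public key to authorized_keys only if an equivalent line is not already present, which keeps the play idempotent across reruns. The gist of that check-then-append (a simplified sketch, not Ansible's implementation; the path and key value are illustrative):

    # Simplified idea behind lineinfile: append a line only when absent.
    # The path and key value are illustrative, not taken from the log.
    def line_in_file(path, line):
        try:
            with open(path) as f:
                if any(l.rstrip("\n") == line for l in f):
                    return False  # ok: already authorized
        except IOError:
            pass  # the previous task ensured the file exists, but be safe
        with open(path, "a") as f:
            f.write(line + "\n")
        return True               # changed

    line_in_file("/home/tripleo-admin/.ssh/authorized_keys",
                 "ssh-rsa AAAA... mistral")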

0.000 | 6778: <149.202.161.193> (0, '/home/jenkins
0.000 | 6778: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6778: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6778: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6778: debug1: auto-mux: Trying existing master\r
0.000 | 6778: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6778: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6778: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6778: debug3: mux_client_request_session: entering\r
0.000 | 6778: debug3: mux_client_request_alive: entering\r
0.000 | 6778: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6778: debug3: mux_client_request_session: session request sent\r
0.000 | 6778: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6778: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6778: debug2: Received exit status from master 0\r
0.000 | 6778: ')
0.000 | 6779: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6780: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6781: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6782: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6789: <149.202.161.193> (0, 'ansible-tmp-1505415912.64-64968773191150=/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150
0.000 | 6789: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6789: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6789: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6789: debug1: auto-mux: Trying existing master\r
0.000 | 6789: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6789: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6789: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6789: debug3: mux_client_request_session: entering\r
0.000 | 6789: debug3: mux_client_request_alive: entering\r
0.000 | 6789: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6789: debug3: mux_client_request_session: session request sent\r
0.000 | 6789: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6789: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6789: debug2: Received exit status from master 0\r
0.000 | 6789: ')
0.000 | 6790: <149.202.161.193> PUT /tmp/tmpUb4rld TO /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150/lineinfile.py
0.000 | 6791: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6792: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6793: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6800: <149.202.161.193> (0, 'sftp> put /tmp/tmpUb4rld /home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150/lineinfile.py
0.000 | 6800: ', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6800: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6800: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6800: debug1: auto-mux: Trying existing master\r
0.000 | 6800: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6800: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6800: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6800: debug3: mux_client_request_session: entering\r
0.000 | 6800: debug3: mux_client_request_alive: entering\r
0.000 | 6800: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6800: debug3: mux_client_request_session: session request sent\r
0.000 | 6800: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6800: debug2: Remote version: 3\r
0.000 | 6800: debug2: Server supports extension "posix-rename@openssh.com" revision 1\r
0.000 | 6800: debug2: Server supports extension "statvfs@openssh.com" revision 2\r
0.000 | 6800: debug2: Server supports extension "fstatvfs@openssh.com" revision 2\r
0.000 | 6800: debug2: Server supports extension "hardlink@openssh.com" revision 1\r
0.000 | 6800: debug2: Server supports extension "fsync@openssh.com" revision 1\r
0.000 | 6800: debug3: Sent message fd 5 T:16 I:1\r
0.000 | 6800: debug3: SSH_FXP_REALPATH . -> /home/jenkins size 0\r
0.000 | 6800: debug3: Looking up /tmp/tmpUb4rld\r
0.000 | 6800: debug3: Sent message fd 5 T:17 I:2\r
0.000 | 6800: debug3: Received stat reply T:101 I:2\r
0.000 | 6800: debug1: Couldn\'t stat remote file: No such file or directory\r
0.000 | 6800: debug3: Sent message SSH2_FXP_OPEN I:3 P:/home/jenkins/.ansible/tmp/ansible-tmp-1505415912.64-64968773191150/lineinfile.py\r
0.000 | 6800: debug3: Sent message SSH2_FXP_WRITE I:4 O:0 S:32768\r
0.000 | 6800: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6800: debug3: In write loop, ack for 4 32768 bytes at 0\r
0.000 | 6800: debug3: Sent message SSH2_FXP_WRITE I:5 O:32768 S:28744\r
0.000 | 6800: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6800: debug3: In write loop, ack for 5 28744 bytes at 32768\r
0.000 | 6800: debug3: Sent message SSH2_FXP_CLOSE I:4\r
0.000 | 6800: debug3: SSH2_FXP_STATUS 0\r
0.000 | 6800: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6800: debug2: Received exit status from master 0\r
0.000 | 6800: ')
0.000 | 6801: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6802: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6803: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6804: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)

0.000 | 6811: <149.202.161.193> (0, '', 'OpenSSH_7.4p1, OpenSSL 1.0.2k-fips 26 Jan 2017\r
0.000 | 6811: debug1: Reading configuration data /etc/ssh/ssh_config\r
0.000 | 6811: debug1: /etc/ssh/ssh_config line 58: Applying options for *\r
0.000 | 6811: debug1: auto-mux: Trying existing master\r
0.000 | 6811: debug2: fd 3 setting O_NONBLOCK\r
0.000 | 6811: debug2: mux_client_hello_exchange: master version 4\r
0.000 | 6811: debug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r
0.000 | 6811: debug3: mux_client_request_session: entering\r
0.000 | 6811: debug3: mux_client_request_alive: entering\r
0.000 | 6811: debug3: mux_client_request_alive: done pid = 9310\r
0.000 | 6811: debug3: mux_client_request_session: session request sent\r
0.000 | 6811: debug1: mux_client_request_session: master session id: 2\r
0.000 | 6811: debug3: mux_client_read_packet: read header failed: Broken pipe\r
0.000 | 6811: debug2: Received exit status from master 0\r
0.000 | 6811: ')
0.000 | 6812: <149.202.161.193> ESTABLISH SSH CONNECTION FOR USER: jenkins
0.000 | 6813: <149.202.161.193> SSH: ansible.cfg set ssh_args: (-C)(-o)(ControlMaster=auto)(-o)(ControlPersist=60s)
0.227 | 6814: <149.202.161.193> SSH: ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set: (-o)(IdentityFile="/tmp/ansible-mistral-actionq4fk5K/ssh_private_key")
0.000 | 6815: <149.202.161.193> SSH: ansible_password/ansible_ssh_pass not set: (-o)(KbdInteractiveAuthentication=no)(-o)(PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey)(-o)(PasswordAuthentication=no)