#!/bin/sh
# OCI (OpenStack Cluster Installer) first-boot node provisioning script:
# waits for networking, then — on first boot only — prepares the node for
# its role and applies puppet in a retry loop.

set -e

# Role of this node (e.g. "compute", "controller", "volume", "cephosd"),
# written by the OCI provisioning system.
MY_ROLE=$(cat /etc/oci/my-role)

# Block until networking is usable: puppet and the oci-report-* helpers
# below all need connectivity to the OCI master.
oci-wait-for-networking

# Enable and start the puppet agent service, then report to the OCI
# master that puppet provisioning succeeded on this node.
start_puppet_service () {
	systemctl enable puppet
	systemctl start puppet
	oci-report-puppet-success
}

# Apply puppet once and populate PUPPET_RETURN_CODE with the return value.
# Takes the run number as argument; the full debug output of the run is
# kept in /var/log/puppet-run-<run number>.
apply_puppet () {
	local RUN_NUMBER
	RUN_NUMBER=${1}
	# Puppet, with --detailed-exitcodes exits this way:
	# 0: The run succeeded with no changes or failures; the system was already in the desired state.
	# 1: The run failed, or wasn't attempted due to another run already in progress.
	# 2: The run succeeded, and some resources were changed.
	# 4: The run succeeded, and some resources failed.
	# 6: The run succeeded, and included both changes and failures.
	# *: Everything else is undefined.
	# so we run it once, if 0 or 2, success and report back to OCI, otherwise, we try again.
	# If the 2nd run succeeds, we report it, otherwise, report failure.
	# Temporarily disable -e: puppet legitimately exits non-zero and we
	# want to inspect the code rather than abort the whole script.
	set +e
	if [ -e /etc/oci/self-signed-api-cert ] ; then
		# The OCI API uses a self-signed certificate: point the
		# OpenStack clients run by puppet at the local CA chain.
		OS_CACERT=/etc/ssl/certs/oci-pki-oci-ca-chain.pem puppet agent --test --debug --detailed-exitcodes >"/var/log/puppet-run-${RUN_NUMBER}" 2>&1
		PUPPET_RETURN_CODE=$?
	else
		puppet agent --test --debug --detailed-exitcodes >"/var/log/puppet-run-${RUN_NUMBER}" 2>&1
		PUPPET_RETURN_CODE=$?
	fi
	set -e
}

# This function runs puppet multiple times (at most MAX_RUN), until
# applying works.  It then reports back to OCI if it worked, and starts
# the agent service in the system if it worked.
apply_puppet_in_loop () {
	local CNT MAX_RUN
	CNT=0
	MAX_RUN=5

	# Tell the OCI master that provisioning has started.
	oci-report-puppet-running

	# Seed with a failure code so the loop body runs at least once.
	PUPPET_RETURN_CODE=1
	while [ "${CNT}" -lt "${MAX_RUN}" ] && [ "${PUPPET_RETURN_CODE}" != 0 ] && [ "${PUPPET_RETURN_CODE}" != 2 ]; do
		# Before running the 2nd puppet -t run, we must manually start libvirtd and virtlogd
		if [ "${CNT}" = 1 ] && [ "${MY_ROLE}" = "compute" ] ; then
			echo "Before 2nd puppet run: launching oci-fixup-compute-node."
			oci-fixup-compute-node
		fi
		if [ "${CNT}" != 0 ] ; then
			echo "Error during the puppet run: running again (run $((CNT + 1)))."
		fi
		CNT=$((CNT + 1))
		apply_puppet "${CNT}"
		# Handle puppet failures
		case "${PUPPET_RETURN_CODE}" in
		0|2)
			# Success (with or without changes): nothing to repair.
			:
		;;
		*)
			case "${MY_ROLE}" in
			"controller"|"messaging")
				# If mariadb-server is installed...
				if dpkg-query -W mariadb-server >/dev/null 2>/dev/null ; then
					# ...and it's not connected...
					# (the substitution MUST be quoted: an empty or
					# multi-word result would otherwise break the test)
					if [ "$(mysql -e "SHOW STATUS LIKE 'wsrep_connected'" | tail -n 1 | awk '{print $2}')" = "OFF" ] ; then
						# ...we're on the case where mysql has failed starting.
						# Wipe the local datadir and restart, so it
						# rejoins the cluster properly with a full
						# state transfer.
						# We can then restart applying puppet.
						systemctl stop mysql
						rm -rf /var/lib/mysql/*
						systemctl start mysql
					fi
				fi
			;;
			*)
				:
			;;
			esac
		;;
		esac
	done
	# Report the final outcome of the retry loop back to OCI.
	case "${PUPPET_RETURN_CODE}" in
	0|2)
		start_puppet_service
	;;
	*)
		oci-report-puppet-failure
	;;
	esac
	if [ "${MY_ROLE}" = "compute" ] && [ -e /etc/oci/compute_is_cephosd ] ; then
		# This is to ensure /etc/nova/secret.xml is written
		# even in the case of hyper converged nova-compute nodes.
		oci-fixup-compute-node
	fi
}

# The work below must only happen on the very first boot: the marker
# file is removed immediately so a reboot never re-runs it.
if [ -e /var/lib/oci-first-boot ] ; then
	rm -f /var/lib/oci-first-boot
	# Role-specific preparation that must happen *before* puppet runs.
	case "${MY_ROLE}" in
	"compute")
		# This needs to be up and running before nova-compute
		# gets provisioned by puppet.
		apt-get install -y libvirt-daemon-system
		systemctl restart virtlogd
		sleep 2
		systemctl restart libvirtd
		sleep 2
		if ! [ -e /etc/oci/compute_is_cephosd ] ; then
			FIRST_DISK=$(head -n 1 /etc/oci/data-disks)
			if [ -n "${FIRST_DISK}" ] && [ -e "/dev/${FIRST_DISK}" ] ; then
				oci-build-data-vg "/dev/${FIRST_DISK}"
			fi
		fi
		oci-write-lvm-filter
		oci-fix-nova-ssh-config
	;;
	"controller")
		FIRST_DISK=$(head -n 1 /etc/oci/data-disks)
		if [ -n "${FIRST_DISK}" ] && [ -e "/dev/${FIRST_DISK}" ] ; then
			oci-build-data-vg "/dev/${FIRST_DISK}"
		fi
	;;
	"volume")
		oci-build-cinder-volume-vg
		oci-write-lvm-filter
	;;
	*)
		:
	;;
	esac

	apply_puppet_in_loop

	# Role-specific work that must happen *after* puppet has run.
	case "${MY_ROLE}" in
	"cephosd"|"billosd")
		if [ -e /etc/oci/cephosd-automatic-provisionning ] ; then
			# Build a "/dev/sdX /dev/sdY ..." device list from the
			# one-disk-name-per-line inventory file.
			DEVLIST=""
			for i in $(cat /etc/oci/data-disks) ; do
				DEVLIST="${DEVLIST} /dev/$i"
			done
			echo ${DEVLIST}
			# DEVLIST is intentionally unquoted below: it must split
			# into one argument per device.
			ceph-volume lvm batch --osds-per-device 2 ${DEVLIST} --dmcrypt --yes
		fi
	;;
	*)
		:
	;;
	esac
fi

exit 0
