Skip to content

Commit

Permalink
Initial ansible proof of concept
Browse files Browse the repository at this point in the history
  • Loading branch information
ventifus committed Sep 16, 2024
1 parent 9564d81 commit 80d4f73
Show file tree
Hide file tree
Showing 31 changed files with 2,787 additions and 0 deletions.
49 changes: 49 additions & 0 deletions Dockerfile.ansible
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
ARG REGISTRY
ARG VERSION

###############################################################################
# ansible is the base Python image with ansible and azure-cli
###############################################################################
FROM ${REGISTRY}/ubi9/python-311:1-66 AS ansible

# Pinned tool versions — bump deliberately after checking the changelogs:
#   pipx                 https://pypi.org/project/pipx/#history
#   azure-cli            https://pypi.org/project/azure-cli/#history
#   ansible              https://pypi.org/project/ansible/#history
#   ansible.azcollection https://galaxy.ansible.com/ui/repo/published/azure/azcollection/
# One variable per ARG instruction: the classic (non-BuildKit) builder rejects
# multiple name=value pairs on a single ARG, so this form is more portable.
ARG PIPX_VERSION=1.6.0
ARG ANSIBLE_VERSION=10.2.0
ARG AZURE_CLI_VERSION=2.62.0
ARG ANSIBLE_AZCOLLECTION_VERSION=2.3.0

# Have Ansible print per-task timing information
ENV ANSIBLE_CALLBACKS_ENABLED=profile_tasks
USER root
COPY ansible /ansible
WORKDIR /ansible

# Using pipx here because ansible and azure-cli have differing required core
# Azure modules; they each need a separate venv to avoid collisions.
# APP_ROOT and HOME are assumed to be set by the ubi9/python-311 base image
# (/opt/app-root and /opt/app-root/src respectively) — re-verify on base bumps.
# NOTE(review): the trailing `rm -rf ${HOME}/.ansible` also deletes the
# azure.azcollection collection installed a few lines earlier — confirm the
# collection is supplied at runtime (e.g. via the /ansible volume mount).
RUN ${APP_ROOT}/bin/pip install "pipx==${PIPX_VERSION}" && \
    ${APP_ROOT}/bin/pipx install "azure-cli==${AZURE_CLI_VERSION}" && \
    ${APP_ROOT}/bin/pipx install "ansible==${ANSIBLE_VERSION}" --include-deps && \
    ${APP_ROOT}/bin/pipx runpip ansible install -r "/ansible/ansible-requirements.txt" && \
    ${HOME}/.local/bin/ansible-galaxy collection install "azure.azcollection==${ANSIBLE_AZCOLLECTION_VERSION}" && \
    ${APP_ROOT}/bin/pipx runpip ansible install -r "${HOME}/.ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt" && \
    ${APP_ROOT}/bin/pipx list && \
    rm -rf ${HOME}/.ansible ${HOME}/.azure

###############################################################################
# linter takes the ansible image and injects ansible-lint. Ansible-lint needs
# ansible itself and all ansible modules and python modules installed to correctly lint
###############################################################################
FROM ansible AS linter
ARG ANSIBLE_LINT_VERSION=24.7.0
# NOTE(review): without `set -o pipefail` this RUN exits with tee's status, so
# lint findings never fail the image build (the SARIF report is captured either
# way) — confirm that is intended; `make lint-ansible` enforces lint separately.
RUN ${APP_ROOT}/bin/pipx inject --include-apps ansible "ansible-lint==${ANSIBLE_LINT_VERSION}" && \
${HOME}/.local/bin/ansible-lint --offline -c /ansible/.ansible_lint.yaml --project-dir /ansible --format sarif | tee /opt/app-root/src/sarif.txt

###############################################################################
# Final image is the base image plus ansible-lint's output
###############################################################################
FROM ansible
# Copying the report also forces the linter stage to be built.
COPY --from=linter /opt/app-root/src/sarif.txt /opt/app-root/src/sarif.txt
# pipx installed ansible's apps under $HOME/.local/bin; /opt/app-root/src is
# presumably HOME in the ubi9 python image — re-verify when bumping the base.
ENTRYPOINT ["/opt/app-root/src/.local/bin/ansible-playbook"]
Empty file added Dockerfile.ansible.dockerignore
Empty file.
75 changes: 75 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ ARO_IMAGE_BASE = ${RP_IMAGE_ACR}.azurecr.io/aro
E2E_FLAGS ?= -test.v --ginkgo.v --ginkgo.timeout 180m --ginkgo.flake-attempts=2 --ginkgo.junit-report=e2e-report.xml
E2E_LABEL ?= !smoke
GO_FLAGS ?= -tags=containers_image_openpgp,exclude_graphdriver_btrfs,exclude_graphdriver_devicemapper
# ":O" makes podman overlay-relabel volume mounts when SELinux is Enforcing,
# and expands to nothing otherwise. `:=` so the probe runs once at parse time
# (recursive `=` would re-run the shell on every expansion), and POSIX
# `[ ... = ... ]` so it works when /bin/sh is not bash ($(shell) uses /bin/sh).
# getenforce errors (command missing / non-SELinux host) are discarded.
PODMAN_VOLUME_OVERLAY := $(shell if [ "$$(getenforce 2>/dev/null)" = "Enforcing" ]; then echo ":O"; else echo ""; fi)

export GOFLAGS=$(GO_FLAGS)

Expand Down Expand Up @@ -540,3 +541,77 @@ run-rp: ci-rp podman-secrets
--secret proxy-client.crt,target=/app/secrets/proxy-client.crt \
--secret proxy.crt,target=/app/secrets/proxy.crt \
$(LOCAL_ARO_RP_IMAGE):$(VERSION) rp

###############################################################################
# Ansible
###############################################################################
# Build the aro-ansible container image from Dockerfile.ansible. REGISTRY,
# VERSION, NO_CACHE, and PODMAN_REMOTE_ARGS come from earlier in this Makefile.
.PHONY: ansible-image
ansible-image:
	podman $(PODMAN_REMOTE_ARGS) \
		build . \
		--file Dockerfile.ansible \
		--tag aro-ansible:$(VERSION) \
		--build-arg REGISTRY=$(REGISTRY) \
		--build-arg VERSION=$(VERSION) \
		--no-cache=$(NO_CACHE)

# Tunable knobs for the cluster targets; override on the command line,
# e.g. `make cluster LOCATION=westus2 CLUSTERPATTERN=udr CLEANUP=True`.
LOCATION := eastus
CLUSTERPREFIX := $(USER)
CLUSTERPATTERN := basic
CLEANUP := False
# No quotes: make assigns values verbatim, so quotes would become part of the
# value (harmless through the shell, but surprising anywhere else).
INVENTORY := hosts.yaml
SSH_CONFIG_DIR := $(HOME)/.ssh/
SSH_KEY_BASENAME := id_rsa
# Limit the play to the selected hosts unless the caller asked for everything
# with CLUSTERPATTERN=*.
ifneq ($(CLUSTERPATTERN),*)
CLUSTERFILTER = -l $(CLUSTERPATTERN)
endif
# NOTE(review): verbose-tagged tasks are skipped only when VERBOSE is exactly
# "False"; leaving VERBOSE unset keeps them enabled — confirm that is intended.
ifeq ($(VERBOSE),False)
SKIP_VERBOSE = --skip-tags verbose
endif

# Deploy the selected clusters, then tear them down (cleanup is a no-op
# unless CLEANUP=True). The recipe is a shell no-op; the prerequisites do
# all the work.
.PHONY: cluster
cluster: cluster-deploy cluster-cleanup
	@:
# Run deploy.playbook.yaml inside the aro-ansible image. Mounts the Azure CLI
# credentials, the playbook tree, and ssh config into the container;
# $(PODMAN_VOLUME_OVERLAY) appends ":O" on SELinux-enforcing hosts.
# Uses $(INVENTORY) (was hard-coded hosts.yaml) for consistency with
# cluster-cleanup, so a caller-supplied inventory applies to both targets.
.PHONY: cluster-deploy
cluster-deploy:
	podman $(PODMAN_REMOTE_ARGS) \
		run \
		--rm \
		-it \
		-v $${AZURE_CONFIG_DIR:-~/.azure}:/opt/app-root/src/.azure$(PODMAN_VOLUME_OVERLAY) \
		-v ./ansible:/ansible$(PODMAN_VOLUME_OVERLAY) \
		-v $(SSH_CONFIG_DIR):/root/.ssh$(PODMAN_VOLUME_OVERLAY) \
		aro-ansible:$(VERSION) \
		-i $(INVENTORY) \
		$(CLUSTERFILTER) \
		-e location=$(LOCATION) \
		-e CLUSTERPREFIX=$(CLUSTERPREFIX) \
		-e CLEANUP=$(CLEANUP) \
		-e SSH_KEY_BASENAME=$(SSH_KEY_BASENAME) \
		$(SKIP_VERBOSE) \
		deploy.playbook.yaml
# Tear the clusters back down, but only when the caller opted in with
# CLEANUP=True (so `make cluster` alone leaves clusters running).
# POSIX `=` (not the bash-only `==`) keeps the guard working when the default
# recipe shell /bin/sh is dash or another strict POSIX shell.
.PHONY: cluster-cleanup
cluster-cleanup:
	@if [ "${CLEANUP}" = "True" ]; \
	then \
		podman $(PODMAN_REMOTE_ARGS) \
			run \
			--rm \
			-it \
			-v $${AZURE_CONFIG_DIR:-~/.azure}:/opt/app-root/src/.azure$(PODMAN_VOLUME_OVERLAY) \
			-v ./ansible:/ansible$(PODMAN_VOLUME_OVERLAY) \
			-v $(SSH_CONFIG_DIR):/root/.ssh$(PODMAN_VOLUME_OVERLAY) \
			aro-ansible:$(VERSION) \
			-i $(INVENTORY) $(CLUSTERFILTER) \
			-e location=$(LOCATION) \
			-e CLUSTERPREFIX=$(CLUSTERPREFIX) \
			-e CLEANUP=$(CLEANUP) \
			-e SSH_KEY_BASENAME=$(SSH_KEY_BASENAME) \
			$(SKIP_VERBOSE) \
			cleanup.playbook.yaml \
		; \
	fi

# Lint the playbooks on the host (the container build also lints; see the
# linter stage in Dockerfile.ansible). `&&` instead of `;` so ansible-lint
# never runs from the wrong directory if `cd` fails.
.PHONY: lint-ansible
lint-ansible:
	cd ansible && ansible-lint -c .ansible_lint.yaml
10 changes: 10 additions & 0 deletions ansible/.ansible_lint.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# ansible-lint configuration, consumed by `make lint-ansible` and by the
# linter stage in Dockerfile.ansible (via -c /ansible/.ansible_lint.yaml).
profile: production
exclude_paths: []
use_default_rules: true
# no-changed-when is skipped project-wide; presumably the command/shell tasks
# here wrap non-idempotent az/oc invocations — confirm before re-enabling.
skip_list:
- no-changed-when
# Opt-in rules beyond the production profile.
enable_list:
- args
- empty-string-compare
- no-same-owner
- name[prefix]
4 changes: 4 additions & 0 deletions ansible/ansible-requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Extra python packages injected into ansible's pipx venv at image build time
# (see `pipx runpip ansible install -r` in Dockerfile.ansible).
# Exact pins — bump deliberately.
kubernetes==29.0.0
microsoft-kiota-http==1.3.1
msal==1.28.1
msgraph-core==1.0.0
8 changes: 8 additions & 0 deletions ansible/cleanup.playbook.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
# Tear down every cluster in the selected inventory. `---` document-start
# added for consistency with deploy.playbook.yaml (and the yaml lint rule).
- name: Cleanup clusters
  hosts: all
  gather_facts: false
  # Limit how many clusters are processed concurrently.
  serial: "{{ max_simultaneous_clusters | default(1) }}"
  environment:
    # Keep az CLI survey prompts out of automation output.
    AZURE_CORE_SURVEY_MESSAGE: "false"
  roles:
    - cleanup
21 changes: 21 additions & 0 deletions ansible/deploy.playbook.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
---
# Deploy and exercise the test clusters, then hand off to the cleanup role
# (which presumably only acts when CLEANUP=True is passed by the Makefile —
# confirm against the role). Each play caps concurrency via `serial`.
- name: Deploy simple clusters
  hosts: standard_clusters
  gather_facts: false
  serial: "{{ max_simultaneous_clusters | default(1) }}"
  environment:
    # Keep az CLI survey prompts out of automation output.
    AZURE_CORE_SURVEY_MESSAGE: "false"
  roles:
    - standard_cluster
    - smoketest
    - cleanup

- name: Bring your own keys disk encryption
  hosts: byok_clusters
  gather_facts: false
  serial: "{{ max_simultaneous_clusters | default(1) }}"
  environment:
    AZURE_CORE_SURVEY_MESSAGE: "false"
  roles:
    - byok_cluster
    - smoketest
    - cleanup
2 changes: 2 additions & 0 deletions ansible/group_vars/all.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Variables applied to every inventory host.
# Host that Azure/CLI work is delegated to — localhost is the container itself.
delegation: localhost
# Intentionally null by default; presumably overridden per host/group to drive
# upgrade testing — confirm against the roles that read it.
upgrade_paths:
143 changes: 143 additions & 0 deletions ansible/hosts.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
---
# Inventory of test-cluster shapes. Each group is a cluster flavour, each host
# is one cluster to deploy; vars cascade group -> host with normal Ansible
# precedence (host vars override group vars).
all:

standard_clusters:
  # "standard" in the sense that the unspecialized standard_cluster role will
  # work. See byok_cluster for an example that is not "standard".
  hosts:
    basic:
      # The simplest possible cluster
      name: aro
    preview:
      name: aro
      AZAROEXT_VERSION: 1.0.9
  vars:
    resource_group: "{{ CLUSTERPREFIX }}-{{ inventory_hostname }}-{{ location }}"
    network_prefix_cidr: 10.0.0.0/22
    master_cidr: 10.0.0.0/23
    master_size: Standard_D8s_v3
    worker_cidr: 10.0.2.0/23
    worker_size: Standard_D4s_v3
  children:
    baddns_clusters:
    encrypted_clusters:
    private_clusters:
    udr_clusters:

baddns_clusters:
  # Custom DNS pointing to something that doesn't work to make sure
  # we still work with uncooperative DNS servers
  # https://learn.microsoft.com/en-us/azure/openshift/howto-custom-dns
  hosts:
    baddns:
    private_baddns:
      apiserver_visibility: Private
      ingress_visibility: Private
      domain: baddns.private
    baddns413:
      version: 4.13.40
    private_baddns413:
      version: 4.13.40
      apiserver_visibility: Private
      ingress_visibility: Private
      domain: baddns.private
    baddns415:
      version: 4.15.27
    private_baddns415:
      version: 4.15.27
      apiserver_visibility: Private
      ingress_visibility: Private
      domain: baddns.private
  vars:
    name: aro
    resource_group: "{{ CLUSTERPREFIX }}-{{ inventory_hostname }}-{{ location }}"
    # Deliberately unhelpful resolver address; clusters must still deploy.
    dns_servers:
      - 172.16.0.0
    network_prefix_cidr: 10.0.0.0/22
    master_cidr: 10.0.0.0/23
    master_size: Standard_D8s_v3
    worker_cidr: 10.0.2.0/23
    worker_size: Standard_D4s_v3

byok_clusters:
  # Cluster with customer-managed disk encryption key
  # https://learn.microsoft.com/en-us/azure/openshift/howto-byok
  hosts:
    byok:
      name: aro
  vars:
    resource_group: "{{ CLUSTERPREFIX }}-{{ inventory_hostname }}-{{ location }}"
    network_prefix_cidr: 10.0.0.0/22
    master_cidr: 10.0.0.0/23
    master_size: Standard_E8s_v5
    worker_cidr: 10.0.2.0/23
    worker_size: Standard_D4s_v5

encrypted_clusters:
  # Basic cluster with encryption-at-host enabled
  hosts:
    enc:
      name: aro
  vars:
    resource_group: "{{ CLUSTERPREFIX }}-{{ inventory_hostname }}-{{ location }}"
    network_prefix_cidr: 10.0.0.0/22
    master_cidr: 10.0.0.0/23
    worker_cidr: 10.0.2.0/23
    master_size: Standard_E8s_v5
    master_encryption_at_host: true
    worker_size: Standard_D4s_v5
    worker_encryption_at_host: true

private_clusters:
  hosts:
    private:
      # Simple private cluster, no UDR
      name: aro
      resource_group: "{{ CLUSTERPREFIX }}-private-{{ location }}"
  vars:
    apiserver_visibility: Private
    ingress_visibility: Private
    network_prefix_cidr: 10.0.0.0/22
    master_cidr: 10.0.0.0/23
    master_size: Standard_D8s_v3
    worker_cidr: 10.0.2.0/23
    worker_size: Standard_D4s_v3

udr_clusters:
  # User-defined routing variants.
  # https://learn.microsoft.com/en-us/azure/openshift/howto-create-private-cluster-4x
  hosts:
    udr:
      name: aro
      routes:
        - name: Blackhole
          address_prefix: 0.0.0.0/0
          next_hop_type: none
    udr_no_null:
      name: aro
      routes:
        - name: To Internet
          address_prefix: 0.0.0.0/0
          next_hop_type: internet
    udr413:
      name: aro
      version: 4.13.40
      routes:
        - name: Blackhole
          address_prefix: 0.0.0.0/0
          next_hop_type: none
    udr_no_null413:
      name: aro
      version: 4.13.40
      routes:
        - name: To Internet
          address_prefix: 0.0.0.0/0
          next_hop_type: internet
  vars:
    resource_group: "{{ CLUSTERPREFIX }}-{{ inventory_hostname }}-{{ location }}"
    network_prefix_cidr: 10.0.0.0/22
    master_cidr: 10.0.0.0/23
    worker_cidr: 10.0.2.0/23
    # master_size/worker_size inherit from standard_clusters (parent group).
    apiserver_visibility: Private
    ingress_visibility: Private
    outbound_type: UserDefinedRouting
Loading

0 comments on commit 80d4f73

Please sign in to comment.