diff --git a/ansible/azure-nat-gateway.yaml b/ansible/azure-nat-gateway.yaml
new file mode 100644
index 00000000..ed6fbf62
--- /dev/null
+++ b/ansible/azure-nat-gateway.yaml
@@ -0,0 +1,88 @@
+---
+
+- name: Configure Azure NAT Gateway
+  become: false
+  connection: local
+  hosts: localhost
+  gather_facts: false
+  vars:
+    kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+    resource_prefix: "coco"
+  tasks:
+    - name: Get Azure credentials  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: Secret
+        namespace: openshift-cloud-controller-manager
+        name: azure-cloud-credentials
+      register: azure_credentials
+      retries: 20
+      delay: 5
+
+    - name: Get Azure configuration  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: ConfigMap
+        namespace: openshift-cloud-controller-manager
+        name: cloud-conf
+      register: azure_cloud_conf
+      retries: 20
+      delay: 5
+
+    - name: Set facts
+      ansible.builtin.set_fact:
+        azure_subscription_id: "{{ (azure_cloud_conf.resources[0]['data']['cloud.conf'] | from_json)['subscriptionId'] }}"
+        azure_tenant_id: "{{ (azure_cloud_conf.resources[0]['data']['cloud.conf'] | from_json)['tenantId'] }}"
+        azure_resource_group: "{{ (azure_cloud_conf.resources[0]['data']['cloud.conf'] | from_json)['vnetResourceGroup'] }}"
+        azure_client_id: "{{ azure_credentials.resources[0]['data']['azure_client_id'] | b64decode }}"
+        azure_client_secret: "{{ azure_credentials.resources[0]['data']['azure_client_secret'] | b64decode }}"
+        azure_vnet: "{{ (azure_cloud_conf.resources[0]['data']['cloud.conf'] | from_json)['vnetName'] }}"
+        azure_subnet: "{{ (azure_cloud_conf.resources[0]['data']['cloud.conf'] | from_json)['subnetName'] }}"
+        coco_public_ip_name: "{{ resource_prefix }}-pip"
+        coco_nat_gateway_name: "{{ resource_prefix }}-nat-gateway"
+      no_log: true
+
+    - name: Create Public IP for NAT Gateway
+      azure.azcollection.azure_rm_publicipaddress:
+        subscription_id: "{{ azure_subscription_id }}"
+        tenant: "{{ azure_tenant_id }}"
+        client_id: "{{ azure_client_id }}"
+        secret: "{{ azure_client_secret }}"
+        resource_group: "{{ azure_resource_group }}"
+        name: "{{ coco_public_ip_name }}"
+        sku: "standard"
+        allocation_method: "static"
+
+    - name: Retrieve Public IP for NAT Gateway
+      azure.azcollection.azure_rm_publicipaddress_info:
+        subscription_id: "{{ azure_subscription_id }}"
+        tenant: "{{ azure_tenant_id }}"
+        client_id: "{{ azure_client_id }}"
+        secret: "{{ azure_client_secret }}"
+        resource_group: "{{ azure_resource_group }}"
+        name: "{{ coco_public_ip_name }}"
+      register: coco_gw_public_ip
+
+    - name: Create NAT Gateway
+      azure.azcollection.azure_rm_natgateway:
+        subscription_id: "{{ azure_subscription_id }}"
+        tenant: "{{ azure_tenant_id }}"
+        client_id: "{{ azure_client_id }}"
+        secret: "{{ azure_client_secret }}"
+        resource_group: "{{ azure_resource_group }}"
+        name: "{{ coco_nat_gateway_name }}"
+        idle_timeout_in_minutes: 10
+        sku:
+          name: standard
+        public_ip_addresses:
+          - "{{ coco_gw_public_ip.publicipaddresses[0].id }}"
+      register: coco_natgw
+
+    - name: Update the worker subnet to associate NAT gateway
+      azure.azcollection.azure_rm_subnet:
+        subscription_id: "{{ azure_subscription_id }}"
+        tenant: "{{ azure_tenant_id }}"
+        client_id: "{{ azure_client_id }}"
+        secret: "{{ azure_client_secret }}"
+        resource_group: "{{ azure_resource_group }}"
+        name: "{{ azure_subnet }}"
+        virtual_network_name: "{{ azure_vnet }}"
+        nat_gateway: "{{ coco_nat_gateway_name }}"
diff --git a/ansible/configure-spire-server-x509pop.yaml b/ansible/configure-spire-server-x509pop.yaml
new file mode 100644
index 00000000..fd848887
--- /dev/null
+++ b/ansible/configure-spire-server-x509pop.yaml
@@ -0,0 +1,144 @@
+---
+# Configure SPIRE Server to support x509pop node attestation for CoCo pods
+# The Red Hat SPIRE Operator's SpireServer CRD does not expose x509pop plugin configuration
+# This job patches the operator-generated ConfigMap and StatefulSet to add x509pop support
+#
+# IMPORTANT: The operator must
have CREATE_ONLY_MODE=true env var set (via subscription
+# config) to prevent it from reverting our manual patches. Without this, the operator
+# continuously reconciles and overwrites x509pop changes.
+# Note: In v0.2.0 (tech-preview) this was done via a CR annotation
+# (ztwim.openshift.io/create-only). In v1.0.0 (GA) it changed to the env var.
+
+- name: Configure SPIRE Server for x509pop attestation
+  become: false
+  connection: local
+  hosts: localhost
+  gather_facts: false
+  vars:
+    spire_namespace: "zero-trust-workload-identity-manager"
+    configmap_name: "spire-server"
+    statefulset_name: "spire-server"
+    ca_configmap_name: "spire-x509pop-ca"
+    ca_mount_path: "/run/spire/x509pop-ca"
+  tasks:
+    - name: Get ZeroTrustWorkloadIdentityManager CR to determine expected cluster name  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        api_version: operator.openshift.io/v1alpha1
+        kind: ZeroTrustWorkloadIdentityManager
+        name: cluster
+      register: ztwim_cr
+      retries: 30
+      delay: 10
+      until: ztwim_cr.resources | length > 0
+
+    - name: Extract expected cluster name from ZTWIM CR
+      ansible.builtin.set_fact:
+        expected_cluster_name: "{{ ztwim_cr.resources[0].spec.clusterName }}"
+
+    - name: Display expected cluster name
+      ansible.builtin.debug:
+        msg: "Expected cluster name from ZTWIM CR: {{ expected_cluster_name }}"
+
+    - name: Wait for SPIRE Server ConfigMap with correct cluster name  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: ConfigMap
+        namespace: "{{ spire_namespace }}"
+        name: "{{ configmap_name }}"
+      register: spire_configmap
+      retries: 60
+      delay: 5
+      until: >
+        spire_configmap.resources | length > 0 and
+        (spire_configmap.resources[0].data['server.conf'] | from_json
+        ).plugins.NodeAttestor[0].k8s_psat.plugin_data.clusters[0][expected_cluster_name] is defined
+
+    - name: ConfigMap has correct cluster name
+      ansible.builtin.debug:
+        msg: "ConfigMap verified with correct cluster name: {{ expected_cluster_name }}"
+
+    - name: Get current SPIRE Server configuration  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: ConfigMap
+        namespace: "{{ spire_namespace }}"
+        name: "{{ configmap_name }}"
+      register: spire_config
+
+    - name: Parse server configuration
+      ansible.builtin.set_fact:
+        server_conf: "{{ spire_config.resources[0].data['server.conf'] | from_json }}"
+
+    - name: Check if x509pop already configured
+      ansible.builtin.set_fact:
+        x509pop_exists: "{{ server_conf.plugins.NodeAttestor | selectattr('x509pop', 'defined') | list | length > 0 }}"
+
+    - name: Add x509pop NodeAttestor plugin  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s:
+        state: present
+        definition:
+          apiVersion: v1
+          kind: ConfigMap
+          metadata:
+            name: "{{ configmap_name }}"
+            namespace: "{{ spire_namespace }}"
+          data:
+            server.conf: >-
+              {{ server_conf | combine({'plugins': {'NodeAttestor':
+              server_conf.plugins.NodeAttestor + [{'x509pop': {'plugin_data':
+              {'ca_bundle_path': '/run/spire/x509pop-ca/ca-bundle.pem'}}}]}},
+              recursive=True) | to_json }}
+      when: not x509pop_exists
+
+    - name: Wait for SPIRE Server StatefulSet to exist  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: StatefulSet
+        namespace: "{{ spire_namespace }}"
+        name: "{{ statefulset_name }}"
+      register: spire_statefulset
+      retries: 30
+      delay: 10
+      until: spire_statefulset.resources | length > 0
+
+    - name: Check if CA volume already mounted
+      ansible.builtin.set_fact:
+        ca_volume_exists: "{{ spire_statefulset.resources[0].spec.template.spec.volumes | selectattr('name', 'equalto', 'x509pop-ca') | list | length > 0 }}"
+
+    - name: Add CA volume to SPIRE Server StatefulSet  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s:
+        state: patched
+        kind: StatefulSet
+        namespace: "{{ spire_namespace }}"
+        name: "{{ statefulset_name }}"
+        definition:
+          spec:
+            template:
+              spec:
+                volumes:
+                  - name: x509pop-ca
+                    configMap:
+                      name: "{{ ca_configmap_name }}"
+                containers:
+                  - name: spire-server
+                    volumeMounts:
+                      - name: x509pop-ca
+                        mountPath: "{{ ca_mount_path }}"
+                        readOnly: true
+      when: not ca_volume_exists
+
+    - name: Restart SPIRE Server to apply configuration  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s:
+        state: absent
+        kind: Pod
+        namespace: "{{ spire_namespace }}"
+        label_selectors:
+          - app.kubernetes.io/name=server
+      when: (not x509pop_exists) or (not ca_volume_exists)
+
+    - name: Configuration status
+      ansible.builtin.debug:
+        msg: >-
+          {{ 'x509pop already configured' if (x509pop_exists and ca_volume_exists)
+          else 'x509pop NodeAttestor plugin and CA volume mount configured successfully' }}
+
+    - name: Final status
+      ansible.builtin.debug:
+        msg: "x509pop configuration complete. CREATE_ONLY_MODE env var on the operator prevents reverts."
diff --git a/ansible/generate-certificate.yaml b/ansible/generate-certificate.yaml
new file mode 100644
index 00000000..781cedfb
--- /dev/null
+++ b/ansible/generate-certificate.yaml
@@ -0,0 +1,230 @@
+---
+# Generic certificate generation task
+# Can generate both CA certificates and agent certificates
+# Parameters:
+# cert_name: Name for the certificate (e.g., "x509pop-ca", "qtodo-agent")
+# cert_type: "ca" or "agent"
+# cert_namespace: Kubernetes namespace to store certificate
+# output_configmap: Create ConfigMap with cert (true/false)
+# output_secret: Create Secret with key (true/false)
+# cert_dir: Temporary directory for cert generation
+# For agent certs only:
+# ca_cert_path: Path to CA certificate file
+# ca_key_path: Path to CA private key file
+# Certificate details:
+# common_name: Certificate CN
+# organization: Certificate O
+# country: Certificate C
+# validity_days: Certificate validity period
+# key_usage: List of key usage extensions
+# extended_key_usage: List of extended key usage (optional, for agent certs)
+
+- name: "Set certificate paths for {{ cert_name }}"
+  ansible.builtin.set_fact:
+    key_path: "{{ cert_dir }}/{{ cert_name }}-key.pem"
+    cert_path: "{{
cert_dir }}/{{ cert_name }}-cert.pem"
+    csr_path: "{{ cert_dir }}/{{ cert_name }}.csr"
+
+- name: "Check if certificate already exists: {{ cert_name }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s_info:
+    kind: "{{ 'ConfigMap' if output_configmap else 'Secret' }}"
+    namespace: "{{ cert_namespace }}"
+    name: "{{ cert_name if output_configmap else (key_secret_name | default(cert_name + '-key')) }}"
+  register: existing_cert
+
+- name: "Skip certificate - already exists: {{ cert_name }}"
+  ansible.builtin.debug:
+    msg: "Certificate {{ cert_name }} already exists, skipping"
+  when: existing_cert.resources | length > 0
+
+- name: "Generate private key for {{ cert_name }}"
+  community.crypto.openssl_privatekey:
+    path: "{{ key_path }}"
+    size: "{{ 4096 if cert_type == 'ca' else 2048 }}"
+  when: existing_cert.resources | length == 0
+
+- name: "Generate CSR for CA certificate {{ cert_name }}"
+  community.crypto.openssl_csr:
+    path: "{{ csr_path }}"
+    privatekey_path: "{{ key_path }}"
+    common_name: "{{ common_name }}"
+    organization_name: "{{ organization }}"
+    country_name: "{{ country }}"
+    basic_constraints:
+      - "CA:TRUE"
+    basic_constraints_critical: true
+    key_usage: "{{ key_usage }}"
+    key_usage_critical: true
+  when:
+    - existing_cert.resources | length == 0
+    - cert_type == "ca"
+
+- name: "Generate self-signed CA certificate for {{ cert_name }}"
+  community.crypto.x509_certificate:
+    path: "{{ cert_path }}"
+    csr_path: "{{ csr_path }}"
+    privatekey_path: "{{ key_path }}"
+    provider: selfsigned
+    selfsigned_not_after: "+{{ validity_days }}d"
+  when:
+    - existing_cert.resources | length == 0
+    - cert_type == "ca"
+
+- name: "Generate CSR for {{ cert_name }}"
+  community.crypto.openssl_csr:
+    path: "{{ csr_path }}"
+    privatekey_path: "{{ key_path }}"
+    common_name: "{{ common_name }}"
+    organization_name: "{{ organization }}"
+    country_name: "{{ country }}"
+    key_usage: "{{ key_usage }}"
+    key_usage_critical: true
+    extended_key_usage: "{{ extended_key_usage | default(omit) }}"
+  when:
+    - existing_cert.resources | length == 0
+    - cert_type == "agent"
+
+- name: "Sign agent certificate for {{ cert_name }}"
+  community.crypto.x509_certificate:
+    path: "{{ cert_path }}"
+    csr_path: "{{ csr_path }}"
+    provider: ownca
+    ownca_path: "{{ ca_cert_path }}"
+    ownca_privatekey_path: "{{ ca_key_path }}"
+    ownca_not_after: "+{{ validity_days }}d"
+  when:
+    - existing_cert.resources | length == 0
+    - cert_type == "agent"
+
+- name: "Create ConfigMap with certificate for {{ cert_name }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: ConfigMap
+      metadata:
+        name: "{{ cert_name }}"
+        namespace: "{{ cert_namespace }}"
+      data:
+        ca-bundle.pem: "{{ lookup('file', cert_path) }}"
+  when:
+    - existing_cert.resources | length == 0
+    - output_configmap | default(false)
+
+- name: "Check if key secret exists: {{ cert_name }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s_info:
+    kind: Secret
+    namespace: "{{ cert_namespace }}"
+    name: "{{ key_secret_name | default(cert_name + '-key') }}"
+  register: existing_key_secret
+  when: output_secret | default(false)
+
+- name: "Create Secret with CA private key for {{ cert_name }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Secret
+      metadata:
+        name: "{{ key_secret_name | default(cert_name + '-key') }}"
+        namespace: "{{ cert_namespace }}"
+      type: Opaque
+      stringData:
+        ca-key.pem: "{{ lookup('file', key_path) }}"
+  when:
+    - output_secret | default(false)
+    - (existing_key_secret.resources | default([])) | length == 0
+    - cert_type == 'ca'
+
+- name: "Create Secret with agent private key for {{ cert_name }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Secret
+      metadata:
+        name: "{{ key_secret_name | default(cert_name + '-key') }}"
+        namespace: "{{ cert_namespace }}"
+      type: Opaque
+      stringData:
+        key: "{{ lookup('file', key_path) }}"
+  when:
+    - output_secret | default(false)
+    - (existing_key_secret.resources | default([])) | length == 0
+    - cert_type == 'agent'
+
+- name: "Create Secret with certificate for {{ cert_name }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Secret
+      metadata:
+        name: "{{ cert_secret_name | default(cert_name) }}"
+        namespace: "{{ cert_namespace }}"
+      type: Opaque
+      stringData:
+        cert: "{{ lookup('file', cert_path) }}"
+  when:
+    - existing_cert.resources | length == 0
+    - cert_type == "agent"
+    - not (output_configmap | default(false))
+
+# Push agent certificates to Vault for KBS to serve
+- name: "Create PushSecret for certificate {{ cert_secret_name | default(cert_name) }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: external-secrets.io/v1alpha1
+      kind: PushSecret
+      metadata:
+        name: "push-{{ cert_secret_name | default(cert_name) }}"
+        namespace: "{{ cert_namespace }}"
+      spec:
+        updatePolicy: Replace
+        deletionPolicy: Delete
+        refreshInterval: 10s
+        secretStoreRefs:
+          - name: vault-backend
+            kind: ClusterSecretStore
+        selector:
+          secret:
+            name: "{{ cert_secret_name | default(cert_name) }}"
+        data:
+          - match:
+              secretKey: cert
+              remoteRef:
+                remoteKey: "pushsecrets/{{ cert_secret_name | default(cert_name) }}"
+                property: cert
+  when:
+    - cert_type == "agent"
+    - not (output_configmap | default(false))
+
+- name: "Create PushSecret for private key {{ key_secret_name | default(cert_name + '-key') }}"  # noqa: syntax-check[unknown-module]
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: external-secrets.io/v1alpha1
+      kind: PushSecret
+      metadata:
+        name: "push-{{ key_secret_name | default(cert_name + '-key') }}"
+        namespace: "{{ cert_namespace }}"
+      spec:
+        updatePolicy: Replace
+        deletionPolicy: Delete
+        refreshInterval: 10s
+        secretStoreRefs:
+          - name: vault-backend
+            kind: ClusterSecretStore
+        selector:
+          secret:
+            name: "{{ key_secret_name | default(cert_name + '-key') }}"
+        data:
+          - match:
+              secretKey: key
+              remoteRef:
+                remoteKey: "pushsecrets/{{ key_secret_name | default(cert_name + '-key') }}"
+                property: key
+  when:
+    - cert_type == "agent"
+    - output_secret | default(false)
diff --git a/ansible/generate-certs.yaml b/ansible/generate-certs.yaml
new file mode 100644
index 00000000..151dec32
--- /dev/null
+++ b/ansible/generate-certs.yaml
@@ -0,0 +1,102 @@
+---
+# Generate SPIRE x509pop certificates for CoCo integration
+# Creates CA certificate and agent certificates for all workloads
+
+- name: Generate SPIRE x509pop certificates
+  become: false
+  connection: local
+  hosts: localhost
+  gather_facts: false
+  vars:
+    spire_namespace: "zero-trust-workload-identity-manager"
+    trustee_namespace: "trustee-operator-system"
+    ca_configmap_name: "spire-x509pop-ca"
+    ca_secret_name: "spire-x509pop-ca-key"
+    cert_dir: "/tmp/spire-certs"
+    # Workloads list - should match coco.workloads
+    workloads:
+      - name: "qtodo"
+        namespace: "qtodo"
+  tasks:
+    - name: Create temporary certificate directory
+      ansible.builtin.file:
+        path: "{{ cert_dir }}"
+        state: directory
+        mode: '0700'
+
+    # Generate CA certificate
+    - name: Generate CA certificate
+      ansible.builtin.include_tasks:
+        file: generate-certificate.yaml
+      vars:
+        cert_name: "{{ ca_configmap_name }}"
+        cert_type: "ca"
+        cert_namespace: "{{ spire_namespace }}"
+        output_configmap: true
+        output_secret: true
+        common_name: "SPIRE x509pop CA"
+        organization: "Validated Patterns"
+        country: "US"
+        validity_days: 3650  # 10 years
+        key_usage:
+          - keyCertSign
+          - cRLSign
+
+    # Retrieve CA for signing agent certificates
+    - name: Get CA certificate from ConfigMap  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: ConfigMap
+        namespace: "{{ spire_namespace }}"
+        name: "{{ ca_configmap_name }}"
+      register: ca_configmap
+
+    - name:
Get CA private key from Secret  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kind: Secret
+        namespace: "{{ spire_namespace }}"
+        name: "{{ ca_secret_name }}"
+      register: ca_secret
+
+    - name: Write CA certificate to temp file
+      ansible.builtin.copy:
+        content: "{{ ca_configmap.resources[0].data['ca-bundle.pem'] }}"
+        dest: "{{ cert_dir }}/ca-cert.pem"
+        mode: '0600'
+
+    - name: Write CA private key to temp file
+      ansible.builtin.copy:
+        content: "{{ ca_secret.resources[0].data['ca-key.pem'] | b64decode }}"
+        dest: "{{ cert_dir }}/ca-key.pem"
+        mode: '0600'
+
+    # Generate agent certificates for each workload
+    - name: Generate agent certificate for each workload
+      ansible.builtin.include_tasks:
+        file: generate-certificate.yaml
+      vars:
+        cert_name: "spire-{{ workload.name }}"
+        cert_secret_name: "spire-cert-{{ workload.name }}"
+        key_secret_name: "spire-key-{{ workload.name }}"
+        cert_type: "agent"
+        cert_namespace: "{{ trustee_namespace }}"
+        output_configmap: false
+        output_secret: true
+        ca_cert_path: "{{ cert_dir }}/ca-cert.pem"
+        ca_key_path: "{{ cert_dir }}/ca-key.pem"
+        common_name: "spire-agent-{{ workload.name }}"
+        organization: "Validated Patterns"
+        country: "US"
+        validity_days: 365  # 1 year
+        key_usage:
+          - digitalSignature
+          - keyEncipherment
+        extended_key_usage:
+          - clientAuth
+      loop: "{{ workloads }}"
+      loop_control:
+        loop_var: workload
+
+    - name: Cleanup temporary files
+      ansible.builtin.file:
+        path: "{{ cert_dir }}"
+        state: absent
diff --git a/ansible/init-data-gzipper.yaml b/ansible/init-data-gzipper.yaml
new file mode 100644
index 00000000..bc21fba4
--- /dev/null
+++ b/ansible/init-data-gzipper.yaml
@@ -0,0 +1,76 @@
+- name: Gzip initdata
+  become: false
+  connection: local
+  hosts: localhost
+  gather_facts: false
+  vars:
+    kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+    cluster_platform: "{{ global.clusterPlatform | default('none') | lower }}"
+    hub_domain: "{{ global.hubClusterDomain | default('none') | lower }}"
+    image_security_policy: "{{ coco.imageSecurityPolicy | default('insecure') }}"
+    template_src: "initdata-default.toml.tpl"
+  tasks:
+    - name: Create temporary working directory
+      ansible.builtin.tempfile:
+        state: directory
+        suffix: initdata
+      register: tmpdir
+    - name: Read KBS TLS secret from Kubernetes  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s_info:
+        kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+        api_version: v1
+        kind: Secret
+        name: kbs-tls-self-signed
+        namespace: imperative
+      register: kbs_secret_result
+
+    - name: Extract and decode certificate from secret
+      ansible.builtin.set_fact:
+        trustee_cert: "{{ kbs_secret_result.resources[0].data['tls.crt'] | b64decode }}"
+      when: kbs_secret_result.resources | length > 0
+
+    - name: Fail if certificate not found
+      ansible.builtin.fail:
+        msg: "KBS TLS certificate not found in secret 'kbs-tls-self-signed' in namespace 'imperative'"
+      when: kbs_secret_result.resources | length == 0
+
+    - name: Define temp file paths
+      ansible.builtin.set_fact:
+        rendered_path: "{{ tmpdir.path }}/rendered.toml"
+
+    - name: Render template to temp file
+      ansible.builtin.template:
+        src: "{{ template_src }}"
+        dest: "{{ rendered_path }}"
+        mode: "0600"
+
+
+    - name: Gzip and base64 encode the rendered content
+      ansible.builtin.shell: |
+        set -o pipefail
+        cat "{{ rendered_path }}" | gzip | base64 -w0
+      register: initdata_encoded
+      changed_when: false
+
+    - name: Compute PCR8 hash from initdata
+      ansible.builtin.shell: |
+        set -o pipefail
+        hash=$(sha256sum "{{ rendered_path }}" | cut -d' ' -f1)
+        initial_pcr=0000000000000000000000000000000000000000000000000000000000000000
+        echo -n "$initial_pcr$hash" | python3 -c "import sys,hashlib; print(hashlib.sha256(bytes.fromhex(sys.stdin.read())).hexdigest())"
+      register: pcr8_hash
+      changed_when: false
+
+    - name: Create/update ConfigMap with gzipped+base64 content  # noqa: syntax-check[unknown-module]
+      kubernetes.core.k8s:
+        kubeconfig: "{{ kubeconfig | default(omit) }}"
+        state: present
+        definition:
+          apiVersion: v1
+          kind: ConfigMap
+          metadata:
+            name: "initdata"
+            namespace: "imperative"
+          data:
+            INITDATA: "{{ initdata_encoded.stdout }}"
+            PCR8_HASH: "{{ pcr8_hash.stdout }}"
diff --git a/ansible/initdata-default.toml.tpl b/ansible/initdata-default.toml.tpl
new file mode 100644
index 00000000..409897f0
--- /dev/null
+++ b/ansible/initdata-default.toml.tpl
@@ -0,0 +1,78 @@
+# NOTE: PodVMs run in separate VMs outside the cluster network, so they cannot
+# resolve cluster-internal service DNS (*.svc.cluster.local). Therefore, we must
+# use the external KBS route even for same-cluster deployments.
+# For multi-cluster deployments, this also points to the trusted cluster's KBS.
+
+algorithm = "sha256"
+version = "0.1.0"
+
+[data]
+"aa.toml" = '''
+[token_configs]
+[token_configs.coco_as]
+url = "https://kbs.{{ hub_domain }}"
+
+[token_configs.kbs]
+url = "https://kbs.{{ hub_domain }}"
+cert = """{{ trustee_cert }}"""
+'''
+
+"cdh.toml" = '''
+socket = 'unix:///run/confidential-containers/cdh.sock'
+credentials = []
+
+[kbc]
+name = "cc_kbc"
+url = "https://kbs.{{ hub_domain }}"
+kbs_cert = """{{ trustee_cert }}"""
+
+[image]
+# Container image signature verification policy
+# Options: insecure, reject, signed (configured via coco.imageSecurityPolicy in values)
+image_security_policy_uri = "kbs:///default/security-policy/{{ image_security_policy }}"
+'''
+
+"policy.rego" = '''
+package agent_policy
+
+import future.keywords.in
+import future.keywords.if
+import future.keywords.every
+
+default AddARPNeighborsRequest := true
+default AddSwapRequest := true
+default CloseStdinRequest := true
+default CopyFileRequest := true
+default CreateContainerRequest := true
+default CreateSandboxRequest := true
+default DestroySandboxRequest := true
+default GetMetricsRequest := true
+default GetOOMEventRequest := true
+default GuestDetailsRequest := true
+default ListInterfacesRequest := true
+default ListRoutesRequest := true
+default
MemHotplugByProbeRequest := true
+default OnlineCPUMemRequest := true
+default PauseContainerRequest := true
+default PullImageRequest := true
+default ReadStreamRequest := true
+default RemoveContainerRequest := true
+default RemoveStaleVirtiofsShareMountsRequest := true
+default ReseedRandomDevRequest := true
+default ResumeContainerRequest := true
+default SetGuestDateTimeRequest := true
+default SignalProcessRequest := true
+default StartContainerRequest := true
+default StartTracingRequest := true
+default StatsContainerRequest := true
+default StopTracingRequest := true
+default TtyWinResizeRequest := true
+default UpdateContainerRequest := true
+default UpdateEphemeralMountsRequest := true
+default UpdateInterfaceRequest := true
+default UpdateRoutesRequest := true
+default WaitProcessRequest := true
+default ExecProcessRequest := false
+default SetPolicyRequest := true
+default WriteStreamRequest := false
+'''
\ No newline at end of file
diff --git a/ansible/install-deps.yaml b/ansible/install-deps.yaml
new file mode 100644
index 00000000..95a6e57b
--- /dev/null
+++ b/ansible/install-deps.yaml
@@ -0,0 +1,18 @@
+- name: Retrieve Credentials for AAP on OpenShift
+  become: false
+  connection: local
+  hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Ensure collection is installed  # noqa: syntax-check[unknown-module]
+      community.general.ansible_galaxy_install:
+        type: collection
+        name: azure.azcollection
+    - name: Ensure community.crypto collection is installed  # noqa: syntax-check[unknown-module]
+      community.general.ansible_galaxy_install:
+        type: collection
+        name: community.crypto
+    - name: Install Azure SDK
+      ansible.builtin.pip:
+        requirements: "~/.ansible/collections/ansible_collections/azure/azcollection/requirements.txt"
+        extra_args: --user
diff --git a/charts/hello-coco/Chart.yaml b/charts/hello-coco/Chart.yaml
new file mode 100644
index 00000000..1f58642d
--- /dev/null
+++ b/charts/hello-coco/Chart.yaml
@@ -0,0 +1,18 @@
+apiVersion: v2
+name: hello-coco
+description: A Helm chart for SPIRE Agent CoCo test pod demonstrates x509pop attestation with KBS
+type: application
+version: 0.0.1
+maintainers:
+  - name: Beraldo Leal
+    email: bleal@redhat.com
+  - name: Chris Butler
+    email: chris.butler@redhat.com
+keywords:
+  - spire
+  - coco
+  - confidentialcontainers
+  - attestation
+  - x509pop
+annotations:
+  category: Test
diff --git a/charts/hello-coco/templates/configmaps.yaml b/charts/hello-coco/templates/configmaps.yaml
new file mode 100644
index 00000000..439b2a96
--- /dev/null
+++ b/charts/hello-coco/templates/configmaps.yaml
@@ -0,0 +1,76 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spire-agent-coco
+  namespace: zero-trust-workload-identity-manager
+data:
+  agent.conf: |
+    {
+      "agent": {
+        "data_dir": "/var/lib/spire",
+        "log_level": "debug",
+        "retry_bootstrap": true,
+        "server_address": "spire-server.zero-trust-workload-identity-manager",
+        "server_port": "443",
+        "socket_path": "/tmp/spire-agent/public/spire-agent.sock",
+        "trust_bundle_path": "/run/spire/bundle/bundle.crt",
+        "trust_domain": "apps.{{ .Values.global.clusterDomain }}"
+      },
+      "health_checks": {
+        "bind_address": "0.0.0.0",
+        "bind_port": 9982,
+        "listener_enabled": true,
+        "live_path": "/live",
+        "ready_path": "/ready"
+      },
+      "plugins": {
+        "KeyManager": [
+          {
+            "disk": {
+              "plugin_data": {
+                "directory": "/var/lib/spire"
+              }
+            }
+          }
+        ],
+        "NodeAttestor": [
+          {
+            "x509pop": {
+              "plugin_data": {
+                "private_key_path": "/sealed/key.pem",
+                "certificate_path": "/sealed/cert.pem"
+              }
+            }
+          }
+        ],
+        "WorkloadAttestor": [
+          {
+            "unix": {
+              "plugin_data": {}
+            }
+          }
+        ]
+      },
+      "telemetry": {
+        "Prometheus": {
+          "host": "0.0.0.0",
+          "port": "9402"
+        }
+      }
+    }
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spiffe-helper-config
+  namespace: zero-trust-workload-identity-manager
+data:
+  helper.conf: |-
+    agent_address = "/tmp/spire-agent/public/spire-agent.sock"
+    cmd = ""
+    cmd_args = ""
+    cert_dir = "/svids"
+    renew_signal = ""
+    svid_file_name = "svid.pem"
+    svid_key_file_name = "svid_key.pem"
+    svid_bundle_file_name = "svid_bundle.pem"
diff --git a/charts/hello-coco/templates/pod.yaml b/charts/hello-coco/templates/pod.yaml
new file mode 100644
index 00000000..2fe85e72
--- /dev/null
+++ b/charts/hello-coco/templates/pod.yaml
@@ -0,0 +1,164 @@
+# SPIRE Agent with x509pop attestation running in CoCo peer pod.
+# Uses CDH sealed secrets for agent credentials (cert/key fetched from KBS after TEE attestation).
+apiVersion: v1
+kind: Pod
+metadata:
+  name: spire-agent-cc
+  namespace: zero-trust-workload-identity-manager
+  labels:
+    app: spire-agent-cc
+spec:
+  runtimeClassName: kata-remote
+  # shareProcessNamespace allows SPIRE agent to inspect workload processes for unix attestation
+  # This is secure because the real isolation boundary is the confidential VM (peer-pod with TEE),
+  # not individual containers. All containers in this pod are part of the same trust boundary.
+  shareProcessNamespace: true
+  serviceAccountName: spire-agent
+  imagePullSecrets:
+    - name: pull-secret
+
+  containers:
+    # SPIRE Agent Sidecar
+    - name: spire-agent
+      image: registry.redhat.io/zero-trust-workload-identity-manager/spiffe-spire-agent-rhel9@sha256:4073ef462525c2ea1326f3c44ec630e33cbab4b428e8314a85d38756c2460831
+      command: ["/bin/sh", "-c"]
+      args:
+        - |
+          echo "=== DEBUG: Checking /sealed mount ==="
+          ls -laR /sealed || echo "/sealed does not exist"
+          echo "=== DEBUG: Content of cert.pem (first 200 bytes) ==="
+          head -c 200 /sealed/cert.pem 2>&1 || echo "Cannot read cert.pem"
+          echo "=== DEBUG: Testing network connectivity to KBS (cluster-internal) ==="
+          curl -k -I https://kbs-service.trustee-operator-system.svc.cluster.local:8080 2>&1 | head -20
+          echo "=== DEBUG: Testing if CDH is running (HTTP on localhost:8006) ==="
+          curl -v http://127.0.0.1:8006/cdh/resource/default/spire-cert-qtodo/cert 2>&1 | head -50
+          echo "=== DEBUG: Starting spire-agent ==="
+          /spire-agent run -config /opt/spire/conf/agent/agent.conf
+      env:
+        - name: PATH
+          value: "/opt/spire/bin:/bin"
+        - name: MY_NODE_NAME
+          value: "coco-vm-node"  # Virtual node name for CoCo
+      ports:
+        - containerPort: 9982
+          name: healthz
+          protocol: TCP
+      livenessProbe:
+        httpGet:
+          path: /live
+          port: healthz
+          scheme: HTTP
+        initialDelaySeconds: 15
+        periodSeconds: 60
+      readinessProbe:
+        httpGet:
+          path: /ready
+          port: healthz
+          scheme: HTTP
+        initialDelaySeconds: 10
+        periodSeconds: 30
+      volumeMounts:
+        - name: spire-config
+          mountPath: /opt/spire/conf/agent
+          readOnly: true
+        - name: spire-bundle
+          mountPath: /run/spire/bundle
+          readOnly: true
+        - name: spire-socket
+          mountPath: /tmp/spire-agent/public
+        - name: spire-persistence
+          mountPath: /var/lib/spire
+        - name: sealed-creds
+          mountPath: /sealed
+          readOnly: true
+      securityContext:
+        readOnlyRootFilesystem: true
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+        seccompProfile:
+          type: RuntimeDefault
+
+    # SPIFFE Helper Sidecar
+    - name: spiffe-helper
+      image: ghcr.io/spiffe/spiffe-helper:0.10.1
+      imagePullPolicy: IfNotPresent
+      args:
+        - "-config"
+        - "/etc/helper.conf"
+      volumeMounts:
+        - name: spiffe-helper-config
+          readOnly: true
+          mountPath: /etc/helper.conf
+          subPath: helper.conf
+        - name: spire-socket
+          readOnly: true
+          mountPath: /tmp/spire-agent/public
+        - name: svids
+          mountPath: /svids
+      securityContext:
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+        readOnlyRootFilesystem: false
+        seccompProfile:
+          type: RuntimeDefault
+
+    # Test Workload Container
+    - name: test-workload
+      image: registry.redhat.io/ubi9/ubi-minimal:latest
+      command: ["/bin/sh", "-c"]
+      args:
+        - |
+          echo "=== SPIRE Agent CoCo Test Started ==="
+          echo "Waiting for SPIFFE certificates..."
+
+          # Wait for SPIFFE certificates
+          while [ ! -f /svids/svid.pem ]; do
+            echo "Waiting for SPIFFE certificates..."
+            sleep 2
+          done
+
+          echo "SPIFFE certificates found!"
+          ls -la /svids/
+
+          echo "=== Testing SPIFFE X.509 certificates ==="
+          echo "Certificate details:"
+          openssl x509 -in /svids/svid.pem -text -noout | head -20
+
+          echo "=== Sleeping for manual inspection ==="
+          sleep 3600
+      volumeMounts:
+        - name: svids
+          mountPath: /svids
+          readOnly: true
+      securityContext:
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+        readOnlyRootFilesystem: false
+        seccompProfile:
+          type: RuntimeDefault
+
+  volumes:
+    - name: spire-config
+      configMap:
+        name: spire-agent-coco
+    - name: spiffe-helper-config
+      configMap:
+        name: spiffe-helper-config
+    - name: spire-bundle
+      configMap:
+        name: spire-bundle
+    - name: spire-socket
+      emptyDir: {}
+    - name: spire-persistence
+      emptyDir: {}
+    - name: sealed-creds
+      secret:
+        secretName: {{ .Values.sealedSecret.name }}
+    - name: svids
+      emptyDir: {}
diff --git a/charts/hello-coco/templates/pull-secret-external.yaml b/charts/hello-coco/templates/pull-secret-external.yaml
new file mode 100644
index 00000000..5757e3b8
--- /dev/null
+++ b/charts/hello-coco/templates/pull-secret-external.yaml
@@ -0,0 +1,21 @@
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: pull-secret
+  namespace: {{ .Release.Namespace }}
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: openshift-config
+    kind: SecretStore
+  target:
+    name: pull-secret
+    template:
+      type: kubernetes.io/dockerconfigjson
+      data:
+        .dockerconfigjson: "{{ `{{ .dockerconfigjson | toString }}` }}"
+  data:
+    - secretKey: dockerconfigjson
+      remoteRef:
+        key: pull-secret
+        property: .dockerconfigjson
diff --git a/charts/hello-coco/templates/pull-secret-rbac.yaml b/charts/hello-coco/templates/pull-secret-rbac.yaml
new file mode 100644
index 00000000..bd97a147
--- /dev/null
+++ b/charts/hello-coco/templates/pull-secret-rbac.yaml
@@ -0,0 +1,34 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: pull-secret-reader
+  namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole +metadata: + name: pull-secret-reader +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["pull-secret"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get"] +- apiGroups: ["authorization.k8s.io"] + resources: ["selfsubjectrulesreviews"] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pull-secret-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pull-secret-reader +subjects: +- kind: ServiceAccount + name: pull-secret-reader + namespace: {{ .Release.Namespace }} diff --git a/charts/hello-coco/templates/pull-secret-store.yaml b/charts/hello-coco/templates/pull-secret-store.yaml new file mode 100644 index 00000000..2032bf98 --- /dev/null +++ b/charts/hello-coco/templates/pull-secret-store.yaml @@ -0,0 +1,17 @@ +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: openshift-config + namespace: {{ .Release.Namespace }} +spec: + provider: + kubernetes: + remoteNamespace: openshift-config + server: + caProvider: + type: ConfigMap + name: kube-root-ca.crt + key: ca.crt + auth: + serviceAccount: + name: pull-secret-reader diff --git a/charts/hello-coco/templates/sealed-secret.yaml b/charts/hello-coco/templates/sealed-secret.yaml new file mode 100644 index 00000000..2769ed02 --- /dev/null +++ b/charts/hello-coco/templates/sealed-secret.yaml @@ -0,0 +1,22 @@ +# Sealed Secret for SPIRE agent x509pop attestation +# +# This creates a K8s Secret with sealed secret references that CDH will unseal +# inside the TEE after successful hardware attestation. +# +# Format: sealed. where JWS is header.payload.signature +# The payload is base64url-encoded (RFC 7515: no padding, URL-safe alphabet). +# Helm's b64enc produces standard base64, so we convert to base64url. 
+#
+{{- define "hello-coco.sealedRef" -}}
+{{- $json := printf `{"version":"0.1.0","type":"vault","name":"kbs:///%s","provider":"kbs","provider_settings":{},"annotations":{}}` . -}}
+sealed.fakejwsheader.{{ $json | b64enc | replace "+" "-" | replace "/" "_" | trimSuffix "=" | trimSuffix "=" }}.fakesignature
+{{- end }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .Values.sealedSecret.name }}
+  namespace: zero-trust-workload-identity-manager
+type: Opaque
+stringData:
+  cert.pem: {{ include "hello-coco.sealedRef" .Values.sealedSecret.certPath | quote }}
+  key.pem: {{ include "hello-coco.sealedRef" .Values.sealedSecret.keyPath | quote }}
diff --git a/charts/hello-coco/values.yaml b/charts/hello-coco/values.yaml
new file mode 100644
index 00000000..9dd2adfa
--- /dev/null
+++ b/charts/hello-coco/values.yaml
@@ -0,0 +1,22 @@
+# Default values for hello coco
+
+# SPIRE trust domain
+# The SPIRE agent must be configured with the same trust domain as the SPIRE Server
+# This ensures the agent can successfully authenticate and workloads receive valid SPIFFE IDs
+# Typically set to apps.<cluster-domain>
+trustDomain: "apps.example.com" + +# KBS URL for CDH (Confidential Data Hub) to fetch sealed secrets after TEE attestation +# Dev (single cluster): http://kbs-service.trustee-operator-system.svc.cluster.local:8080 +# Prod (separate trusted cluster): https://kbs.trusted-cluster.example.com +kbsUrl: "http://kbs-service.trustee-operator-system.svc.cluster.local:8080" + +# Sealed secret configuration for SPIRE agent x509pop attestation +# These are KBS resource paths where the agent cert/key are stored +sealedSecret: + # Name of the K8s Secret to create with sealed references + name: "spire-agent-sealed-creds" + # KBS resource path for the certificate (e.g., default/spire-cert-qtodo/cert) + certPath: "default/spire-cert-qtodo/cert" + # KBS resource path for the private key (e.g., default/spire-key-qtodo/key) + keyPath: "default/spire-key-qtodo/key" diff --git a/docs/CONFIDENTIAL-CONTAINERS.md b/docs/CONFIDENTIAL-CONTAINERS.md new file mode 100644 index 00000000..e5537cc6 --- /dev/null +++ b/docs/CONFIDENTIAL-CONTAINERS.md @@ -0,0 +1,174 @@ +# Confidential Containers Integration + +This document describes how to deploy the Layered Zero Trust Validated +Pattern with Confidential Containers (CoCo) support. CoCo extends the +pattern with hardware-rooted workload identity: SPIRE agent runs inside +a confidential VM (peer pod) and uses x509pop attestation backed by TEE +hardware attestation to KBS. + +## Architecture + +In a production deployment, Trustee (the attestation server) should run +on a separate trusted cluster, since it verifies the integrity of the +infrastructure where workloads run. Running it on the same cluster +means the attestation server shares the untrusted infrastructure it is +supposed to verify. A single cluster deployment is fine for development +and testing. + +The SPIRE agent runs as a sidecar container inside each CoCo peer pod. +This is different from the regular ZTVP deployment where agents run as +a DaemonSet on each node. 
In the CoCo model, the agent must be inside +the confidential VM so that its identity is rooted in hardware +attestation. Each CoCo workload gets its own SPIRE agent instance. + +The trust chain: + +1. Peer pod VM created inside a TEE (AMD SEV-SNP or Intel TDX) +2. Confidential Data Hub (CDH) inside the TEE attests to KBS +3. KBS validates the TEE evidence and returns sealed secrets +4. SPIRE agent loads x509pop certificates from the unsealed secrets +5. Agent connects to SPIRE server and performs x509pop node attestation +6. Workload receives X509-SVID via Unix attestation through spiffe-helper + +## Prerequisites + +- Cloud provider region with confidential VM quota for peer pod VMs + (worker nodes themselves do not need to be confidential) +- Vault as the secret backend + +### Azure Instance Types + +Azure confidential VM SKU families: + +- DCasv5: AMD Milan (SEV-SNP) +- DCasv6: AMD Genoa (SEV-SNP) +- DCesv6: Intel TDX + +Availability varies by region. The default configuration uses +Standard_DC2as_v5. Change the VM flavor in values-coco-dev.yaml under +the sandbox-policies app overrides if your region requires a different +SKU. + +## Deployment + +### 1. Configure clusterGroupName + +Edit values-global.yaml and set the clusterGroupName to coco-dev: + +```yaml +main: + clusterGroupName: coco-dev +``` + +Commit and push this change before deploying. + +### 2. Generate secrets + +Run the pre-deployment scripts from the pattern root: + +```bash +./scripts/gen-secrets-coco.sh +./scripts/get-pcr.sh +``` + +gen-secrets-coco.sh creates the cryptographic keys that Trustee (the +attestation server) needs to authenticate requests. It also copies the +values-secret template if not already present. Safe to re-run (will +not overwrite existing files). + +get-pcr.sh retrieves the expected hardware measurements for the +confidential VM image. Trustee compares these against the measurements +reported by the actual hardware to decide whether a VM is genuine. 
+Requires a Red Hat pull secret (defaults to ~/pull-secret.json, or +set the PULL_SECRET env var). + +Both scripts output to ~/.config/validated-patterns/trustee/. + +### 3. Edit the secrets template + +Edit ~/.config/validated-patterns/values-secret-layered-zero-trust.yaml +and uncomment the CoCo secrets section. Each secret has inline comments +in the template explaining its purpose and how to populate it. + +### 4. Deploy + +```bash +# If deploying from a fork, set TARGET_ORIGIN to your git remote name: +# TARGET_ORIGIN=myfork ./pattern.sh make install +./pattern.sh make install +``` + +Wait for all ArgoCD apps to reach Healthy/Synced. CoCo apps (sandbox, +trustee, sandbox-policies) reference CRDs created by the operators. On +first deploy, ArgoCD may try to sync these apps before the operator +has finished installing and registering its CRDs. This is normal and +resolves automatically once the operator CSV succeeds and ArgoCD +retries the sync. + +The imperative framework runs jobs on a 10-minute schedule for: + +- Azure NAT gateway configuration +- initdata generation and compression +- SPIRE x509pop certificate generation +- SPIRE server x509pop plugin configuration + +### 5. Create SPIRE workload registration entry + +The regular SPIRE agents (DaemonSet) use the k8s workload attestor, +which identifies workloads through the kubelet API. In the CoCo model, +the infrastructure (including Kubernetes) is untrusted. The SPIRE agent +runs inside the confidential VM where the kubelet is not accessible by +design, ensuring workload identity is rooted in hardware attestation +rather than the cluster control plane. The agent uses the Unix workload +attestor instead, which identifies processes by UID over the Unix +socket. 
Because of this, ClusterSPIFFEID CRDs do not apply and
+registration entries must be created manually:
+
+```bash
+oc exec -n zero-trust-workload-identity-manager spire-server-0 -- \
+  spire-server entry create \
+  -parentID "spiffe://<trust-domain>/spire/agent/x509pop/<cert-fingerprint>" \
+  -spiffeID "spiffe://<trust-domain>/ns/zero-trust-workload-identity-manager/sa/spire-agent" \
+  -selector "unix:uid:1000800000"
+```
+
+The parentID cert fingerprint comes from the x509pop certificate. The
+UID is assigned by OpenShift based on the namespace UID range.
+
+## Verification
+
+Check the hello-coco pod is running with 3/3 containers:
+
+```bash
+oc get pod -n zero-trust-workload-identity-manager hello-coco
+```
+
+Check that SVIDs were issued:
+
+```bash
+oc exec -n zero-trust-workload-identity-manager hello-coco \
+  -c test-workload -- ls -la /svids/
+```
+
+Expected files: svid.pem, svid_key.pem, svid_bundle.pem.
+
+Verify attestation from inside the TEE:
+
+```bash
+oc exec -n zero-trust-workload-identity-manager hello-coco \
+  -c test-workload -- \
+  curl http://127.0.0.1:8006/cdh/resource/default/attestation-status/status
+```
+
+Should return the value configured in the attestationStatus secret.
+
+## Known Limitations
+
+1. The ZTWIM operator CRD does not support x509pop plugin configuration.
+   An imperative job patches the SPIRE server ConfigMap and StatefulSet
+   directly. CREATE_ONLY_MODE must be enabled to prevent the operator
+   from reverting these patches.
+
+2. For now, SPIRE workload registration entries for CoCo pods must be
+   created manually. The ClusterSPIFFEID CRD only works with
+   k8s-attested agents. We are working on alternatives to automate this.
diff --git a/overrides/values-Azure.yaml b/overrides/values-Azure.yaml new file mode 100644 index 00000000..b2a15e5e --- /dev/null +++ b/overrides/values-Azure.yaml @@ -0,0 +1,8 @@ +# Azure platform-specific configuration + +# CoCo confidential computing configuration for Azure +global: + coco: + azure: + defaultVMFlavour: "Standard_DC2eds_v5" + VMFlavours: "Standard_DC2eds_v5,Standard_DC4eds_v5,Standard_DC8eds_v5,Standard_DC16eds_v5" diff --git a/overrides/values-sandbox.yaml b/overrides/values-sandbox.yaml new file mode 100644 index 00000000..cf7cf984 --- /dev/null +++ b/overrides/values-sandbox.yaml @@ -0,0 +1,8 @@ +# Override the default values for the sandboxed-containers chart +# Configures External Secrets Operator integration for Azure SSH keys + +# Secret store configuration for External Secrets Operator +# Points to the ClusterSecretStore that knows how to connect to Vault +secretStore: + name: vault-backend + kind: ClusterSecretStore diff --git a/overrides/values-trustee.yaml b/overrides/values-trustee.yaml new file mode 100644 index 00000000..a6048365 --- /dev/null +++ b/overrides/values-trustee.yaml @@ -0,0 +1,27 @@ +# Override the default values for the trustee chart +# This lists the secret resources that are uploaded to your chosen ESO backend (default: Vault). +# It does not contain the secrets themselves, only references to Vault paths. 
+#
+# NOTE: When adding new CoCo workloads to coco.workloads, you must also add
+# corresponding spire-cert-{workload} and spire-key-{workload} entries here

+# Secret store configuration for External Secrets Operator
+# Points to the ClusterSecretStore that knows how to connect to Vault
+secretStore:
+  name: vault-backend
+  kind: ClusterSecretStore
+
+global:
+  coco:
+    secured: true
+
+kbs:
+  secretResources:
+    - name: "passphrase"
+      key: "secret/data/hub/passphrase"
+  # SPIRE x509pop certificates per workload type
+  # Created by the generate-certs imperative job directly as K8s Secrets
+  # in trustee-operator-system (not managed by Vault/ESO)
+  extraSecrets:
+    - spire-cert-qtodo
+    - spire-key-qtodo
diff --git a/scripts/gen-secrets-coco.sh b/scripts/gen-secrets-coco.sh
new file mode 100755
index 00000000..4e8882ed
--- /dev/null
+++ b/scripts/gen-secrets-coco.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+set -e
+
+# Generate cryptographic material required by the CoCo components.
+# Run this once before your first deployment.
+#
+# Creates:
+# - KBS Ed25519 keypair for Trustee admin API authentication
+# - Copies values-secret.yaml.template to ~/.config/validated-patterns/values-secret-<pattern-name>.yaml (if not present)
+#
+# Will not overwrite existing files. Delete ~/.config/validated-patterns/trustee/
+# to regenerate the keypair.
+
+SECRETS_DIR="${HOME}/.config/validated-patterns/trustee"
+KBS_PRIVATE_KEY="${SECRETS_DIR}/kbsPrivateKey"
+KBS_PUBLIC_KEY="${SECRETS_DIR}/kbsPublicKey"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PATTERN_DIR="${SCRIPT_DIR}/.."
+ +# Determine pattern name from values-global.yaml +PATTERN_NAME=$(yq eval '.global.pattern' "${PATTERN_DIR}/values-global.yaml" 2>/dev/null) +if [ -z "$PATTERN_NAME" ] || [ "$PATTERN_NAME" == "null" ]; then + echo "ERROR: Could not determine pattern name from values-global.yaml" + exit 1 +fi + +VALUES_FILE="${HOME}/.config/validated-patterns/values-secret-${PATTERN_NAME}.yaml" + +mkdir -p "${SECRETS_DIR}" + +# Generate KBS Ed25519 keypair +if [ ! -f "${KBS_PRIVATE_KEY}" ]; then + echo "Generating KBS Ed25519 keypair..." + rm -f "${KBS_PUBLIC_KEY}" + openssl genpkey -algorithm ed25519 > "${KBS_PRIVATE_KEY}" + openssl pkey -in "${KBS_PRIVATE_KEY}" -pubout -out "${KBS_PUBLIC_KEY}" + chmod 600 "${KBS_PRIVATE_KEY}" + echo " Private key: ${KBS_PRIVATE_KEY}" + echo " Public key: ${KBS_PUBLIC_KEY}" +else + echo "KBS keypair already exists, skipping." +fi + +# Copy values-secret template +if [ ! -f "${VALUES_FILE}" ]; then + echo "Copying values-secret template to ${VALUES_FILE}" + echo "Please review before deploying." + cp "${PATTERN_DIR}/values-secret.yaml.template" "${VALUES_FILE}" +else + echo "Values file already exists: ${VALUES_FILE}" +fi + +echo "" +echo "Next steps:" +echo " 1. Run ./scripts/get-pcr.sh to retrieve PCR measurements" +echo " 2. Review ${VALUES_FILE}" +echo " 3. Run ./pattern.sh make install" diff --git a/scripts/get-pcr.sh b/scripts/get-pcr.sh new file mode 100755 index 00000000..a4129501 --- /dev/null +++ b/scripts/get-pcr.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash +set -e + +# Retrieve PCR measurements from the sandboxed container operator's dm-verity image. +# These measurements are required for the pcrStash secret used by Trustee for attestation. +# Run this before ./pattern.sh make load-secrets. + +OUTPUT_DIR="${HOME}/.config/validated-patterns/trustee" + +# 1. Locate pull secret +PULL_SECRET_PATH="${HOME}/pull-secret.json" +if [ ! -f "$PULL_SECRET_PATH" ]; then + if [ -n "${PULL_SECRET}" ]; then + PULL_SECRET_PATH="${PULL_SECRET}" + if [ ! 
-f "$PULL_SECRET_PATH" ]; then + echo "ERROR: Pull secret file not found at path specified in PULL_SECRET: $PULL_SECRET_PATH" + exit 1 + fi + else + echo "ERROR: Pull secret not found at ~/pull-secret.json" + echo "Please either place your pull secret at ~/pull-secret.json or set the PULL_SECRET environment variable" + exit 1 + fi +fi + +echo "Using pull secret: $PULL_SECRET_PATH" + +# 2. Check for required tools +for cmd in yq skopeo jq podman; do + if ! command -v "$cmd" &> /dev/null; then + echo "ERROR: $cmd is required but not installed" + exit 1 + fi +done + +# 3. Check values-global.yaml exists +if [ ! -f "values-global.yaml" ]; then + echo "ERROR: values-global.yaml not found in current directory" + echo "Please run this script from the root directory of the project" + exit 1 +fi + +# 4. Get the active clusterGroupName from values-global.yaml +CLUSTER_GROUP_NAME=$(yq eval '.main.clusterGroupName' values-global.yaml) + +if [ -z "$CLUSTER_GROUP_NAME" ] || [ "$CLUSTER_GROUP_NAME" == "null" ]; then + echo "ERROR: Could not determine clusterGroupName from values-global.yaml" + echo "Expected: main.clusterGroupName to be set" + exit 1 +fi + +echo "Active clusterGroup: $CLUSTER_GROUP_NAME" + +# 5. Locate the values file for the active clusterGroup +VALUES_FILE="values-${CLUSTER_GROUP_NAME}.yaml" + +if [ ! -f "$VALUES_FILE" ]; then + echo "ERROR: Values file for clusterGroup not found: $VALUES_FILE" + exit 1 +fi + +# 6. 
Get the sandboxed container operator CSV from the clusterGroup values +SANDBOX_CSV=$(yq eval '.clusterGroup.subscriptions.sandbox.csv // .clusterGroup.subscriptions.sandboxed.csv' "$VALUES_FILE") + +if [ -z "$SANDBOX_CSV" ] || [ "$SANDBOX_CSV" == "null" ]; then + echo "ERROR: No sandboxed container operator CSV found in $VALUES_FILE" + echo "The subscription clusterGroup.subscriptions.sandbox.csv (or .sandboxed.csv) is not defined" + exit 1 +fi + +# Extract version from CSV (e.g., "sandboxed-containers-operator.v1.11.0" -> "1.11.0") +SANDBOX_VERSION="${SANDBOX_CSV##*.v}" + +echo "Sandboxed container operator CSV: $SANDBOX_CSV" +echo "Version: $SANDBOX_VERSION" + +VERITY_IMAGE=registry.redhat.io/openshift-sandboxed-containers/osc-dm-verity-image + +TAG=$(skopeo inspect --authfile "$PULL_SECRET_PATH" "docker://${VERITY_IMAGE}:${SANDBOX_VERSION}" | jq -r .Digest) + +IMAGE=${VERITY_IMAGE}@${TAG} + +echo "IMAGE: $IMAGE" + +# Ensure output directory exists +mkdir -p "$OUTPUT_DIR" + +# Clean up any existing measurement files +rm -f "$OUTPUT_DIR/measurements-raw.json" "$OUTPUT_DIR/measurements.json" + +# Download the measurements using podman cp +podman pull --authfile "$PULL_SECRET_PATH" "$IMAGE" + +cid=$(podman create --entrypoint /bin/true "$IMAGE") +echo "CID: ${cid}" +podman cp "$cid:/image/measurements.json" "$OUTPUT_DIR/measurements-raw.json" +podman rm "$cid" + +# Trim leading "0x" from all measurement values +jq 'walk(if type == "string" and startswith("0x") then .[2:] else . 
end)' \ + "$OUTPUT_DIR/measurements-raw.json" > "$OUTPUT_DIR/measurements.json" + +echo "Measurements saved to $OUTPUT_DIR/measurements.json (0x prefixes removed)" diff --git a/values-coco-dev.yaml b/values-coco-dev.yaml new file mode 100644 index 00000000..731b4a43 --- /dev/null +++ b/values-coco-dev.yaml @@ -0,0 +1,521 @@ +# CoCo Development Configuration (Single Cluster) +# Combines ZTVP (SPIRE/Keycloak/Vault) with CoCo (Trustee/Sandboxed Containers) +# All components deployed on single cluster for development/testing +# +# WARNING: NOT RECOMMENDED FOR PRODUCTION +# This configuration runs Trustee/KBS on the same untrusted cluster as worker nodes. +# Production deployments should use multi-cluster setup with Trustee on a trusted cluster. + +global: + options: + autoApproveManualInstallPlans: true + +# This spire config is required to fix a bug in the zero-trust-workload-identity-manager operator +spire: + oidcDiscoveryProvider: + ingress: + enabled: true + annotations: + route.openshift.io/termination: reencrypt + route.openshift.io/destination-ca-certificate-secret: spire-bundle + +# Moved outside of clusterGroup to avoid validation errors +# CoCo workload types for x509pop certificate generation +# One SPIRE agent certificate is generated per workload type +# All pods of the same workload type (e.g., all qtodo pods) share the same agent certificate +# NOTE: KBS resource policy should be added to enforce workload-specific certificate access +coco: + # Container image signature verification policy + # Options: insecure (accept all), reject (reject all), signed (require cosign signature) + # See values-secret.yaml.template for policy definitions + imageSecurityPolicy: insecure + workloads: + - name: "qtodo" + namespace: "qtodo" + +clusterGroup: + name: coco-dev + isHubCluster: true + namespaces: + - open-cluster-management + - vault + # - qtodo # COMMENTED OUT for coco-dev + - golang-external-secrets + # COMMENTED OUT for coco-dev: Keycloak not needed + # - 
keycloak-system: + # operatorGroup: true + # targetNamespace: keycloak-system + - cert-manager + - cert-manager-operator: + operatorGroup: true + targetNamespace: cert-manager-operator + # Layer 1: Quay Registry (for container image storage and signing) + # COMMENTED OUT: Uncomment to enable integrated Quay registry + # - openshift-storage: + # operatorGroup: true + # targetNamespace: openshift-storage + # annotations: + # openshift.io/cluster-monitoring: "true" + # argocd.argoproj.io/sync-wave: "-5" # Propagated to OperatorGroup by framework + # - quay-enterprise: + # annotations: + # argocd.argoproj.io/sync-wave: "1" # Create before NooBaa and all Quay components + # labels: + # openshift.io/cluster-monitoring: "true" + # RHTAS namespace (required when RHTAS application is enabled) + # COMMENTED OUT: Uncomment to enable RHTAS with SPIFFE signing + # - trusted-artifact-signer: + # annotations: + # argocd.argoproj.io/sync-wave: "1" # Auto-created by RHTAS operator + # labels: + # openshift.io/cluster-monitoring: "true" + - zero-trust-workload-identity-manager: + operatorGroup: true + targetNamespace: zero-trust-workload-identity-manager + - openshift-compliance: + operatorGroup: true + targetNamespace: openshift-compliance + annotations: + openshift.io/cluster-monitoring: "true" + # CoCo namespaces + - openshift-sandboxed-containers-operator + - trustee-operator-system + subscriptions: + acm: + name: advanced-cluster-management + namespace: open-cluster-management + channel: release-2.14 + catalogSource: redhat-operators + cert-manager: + name: openshift-cert-manager-operator + namespace: cert-manager-operator + channel: stable-v1 + catalogSource: redhat-marketplace + # COMMENTED OUT for coco-dev: Keycloak operator not needed + # rhbk: + # name: rhbk-operator + # namespace: keycloak-system + # channel: stable-v26.2 + # catalogSource: redhat-marketplace + zero-trust-workload-identity-manager: + name: openshift-zero-trust-workload-identity-manager + namespace: 
zero-trust-workload-identity-manager + channel: stable-v1 + catalogSource: redhat-marketplace + config: + env: + - name: CREATE_ONLY_MODE + value: "true" + compliance-operator: + name: compliance-operator + namespace: openshift-compliance + channel: stable + catalogSource: redhat-marketplace + config: + nodeSelector: + node-role.kubernetes.io/worker: "" + # CoCo subscriptions + sandboxed: + name: sandboxed-containers-operator + namespace: openshift-sandboxed-containers-operator + source: redhat-operators + channel: stable + installPlanApproval: Manual + csv: sandboxed-containers-operator.v1.11.1 + trustee: + name: trustee-operator + namespace: trustee-operator-system + source: redhat-operators + channel: stable + installPlanApproval: Manual + csv: trustee-operator.v1.0.0 + config: + env: + - name: KBS_IMAGE_NAME + # trustee-operator v1.0.0 ships a KBS image that only validates + # AMD Milan VCEK certificates. This image includes the fix for + # Genoa (DCasv6). Remove once a new operator release ships the fix. 
+ # See: https://github.com/confidential-containers/trustee/pull/1184 + value: "quay.io/confidential-containers/trustee:v1.0.0-genoa-fix" + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 10m + memory: 256Mi + # Storage and Registry operator subscriptions + # COMMENTED OUT: Uncomment to enable integrated Quay registry + # ODF provides object storage backend (NooBaa) for Quay and RHTPA + # odf: + # name: odf-operator + # namespace: openshift-storage + # channel: stable-4.19 + # annotations: + # argocd.argoproj.io/sync-wave: "-4" # Install after OperatorGroup (-5) + # quay-operator: + # name: quay-operator + # namespace: openshift-operators + # channel: stable-3.15 + # annotations: + # argocd.argoproj.io/sync-wave: "-3" # Install after ODF operator + # RHTAS operator subscription (required when RHTAS application is enabled) + # COMMENTED OUT: Uncomment to enable RHTAS with SPIFFE integration + # rhtas-operator: + # name: rhtas-operator + # namespace: openshift-operators + # channel: stable + # annotations: + # argocd.argoproj.io/sync-wave: "-2" # Install after Quay operator, before applications + # catalogSource: redhat-operators + projects: + - hub + # Explicitly mention the cluster-state based overrides we plan to use for this pattern. + # We can use self-referential variables because the chart calls the tpl function with these variables defined + sharedValueFiles: + - '/overrides/values-{{ $.Values.global.clusterPlatform }}.yaml' + # sharedValueFiles is a flexible mechanism that will add the listed valuefiles to every app defined in the + # applications section. We intend this to supplement and possibly even replace previous "magic" mechanisms, though + # we do not at present have a target date for removal. 
+ # + # To replicate the "classic" magic include structure, the clusterGroup would need all of these + # sharedValueFiles, in this order: + # - '/overrides/values-{{ $.Values.global.clusterPlatform }}.yaml' + # - '/overrides/values-{{ $.Values.global.clusterPlatform }}-{{ $.Values.global.clusterVersion }}.yaml' + # - '/overrides/values-{{ $.Values.global.clusterPlatform }}-{{ $.Values.clusterGroup.name }}.yaml' + # - '/overrides/values-{{ $.Values.global.clusterVersion }}-{{ $.Values.clusterGroup.name }}.yaml" + # - '/overrides/values-{{ $.Values.global.localClusterName }}.yaml' + + # This kind of variable substitution will work with any of the variables the Validated Patterns operator knows + # about and sets, so this is also possible, for example: + # - '/overrides/values-{{ $.Values.global.hubClusterDomain }}.yaml' + # - '/overrides/values-{{ $.Values.global.localClusterDomain }}.yaml' + applications: + acm: + name: acm + namespace: open-cluster-management + project: hub + chart: acm + chartVersion: 0.1.* + ignoreDifferences: + - group: internal.open-cluster-management.io + kind: ManagedClusterInfo + jsonPointers: + - /spec/loggingCA + # We override the secret store because we are not provisioning clusters + overrides: + - name: global.secretStore.backend + value: none + acm-managed-clusters: + name: acm-managed-clusters + project: hub + path: charts/acm-managed-clusters + ignoreDifferences: + - group: cluster.open-cluster-management.io + kind: ManagedCluster + jsonPointers: + - /metadata/labels/cloud + - /metadata/labels/vendor + compliance-scanning: + name: compliance-scanning + namespace: openshift-compliance + annotations: + argocd.argoproj.io/sync-wave: '-30' + project: hub + path: charts/compliance-scanning + vault: + name: vault + namespace: vault + project: hub + chart: hashicorp-vault + chartVersion: 0.1.* + policies: [] + jwt: + enabled: true + oidcDiscoveryUrl: 
https://spire-spiffe-oidc-discovery-provider.zero-trust-workload-identity-manager.svc.cluster.local + oidcDiscoveryCa: /run/secrets/kubernetes.io/serviceaccount/service-ca.crt + defaultRole: qtodo + roles: + - name: qtodo + audience: qtodo + subject: spiffe://apps.{{ $.Values.global.clusterDomain }}/ns/qtodo/sa/qtodo + policies: + - global-secret + # Shared Object Storage Backend + # COMMENTED OUT: Uncomment to enable integrated Quay registry + # NooBaa MCG provides S3-compatible object storage for multiple applications + # Direct consumers: Quay (container image storage) + # noobaa-mcg: + # name: noobaa-mcg + # namespace: openshift-storage + # project: hub + # path: charts/noobaa-mcg + # annotations: + # argocd.argoproj.io/sync-wave: "5" # Deploy after core services + # Quay Container Registry (uses NooBaa for storage) + # quay-registry: + # name: quay-registry + # namespace: quay-enterprise + # project: hub + # path: charts/quay-registry + # annotations: + # argocd.argoproj.io/sync-wave: "10" # Deploy after NooBaa storage backend + # RHTAS with SPIFFE Integration + # COMMENTED OUT: Uncomment to enable RHTAS with SPIFFE and Email issuers + # Depends on: Vault, SPIRE, Keycloak (for Email OIDC issuer if used) + # trusted-artifact-signer: + # name: trusted-artifact-signer + # namespace: trusted-artifact-signer + # project: hub + # path: charts/rhtas-operator + # annotations: + # argocd.argoproj.io/sync-wave: "15" # Deploy after dependencies + # overrides: + # # OIDC Issuer Configuration - Both can be enabled simultaneously + # # Enable SPIFFE issuer for workload identity + # - name: rhtas.zeroTrust.spire.enabled + # value: "true" + # - name: rhtas.zeroTrust.spire.trustDomain + # value: "apps.{{ $.Values.global.clusterDomain }}" + # - name: rhtas.zeroTrust.spire.issuer + # value: "https://spire-spiffe-oidc-discovery-provider.apps.{{ $.Values.global.clusterDomain }}" + # # Enable Keycloak issuer for user/email authentication + # - name: rhtas.zeroTrust.email.enabled + 
# value: "true" + # - name: rhtas.zeroTrust.email.issuer + # value: https://keycloak.apps.{{ $.Values.global.clusterDomain }}/realms/ztvp + golang-external-secrets: + name: golang-external-secrets + namespace: golang-external-secrets + project: hub + chart: golang-external-secrets + chartVersion: 0.1.* + # COMMENTED OUT for coco-dev: Keycloak is for user auth (not needed for CoCo testing) + # rh-keycloak: + # name: rh-keycloak + # namespace: keycloak-system + # project: hub + # path: charts/keycloak + rh-cert-manager: + name: rh-cert-manager + namespace: cert-manager-operator + project: hub + path: charts/certmanager + zero-trust-workload-identity-manager: + name: zero-trust-workload-identity-manager + namespace: zero-trust-workload-identity-manager + project: hub + path: charts/zero-trust-workload-identity-manager + overrides: + - name: spire.clusterName + value: hub + # COMMENTED OUT for coco-dev: qtodo demo app requires Keycloak/Vault (not needed for CoCo testing) + # qtodo: + # name: qtodo + # namespace: qtodo + # project: hub + # path: charts/qtodo + # overrides: + # - name: app.oidc.enabled + # value: "true" + # - name: app.spire.enabled + # value: "true" + # - name: app.vault.url + # value: https://vault.vault.svc.cluster.local:8200 + # - name: app.vault.role + # value: qtodo + # - name: app.vault.secretPath + # value: secret/data/global/qtodo + trustee: + name: trustee + namespace: trustee-operator-system + project: hub + chart: trustee + chartVersion: 0.2.* + extraValueFiles: + - /overrides/values-trustee.yaml + sandbox: + name: sandbox + namespace: openshift-sandboxed-containers-operator + project: hub + chart: sandboxed-containers + chartVersion: 0.2.* + overrides: + - name: global.secretStore.backend + value: vault + - name: secretStore.name + value: vault-backend + - name: secretStore.kind + value: ClusterSecretStore + # CoCo peer-pods configuration via ACM Policy + # Creates peer-pods-cm ConfigMap with platform-specific cluster configuration + # Uses 
ACM Policy template functions (fromConfigMap) to auto-discover cluster settings + # from cloud-controller-manager instead of requiring manual oc commands or imperative jobs + # Required for peer-pods to provision confidential VMs in the correct network environment + sandbox-policies: + name: sandbox-policies + namespace: openshift-sandboxed-containers-operator + project: hub + chart: sandboxed-policies + chartVersion: 0.1.* + overrides: + - name: global.coco.azure.defaultVMFlavour + value: Standard_DC2as_v5 + - name: global.coco.azure.VMFlavours + value: "Standard_DC2as_v5,Standard_DC4as_v5,Standard_DC8as_v5,Standard_DC16as_v5" + - name: global.coco.azure.tags + value: "owner=bleal@redhat.com" + - name: global.coco.azure.rootVolumeSize + value: "20" + hello-coco: + name: hello-coco + namespace: zero-trust-workload-identity-manager + project: hub + path: charts/hello-coco + syncPolicy: + automated: {} + retry: + limit: 50 + argoCD: + resourceExclusions: | + - apiGroups: + - internal.open-cluster-management.io + kinds: + - ManagedClusterInfo + clusters: + - "*" + + imperative: + # NOTE: We *must* use lists and not hashes. 
As hashes lose ordering once parsed by helm + # The default schedule is every 10 minutes: imperative.schedule + # Total timeout of all jobs is 1h: imperative.activeDeadlineSeconds + # imagePullPolicy is set to always: imperative.imagePullPolicy + # For additional overrides that apply to the jobs, please refer to + # https://hybrid-cloud-patterns.io/imperative-actions/#additional-job-customizations + serviceAccountName: imperative-admin-sa + jobs: + - name: install-deps + playbook: ansible/install-deps.yaml + verbosity: -vvv + timeout: 180 + - name: configure-azure-nat-gateway + playbook: ansible/azure-nat-gateway.yaml + verbosity: -vvv + timeout: 60 + - name: init-data-gzipper + playbook: ansible/init-data-gzipper.yaml + verbosity: -vvv + timeout: 60 + - name: generate-certs + playbook: ansible/generate-certs.yaml + verbosity: -vvv + timeout: 60 + - name: configure-spire-server-x509pop + playbook: ansible/configure-spire-server-x509pop.yaml + verbosity: -vvv + timeout: 240 + managedClusterGroups: {} + # This configuration can be used for Pipeline/DevSecOps (UC-01 / UC-02) + # devel: + # name: devel + # helmOverrides: + # - name: clusterGroup.isHubCluster + # value: false + # clusterSelector: + # matchLabels: + # clusterGroup: devel + # matchExpressions: + # - key: vendor + # operator: In + # values: + # - OpenShift + # production: + # name: production + # helmOverrides: + # - name: clusterGroup.isHubCluster + # value: false + # clusterSelector: + # matchLabels: + # clusterGroup: production + # matchExpressions: + # - key: vendor + # operator: In + # values: + # - OpenShift + # End of Pipeline/DevSecOps configuration + + # exampleRegion: + # name: group-one + # acmlabels: + # - name: clusterGroup + # value: group-one + # helmOverrides: + # - name: clusterGroup.isHubCluster + # value: false +# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate +# +# pipelines: +# name: pipelines +# namespace: production +# project: datacenter +# 
path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: stable +# overrides: +# - name: myparam +# value: myparam +# +# pipelines_staging: +# - name: pipelines +# namespace: staging +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: main +# +# Additional applications +# Be sure to include additional resources your apps will require +# +X machines +# +Y RAM +# +Z CPU +# vendor-app: +# name: vendor-app +# namespace: default +# project: vendor +# path: path/to/myapp +# repoURL: https://github.com/vendor/applications.git +# targetRevision: main + +# managedSites: +# factory: +# name: factory +# # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git +# targetRevision: main +# path: applications/factory +# helmOverrides: +# - name: site.isHubCluster +# value: false +# clusterSelector: +# matchExpressions: +# - key: vendor +# operator: In +# values: +# - OpenShift + + +# List of previously provisioned clusters to import and manage from the Hub cluster +acmManagedClusters: + clusters: [] + # This configuration can be used for Pipeline/DevSecOps (UC-01 / UC-02) + # - name: ztvp-spoke-1 + # clusterGroup: devel + # labels: + # cloud: auto-detect + # vendor: auto-detect + # kubeconfigVaultPath: secret/data/hub/kubeconfig-spoke-1 + # - name: ztvp-spoke-2 + # clusterGroup: production + # labels: + # cloud: auto-detect + # vendor: auto-detect + # kubeconfigVaultPath: secret/data/hub/kubeconfig-spoke-2 diff --git a/values-secret.yaml.template b/values-secret.yaml.template index ac5091b9..9185fc4f 100644 --- a/values-secret.yaml.template +++ b/values-secret.yaml.template @@ -203,6 +203,132 @@ secrets: # value: "your-registry-token" # Replace with your token/password # onMissingValue: error + # =========================================================================== + # COCO (CONFIDENTIAL CONTAINERS) SECRETS + # Uncomment the secrets below when 
deploying with CoCo support. + # Pre-deployment steps: + # 1. Run ./scripts/gen-secrets-coco.sh to generate KBS keypair + # 2. Run ./scripts/get-pcr.sh to retrieve PCR measurements + # =========================================================================== + + # SSH keys for podvm debug access (optional). + # Note: dm-verity based podvm images do not support SSH key injection by design. + # This only works with non-dm-verity images built with SSH debug enabled. + #- name: sshKey + # vaultPrefixes: + # - global + # fields: + # - name: id_rsa.pub + # path: ~/.config/validated-patterns/id_rsa.pub + # - name: id_rsa + # path: ~/.config/validated-patterns/id_rsa + + # Container Image Signature Verification Policy + # Controls which container images are allowed to run in confidential containers. + # The policy is fetched by the TEE via initdata using image_security_policy_uri. + # + # Three policy variants are provided: + # - insecure: Accept all images (for development/testing only) + # - reject: Reject all images (useful for testing policy enforcement) + # - signed: Only accept images signed with cosign (for production) + # + # Select policy in initdata: + # image_security_policy_uri = 'kbs:///default/security-policy/insecure' + # + # TODO: Rename to 'container-image-policy' in trustee-chart to better reflect + # that this is about container image signature verification, not general security policy. + #- name: securityPolicyConfig + # vaultPrefixes: + # - hub + # fields: + # # Accept all images without verification (INSECURE - dev/testing only) + # - name: insecure + # value: | + # { + # "default": [{"type": "insecureAcceptAnything"}], + # "transports": {} + # } + # # Reject all images (useful for testing policy enforcement) + # - name: reject + # value: | + # { + # "default": [{"type": "reject"}], + # "transports": {} + # } + # # Only accept signed images (production) + # # Edit the transports section to add your signed images. 
+ # # Each image needs a corresponding cosign public key in cosign-keys secret. + # - name: signed + # value: | + # { + # "default": [{"type": "reject"}], + # "transports": { + # "docker": { + # "registry.example.com/my-image": [ + # { + # "type": "sigstoreSigned", + # "keyPath": "kbs:///default/cosign-keys/key-0" + # } + # ] + # } + # } + # } + + # PCR measurements for attestation. + # Required: run ./scripts/get-pcr.sh before deploying. + #- name: pcrStash + # vaultPrefixes: + # - hub + # fields: + # - name: json + # path: ~/.config/validated-patterns/trustee/measurements.json + + # Attestation status resource accessible via KBS/CDH from inside the TEE. + # Workloads can fetch this to confirm they are running in an attested environment. + #- name: attestationStatus + # vaultPrefixes: + # - hub + # fields: + # - name: status + # value: 'attested' + # - name: random + # value: '' + # onMissingValue: generate + # vaultPolicy: validatedPatternDefaultPolicy + + # Cosign public keys for image signature verification + # Required when using the "signed" policy above. + # Add your cosign public key files here. 
+ # Generate a cosign key pair: cosign generate-key-pair + #- name: cosign-keys + # vaultPrefixes: + # - hub + # fields: + # - name: key-0 + # path: ~/.config/validated-patterns/trustee/cosign-key-0.pub + + # KBS authentication keys (Ed25519) for Trustee admin API + # Generate with: + # mkdir -p ~/.config/validated-patterns/trustee + # openssl genpkey -algorithm ed25519 > ~/.config/validated-patterns/trustee/kbsPrivateKey + # openssl pkey -in ~/.config/validated-patterns/trustee/kbsPrivateKey -pubout -out ~/.config/validated-patterns/trustee/kbsPublicKey + # chmod 600 ~/.config/validated-patterns/trustee/kbsPrivateKey + #- name: kbsPublicKey + # vaultPrefixes: + # - hub + # fields: + # - name: publicKey + # path: ~/.config/validated-patterns/trustee/kbsPublicKey + + #- name: passphrase + # vaultPrefixes: + # - hub + # fields: + # - name: passphrase + # value: '' + # onMissingValue: generate + # vaultPolicy: validatedPatternDefaultPolicy + # =========================================================================== # HUB-SPECIFIC SECRETS (hub/) # Secrets for hub cluster management (spoke kubeconfigs, etc.)