diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 459e23ba43c8..f96593911d57 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -3490,7 +3490,7 @@ Topics: File: ztp-preparing-the-hub-cluster - Name: Updating GitOps ZTP File: ztp-updating-gitops -- Name: Installing managed clusters with RHACM and SiteConfig resources +- Name: Installing managed clusters with RHACM and ClusterInstance resources File: ztp-deploying-far-edge-sites - Name: Manually installing a single-node OpenShift cluster with GitOps ZTP File: ztp-manual-install diff --git a/edge_computing/ztp-deploying-far-edge-sites.adoc b/edge_computing/ztp-deploying-far-edge-sites.adoc index d5dceddecfa8..dc009d50064f 100644 --- a/edge_computing/ztp-deploying-far-edge-sites.adoc +++ b/edge_computing/ztp-deploying-far-edge-sites.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="ztp-deploying-far-edge-sites"] -= Installing managed clusters with {rh-rhacm} and SiteConfig resources += Installing managed clusters with {rh-rhacm} and ClusterInstance resources include::_attributes/common-attributes.adoc[] :context: ztp-deploying-far-edge-sites @@ -35,7 +35,7 @@ include::modules/ztp-deploying-a-site.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-sno-siteconfig-config-reference_ztp-deploying-far-edge-sites[{sno-caps} SiteConfig CR installation reference] +* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-clusterinstance-config-reference_ztp-deploying-far-edge-sites[{sno-caps} ClusterInstance CR installation reference] include::modules/ztp-sno-accelerated-ztp.adoc[leveloffset=+2] @@ -67,7 +67,7 @@ include::modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc[lev include::modules/ztp-verifying-ipsec.adoc[leveloffset=+2] -include::modules/ztp-sno-siteconfig-config-reference.adoc[leveloffset=+2] +include::modules/ztp-clusterinstance-config-reference.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources @@ -112,7 +112,9 @@ include::modules/ztp-site-cleanup.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* For information about removing a cluster, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#remove-managed-cluster[Removing a cluster from management]. +* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#remove-managed-cluster[Removing a cluster from management]. 
+ +* link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.15/html/multicluster_engine_operator_with_red_hat_advanced_cluster_management/ibio-intro#deprovision-clusters[Deprovisioning clusters] include::modules/ztp-removing-obsolete-content.adoc[leveloffset=+1] diff --git a/edge_computing/ztp-manual-install.adoc b/edge_computing/ztp-manual-install.adoc index b2713852800e..3d1a38621f43 100644 --- a/edge_computing/ztp-manual-install.adoc +++ b/edge_computing/ztp-manual-install.adoc @@ -31,7 +31,7 @@ include::modules/ztp-generating-install-and-config-crs-manually.adoc[leveloffset * xref:../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#root-device-hints_preparing-to-install-with-agent-based-installer[About root device hints] -* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-sno-siteconfig-config-reference_ztp-deploying-far-edge-sites[{sno-caps} SiteConfig CR installation reference] +* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-clusterinstance-config-reference_ztp-deploying-far-edge-sites[{sno-caps} ClusterInstance CR installation reference] include::modules/ztp-creating-the-site-secrets.adoc[leveloffset=+1] diff --git a/modules/ztp-clusterinstance-config-reference.adoc b/modules/ztp-clusterinstance-config-reference.adoc new file mode 100644 index 000000000000..fd457f0135e7 --- /dev/null +++ b/modules/ztp-clusterinstance-config-reference.adoc @@ -0,0 +1,203 @@ +// Module included in the following assemblies: +// +// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-sites.adoc + +:_mod-docs-content-type: REFERENCE +[id="ztp-clusterinstance-config-reference_{context}"] += ClusterInstance CR installation reference + +The following tables describe the cluster-level and node-level `ClusterInstance` custom resource (CR) fields used for cluster installation. + +== Cluster-level ClusterInstance CR fields + +.ClusterInstance CR cluster-level fields +[cols="1,3", options="header"] +|==== +|ClusterInstance CR field +|Description + +|`spec.clusterName` +| The name of the cluster. + +|`spec.baseDomain` +|The base domain to use for the deployed cluster. + +|`spec.pullSecretRef.name` +|The name of the secret containing the pull secret to use when pulling images. The secret must exist in the same namespace as the `ClusterInstance` CR. + +|`spec.clusterImageSetNameRef` +|The name of the `ClusterImageSet` resource indicating which {product-title} version to deploy. + +|`spec.sshPublicKey` +|Optional. The public SSH key to authenticate SSH access to the cluster nodes. + +|`spec.templateRefs` +a|A list of references to cluster-level templates. A cluster-level template consists of a `ConfigMap` in which the keys of the data field represent the kind of the installation manifest(s). Cluster-level templates are instantiated once per cluster. + +|`spec.extraLabels` +a|Optional. Additional cluster-wide labels to be applied to the rendered templates. This is a nested map structure where the outer key is the resource type (for example, `ManagedCluster`) and the inner map contains the label key-value pairs. + +|`spec.extraAnnotations` +|Optional. Additional cluster-wide annotations to be applied to the rendered templates. Uses the same nested map structure as `extraLabels`. + +|`spec.extraManifestsRefs` +a|Optional. A list of `ConfigMap` references containing additional manifests to be applied to the cluster at install time. Manifests must be bundled in `ConfigMap` resources. 
+ +|`spec.suppressedManifests` +|Optional. A list of manifest names to be excluded from the template rendering process by the SiteConfig Operator. + +|`spec.pruneManifests` +a|Optional. A list of manifests to remove. Each entry requires `apiVersion` and `kind`. + +|`spec.installConfigOverrides` +a|Optional. A JSON formatted string that provides a generic way of passing `install-config` parameters. + +[IMPORTANT] +==== +Use the reference configuration as specified in the example `ClusterInstance` CR. Adding additional components back into the system might require additional reserved CPU capacity. +==== +// TODO: Is this note still relevant? + +|`spec.cpuPartitioningMode` +|Optional. Configure workload partitioning by setting the value to `AllNodes`. The default is `None`. To complete the configuration, specify the `isolated` and `reserved` CPUs in the `PerformanceProfile` CR. + +|`spec.networkType` +|Optional. The Container Network Interface (CNI) plug-in to install. Valid values are `OpenShiftSDN` or `OVNKubernetes`. The default is `OVNKubernetes`. + +|`spec.clusterNetwork` +a|Optional. The list of IP address pools for pods. + +|`spec.machineNetwork` +a|Optional. The list of IP address pools for machines. + +|`spec.serviceNetwork` +a|Optional. The list of IP address pools for services. + +|`spec.apiVIPs` +|Optional. The virtual IPs used to reach the OpenShift cluster API. Enter one IP address for single-stack clusters, or up to two for dual-stack clusters. + +|`spec.ingressVIPs` +|Optional. The virtual IPs used for cluster ingress traffic. Enter one IP address for single-stack clusters, or up to two for dual-stack clusters. + +|`spec.additionalNTPSources` +|Optional. A list of NTP sources (hostname or IP) to be added to all cluster hosts. + +|`spec.diskEncryption` +a|Optional. Configure this field to enable disk encryption for cluster nodes + +|`spec.diskEncryption.type` +|Set the disk encryption type. + +|`spec.diskEncryption.tang` +|Optional. Configure Tang server settings for disk encryption. + +|`spec.proxy` +|Optional. Configure proxy settings that you want to use for the install config. + +|`spec.caBundleRef` +|Optional. Reference to a `ConfigMap` containing the bundle of trusted certificates for the host. This field is referenced by image-based installations only. +// TODO: Is this correct? I got it from a matrix in the slidedeck. + +|`spec.platformType` +|Optional. The name for the platform for the installation. Valid values are `BareMetal`, `None`, `VSphere`, `Nutanix`, or `External`. + +|`spec.cpuArchitecture` +|Optional. The software architecture used for nodes that do not have an architecture defined. Valid values are `x86_64`, `aarch64`, or `multi`. The default is `x86_64`. + +|`spec.clusterType` +|Optional. The type of cluster. Valid values are `SNO`, `HighlyAvailable`, `HostedControlPlane`, or `HighlyAvailableArbiter`. + +|`spec.holdInstallation` +|Optional. When set to `true`, prevents installation from happening. Inspection and validation proceed as usual, but installation does not begin until this field is set to `false`. The default is `false`. + +|`spec.ignitionConfigOverride` +|Optional. A JSON formatted string containing the user overrides for the ignition config. + +|`spec.reinstall` +|Optional. Configuration for reinstallation of the cluster. Includes `generation` and `preservationMode` fields. 
+|==== + +== Node-level ClusterInstance CR fields + +.ClusterInstance CR node-level fields +[cols="1,3", options="header"] +|==== +|ClusterInstance CR field +|Description + +|`spec.nodes` +|A list of node objects defining the hosts in the cluster. + +|`spec.nodes[].hostName` +|The desired hostname for the host. For single-node deployments, define a single host. For three-node deployments, define three hosts. For standard deployments, define three hosts with `role: master` and two or more hosts with `role: worker`. +// TODO: Is the additional naming guidance still valid? It was in the SiteConfig resource reference for this field previously. + +|`spec.nodes[].role` +|Optional. The role of the node. Valid values are `master`, `worker`, or `arbiter`. The default is `master`. + +|`spec.nodes[].bmcAddress` +a|The BMC address used to access the host. {ztp} supports iPXE and virtual media booting by using Redfish or IPMI protocols. For more information about BMC addressing, see the "Additional resources" section. + +[NOTE] +==== +In far edge Telco use cases, only virtual media is supported for use with {ztp}. +==== + +|`spec.nodes[].bmcCredentialsName.name` +|The name of the `Secret` CR containing the BMC credentials. When creating the secret, use the same namespace as the `ClusterInstance` CR. + +|`spec.nodes[].bootMACAddress` +|The MAC address used for PXE boot. + +|`spec.nodes[].bootMode` +|Optional. The boot mode for the host. The default value is `UEFI`. Use `UEFISecureBoot` to enable secure boot on the host. + +|`spec.nodes[].rootDeviceHints` +|Optional. Specifies the device for deployment. Use disk identifiers that are stable across reboots. For example, `wwn: ` or `deviceName: /dev/disk/by-path/`. For a detailed list of stable identifiers, see the "About root device hints" section. + +|`spec.nodes[].automatedCleaningMode` +|Optional. When set to `disabled`, the provisioning service does not automatically clean the disk during provisioning and deprovisioning. Set the value to `metadata` to remove the disk's partitioning table only, without fully wiping the disk. The default value is `disabled`. + +|`spec.nodes[].nodeNetwork` +|Optional. Configure the network settings for the node. + +|`spec.nodes[].nodeNetwork.interfaces` +|Optional. Configure the network interfaces for the node. + +|`spec.nodes[].nodeNetwork.config` +|Optional. Configure the NMState network configuration for the node, including interfaces, DNS, and routes. + +|`spec.nodes[].nodeLabels` +|Optional. Specify custom roles for your nodes in your managed clusters. These are additional roles not used by any {product-title} components. When you add a custom role, it can be associated with a custom machine config pool that references a specific configuration for that role. Adding custom labels or roles during installation makes the deployment process more effective and prevents the need for additional reboots after the installation is complete. + +|`spec.nodes[].ignitionConfigOverride` +|Optional. A JSON formatted string containing the overrides for the host's ignition config. Use this field to assign partitions for persistent storage. Adjust the disk ID and size to the specific hardware. + +|`spec.nodes[].installerArgs` +|Optional. A JSON formatted string containing the user overrides for the host's CoreOS installer args. + +|`spec.nodes[].templateRefs` +a|A list of references to node-level templates. 
A node-level template consists of a `ConfigMap` in which the keys of the data field represent the kind of the installation manifests. Node-level templates are instantiated once for each node. + +|`spec.nodes[].extraLabels` +|Optional. Additional node-level labels to be applied to the rendered templates. Uses the same nested map structure as the cluster-level `extraLabels`. + +|`spec.nodes[].extraAnnotations` +|Optional. Additional node-level annotations to be applied to the rendered templates. + +|`spec.nodes[].suppressedManifests` +|Optional. A list of node-level manifest names to be excluded from the template rendering process. + +|`spec.nodes[].pruneManifests` +|Optional. A list of node-level manifests to remove. Each entry requires `apiVersion` and `kind`. + +|`spec.nodes[].cpuArchitecture` +|Optional. The software architecture of the node. If you do not define a value, the value is inherited from `spec.cpuArchitecture`. Valid values are `x86_64` or `aarch64`. + +|`spec.nodes[].ironicInspect` +|Optional. Disable automatic introspection during registration of the BMH by specifying `disabled` for this field. Automatic introspection by the provisioning service is enabled by default. + +|`spec.nodes[].hostRef` +|Optional. Reference to an existing `BareMetalHost` resource located in another namespace. Includes `name` and `namespace` fields. +|==== + diff --git a/modules/ztp-configuring-host-firmware-with-gitops-ztp.adoc b/modules/ztp-configuring-host-firmware-with-gitops-ztp.adoc index 78646ad507b3..bdb57d5a803c 100644 --- a/modules/ztp-configuring-host-firmware-with-gitops-ztp.adoc +++ b/modules/ztp-configuring-host-firmware-with-gitops-ztp.adoc @@ -13,8 +13,9 @@ Tune hosts with specific hardware profiles in your lab and ensure they are optim When you have completed host tuning to your satisfaction, you extract the host profile and save it in your {ztp} repository. Then, you use the host profile to configure firmware settings in the managed cluster hosts that you deploy with {ztp}. -You specify the required hardware profiles in `SiteConfig` custom resources (CRs) that you use to deploy the managed clusters. -The {ztp} pipeline generates the required `HostFirmwareSettings` (`HFS`) and `BareMetalHost` (`BMH`) CRs that are applied to the hub cluster. +You specify the required hardware profiles by creating custom node templates that include `HostFirmwareSettings` CRs, and referencing them in the `spec.nodes[].templateRefs` field of your `ClusterInstance` CR. +The {ztp} pipeline generates the required `HostFirmwareSettings` and `BareMetalHost` CRs that are applied to the hub cluster. +//TODO: Is this true for ClusterInstance workflow too? Use the following best practices to manage your host firmware profiles. diff --git a/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc b/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc index 091a287e0fcd..5a9b6ca09cdb 100644 --- a/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc +++ b/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno_{context}"] -= Configuring IPsec encryption for multi-node clusters using {ztp} and SiteConfig resources += Configuring IPsec encryption for multi-node clusters using {ztp} and ClusterInstance resources You can enable IPsec encryption in managed multi-node clusters that you install using {ztp} and {rh-rhacm-first}.
You can encrypt traffic between the managed cluster and IPsec endpoints external to the managed cluster. All network traffic between nodes on the OVN-Kubernetes cluster network is encrypted with IPsec in Transport mode. @@ -15,6 +15,8 @@ You can encrypt traffic between the managed cluster and IPsec endpoints external * You have logged in to the hub cluster as a user with `cluster-admin` privileges. +* You have installed the SiteConfig Operator in the hub cluster. + * You have configured {rh-rhacm} and the hub cluster for generating the required installation and policy custom resources (CRs) for managed clusters. * You have created a Git repository where you manage your custom site configuration data. @@ -119,40 +121,72 @@ out <1> The `ipsec/import-certs.sh` script generates the Butane and endpoint configuration CRs. <2> Add the `ca.pem` and `left_server.p12` certificate files that are relevant to your network. -. Create a `custom-manifest/` folder in the repository where you manage your custom site configuration data and add the `enable-ipsec.yaml` and `99-ipsec-*` YAML files to the directory. +. Create an `ipsec-manifests/` folder in the repository where you manage your custom site configuration data and add the `enable-ipsec.yaml` and `99-ipsec-*` YAML files to the directory. + -.Example `siteconfig` directory +.Example site configuration directory [source,terminal] ---- -siteconfig - ├── site1-mno-du.yaml - ├── extra-manifest/ - └── custom-manifest - ├── enable-ipsec.yaml - ├── 99-ipsec-master-import-certs.yaml - └── 99-ipsec-worker-import-certs.yaml +site-configs/ + ├── hub-1/ + │ └── clusterinstance-site1-mno-du.yaml + ├── ipsec-manifests/ + │ ├── enable-ipsec.yaml + │ ├── 99-ipsec-master-import-certs.yaml + │ └── 99-ipsec-worker-import-certs.yaml + └── kustomization.yaml ---- -. In your `SiteConfig` CR, add the `custom-manifest/` directory to the `extraManifests.searchPaths` field, as in the following example: +. Create a `kustomization.yaml` file that uses `configMapGenerator` to package your IPsec manifests into a `ConfigMap`: + [source,yaml] ---- -clusters: -- clusterName: "site1-mno-du" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - hub-1/clusterinstance-site1-mno-du.yaml +configMapGenerator: + - name: ipsec-manifests-cm + namespace: site1-mno-du <1> + files: + - ipsec-manifests/enable-ipsec.yaml + - ipsec-manifests/99-ipsec-master-import-certs.yaml + - ipsec-manifests/99-ipsec-worker-import-certs.yaml +generatorOptions: + disableNameSuffixHash: true <2> +---- +<1> The namespace must match the `ClusterInstance` namespace. +<2> Disables the hash suffix so the `ConfigMap` name is predictable. + +. In your `ClusterInstance` CR, reference the `ConfigMap` in the `extraManifestsRefs` field: ++ +[source,yaml] +---- +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance +metadata: + name: "site1-mno-du" + namespace: "site1-mno-du" +spec: + clusterName: "site1-mno-du" networkType: "OVNKubernetes" - extraManifests: - searchPaths: - - extra-manifest/ - - custom-manifest/ + extraManifestsRefs: + - name: ipsec-manifests-cm <1> +# ... ---- +<1> Reference to the `ConfigMap` containing the IPsec certificate import manifests. ++ +[NOTE] +==== +If you have other extra manifests, you can either include them in the same `ConfigMap` or create multiple `ConfigMap` resources and reference them all in `extraManifestsRefs`. +==== . 
Include the `ipsec-config-policy.yaml` config policy file in the `source-crs` directory in GitOps and reference the file in one of the `PolicyGenerator` CRs. -. Commit the `SiteConfig` CR changes and updated files in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption. +. Commit the `ClusterInstance` CR, IPsec manifest files, and `kustomization.yaml` changes in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption. + The Argo CD pipeline detects the changes and begins the managed cluster deployment. + -During cluster provisioning, the {ztp} pipeline appends the CRs in the `custom-manifest/` directory to the default set of extra manifests stored in the `extra-manifest/` directory. +During cluster provisioning, the SiteConfig Operator applies the CRs contained in the referenced `ConfigMap` resources as extra manifests. The IPsec configuration policy is applied as a Day 2 operation after the cluster is provisioned. .Verification diff --git a/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig.adoc b/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig.adoc index 4e118dafa685..bfc6e678190c 100644 --- a/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig.adoc +++ b/modules/ztp-configuring-ipsec-using-ztp-and-siteconfig.adoc @@ -4,7 +4,7 @@ :_module-type: PROCEDURE [id="ztp-configuring-ipsec-using-ztp-and-siteconfig_{context}"] -= Configuring IPsec encryption for {sno} clusters using {ztp} and SiteConfig resources += Configuring IPsec encryption for {sno} clusters using {ztp} and ClusterInstance resources You can enable IPsec encryption in managed {sno} clusters that you install using {ztp} and {rh-rhacm-first}. You can encrypt traffic between the managed cluster and IPsec endpoints external to the managed cluster. All network traffic between nodes on the OVN-Kubernetes cluster network is encrypted with IPsec in Transport mode. @@ -20,6 +20,8 @@ You can also configure IPsec encryption for {sno} clusters with an additional wo * You have logged in to the hub cluster as a user with `cluster-admin` privileges. +* You have installed the SiteConfig Operator in the hub cluster. + * You have configured {rh-rhacm} and the hub cluster for generating the required installation and policy custom resources (CRs) for managed clusters. * You have created a Git repository where you manage your custom site configuration data. @@ -94,40 +96,71 @@ out <1> The `ipsec/build.sh` script generates the Butane and endpoint configuration CRs. <2> You provide `ca.pem` and `left_server.p12` certificate files that are relevant to your network. -. Create a `custom-manifest/` folder in the repository where you manage your custom site configuration data. +. Create an `ipsec-manifests/` folder in the repository where you manage your custom site configuration data. Add the `enable-ipsec.yaml` and `99-ipsec-*` YAML files to the directory. For example: + [source,terminal] ---- -siteconfig - ├── site1-sno-du.yaml - ├── extra-manifest/ - └── custom-manifest - ├── enable-ipsec.yaml - ├── 99-ipsec-worker-endpoint-config.yaml - └── 99-ipsec-master-endpoint-config.yaml +site-configs/ + ├── hub-1/ + │ └── clusterinstance-site1-sno-du.yaml + ├── ipsec-manifests/ + │ ├── enable-ipsec.yaml + │ ├── 99-ipsec-worker-endpoint-config.yaml + │ └── 99-ipsec-master-endpoint-config.yaml + └── kustomization.yaml ---- -. In your `SiteConfig` CR, add the `custom-manifest/` directory to the `extraManifests.searchPaths` field. -For example: +. 
Create a `kustomization.yaml` file that uses `configMapGenerator` to package your IPsec manifests into a `ConfigMap`: ++ +[source,yaml] +---- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - hub-1/clusterinstance-site1-sno-du.yaml +configMapGenerator: + - name: ipsec-manifests-cm + namespace: site1-sno-du <1> + files: + - ipsec-manifests/enable-ipsec.yaml + - ipsec-manifests/99-ipsec-master-endpoint-config.yaml + - ipsec-manifests/99-ipsec-worker-endpoint-config.yaml +generatorOptions: + disableNameSuffixHash: true <2> +---- +<1> The namespace must match the `ClusterInstance` namespace. +<2> Disables the hash suffix so the `ConfigMap` name is predictable. + +. In your `ClusterInstance` CR, reference the `ConfigMap` in the `extraManifestsRefs` field: + [source,yaml] ---- -clusters: -- clusterName: "site1-sno-du" +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance +metadata: + name: "site1-sno-du" + namespace: "site1-sno-du" +spec: + clusterName: "site1-sno-du" networkType: "OVNKubernetes" - extraManifests: - searchPaths: - - extra-manifest/ - - custom-manifest/ + extraManifestsRefs: + - name: ipsec-manifests-cm <1> +# ... ---- +<1> Reference to the `ConfigMap` containing the IPsec manifests. ++ +[NOTE] +==== +If you have other extra manifests, you can either include them in the same `ConfigMap` or create multiple `ConfigMap` resources and reference them all in `extraManifestsRefs`. +==== -. Commit the `SiteConfig` CR changes and updated files in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption. +. Commit the `ClusterInstance` CR, IPsec manifest files, and `kustomization.yaml` changes in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption. + The Argo CD pipeline detects the changes and begins the managed cluster deployment. + -During cluster provisioning, the {ztp} pipeline appends the CRs in the `custom-manifest/` directory to the default set of extra manifests stored in the `extra-manifest/` directory. +During cluster provisioning, the SiteConfig Operator applies the CRs contained in the referenced `ConfigMap` resources as extra manifests. .Verification diff --git a/modules/ztp-configuring-kernel-arguments-for-discovery-iso.adoc b/modules/ztp-configuring-kernel-arguments-for-discovery-iso.adoc index 3f32bcf4257b..67ee1a60ea91 100644 --- a/modules/ztp-configuring-kernel-arguments-for-discovery-iso.adoc +++ b/modules/ztp-configuring-kernel-arguments-for-discovery-iso.adoc @@ -5,7 +5,9 @@ [id="setting-managed-bare-metal-host-kernel-arguments_{context}"] = Configuring Discovery ISO kernel arguments for installations using {ztp} -The {ztp-first} workflow uses the Discovery ISO as part of the {product-title} installation process on managed bare-metal hosts. You can edit the `InfraEnv` resource to specify kernel arguments for the Discovery ISO. This is useful for cluster installations with specific environmental requirements. For example, configure the `rd.net.timeout.carrier` kernel argument for the Discovery ISO to facilitate static networking for the cluster or to receive a DHCP address before downloading the root file system during installation. +The {ztp-first} workflow uses the Discovery ISO as part of the {product-title} installation process on managed bare-metal hosts. You can edit the `InfraEnv` resource to specify kernel arguments for the Discovery ISO. 
This is useful for cluster installations with specific environmental requirements. + +For example, configure the `rd.net.timeout.carrier` kernel argument for the Discovery ISO to facilitate static networking for the cluster or to receive a DHCP address before downloading the root file system during installation. [NOTE] ==== @@ -25,7 +27,7 @@ In {product-title} {product-version}, you can only add kernel arguments. You can + [NOTE] ==== -The `InfraEnv` CR in this example uses template syntax such as `{{ .Cluster.ClusterName }}` that is populated based on values in the `SiteConfig` CR. The `SiteConfig` CR automatically populates values for these templates during deployment. Do not edit the templates manually. +The `InfraEnv` CR in this example uses template syntax such as `{{ .Cluster.ClusterName }}` that is populated based on values in the `ClusterInstance` CR. The `ClusterInstance` CR automatically populates values for these templates during deployment. Do not edit the templates manually. ==== + [source,yaml] @@ -59,28 +61,58 @@ spec: <1> Specify the append operation to add a kernel argument. <2> Specify the kernel argument you want to configure. This example configures the audit kernel argument and the trace kernel argument. -. Commit the `InfraEnv-example.yaml` CR to the same location in your Git repository that has the `SiteConfig` CR and push your changes. The following example shows a sample Git repository structure: - +. Commit the `InfraEnv-example.yaml` file to your Git repository and push your changes. The following example shows a sample Git repository structure: + [source,text] ---- ~/example-ztp/install └── site-install - ├── siteconfig-example.yaml + ├── clusterinstance-example.yaml ├── InfraEnv-example.yaml - ... + └── kustomization.yaml ---- -. Edit the `spec.clusters.crTemplates` specification in the `SiteConfig` CR to reference the `InfraEnv-example.yaml` CR in your Git repository: +. Update the `kustomization.yaml` file to use the `configMapGenerator` field to package the `InfraEnv` CR into a `ConfigMap`: + -[source,yaml,options="nowrap",role="white-space-pre"] +[source,yaml] ---- -clusters: - crTemplates: - InfraEnv: "InfraEnv-example.yaml" +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - clusterinstance-example.yaml <1> +configMapGenerator: + - name: custom-infraenv-cm <2> + namespace: example-cluster <3> + files: + - InfraEnv-example.yaml +generatorOptions: + disableNameSuffixHash: true ---- +<1> The name of the `ClusterInstance` CR. +<2> The name of the `ConfigMap` that contains the custom `InfraEnv` CR. +<3> The namespace must match the `ClusterInstance` namespace. + +. In your `ClusterInstance` CR, reference the `ConfigMap` in the `spec.templateRefs` field: ++ +[source,yaml] +---- +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance +metadata: + name: "example-cluster" + namespace: "example-cluster" +spec: + clusterName: "example-cluster" + templateRefs: + - name: custom-infraenv-cm <1> + namespace: example-cluster +# ... +---- +<1> Reference to the `ConfigMap` CR that contains the custom `InfraEnv` CR template. + +. Commit the `ClusterInstance` CR and `kustomization.yaml` to your Git repository and push your changes. + -When you are ready to deploy your cluster by committing and pushing the `SiteConfig` CR, the build pipeline uses the custom `InfraEnv-example` CR in your Git repository to configure the infrastructure environment, including the custom kernel arguments. 
+When the Argo CD pipeline syncs the changes, the SiteConfig Operator uses the custom `InfraEnv-example` CR from the generated `ConfigMap` to configure the infrastructure environment, including the custom kernel arguments. .Verification To verify that the kernel arguments are applied, after the Discovery image verifies that {product-title} is ready for installation, you can SSH to the target host before the installation process begins. At that point, you can view the kernel arguments for the Discovery ISO in the `/proc/cmdline` file. diff --git a/modules/ztp-creating-the-site-secrets.adoc b/modules/ztp-creating-the-site-secrets.adoc index 6302ea640de5..8a622fcb09fa 100644 --- a/modules/ztp-creating-the-site-secrets.adoc +++ b/modules/ztp-creating-the-site-secrets.adoc @@ -11,8 +11,8 @@ Add the required `Secret` custom resources (CRs) for the managed bare-metal host [NOTE] ==== -The secrets are referenced from the `SiteConfig` CR by name. The namespace -must match the `SiteConfig` namespace. +The secrets are referenced from the `ClusterInstance` CR by name. The namespace +must match the `ClusterInstance` namespace. ==== .Procedure @@ -42,9 +42,9 @@ data: .dockerconfigjson: <4> type: kubernetes.io/dockerconfigjson ---- -<1> Must match the namespace configured in the related `SiteConfig` CR +<1> Must match the namespace configured in the related `ClusterInstance` CR <2> Base64-encoded values for `password` and `username` -<3> Must match the namespace configured in the related `SiteConfig` CR +<3> Must match the namespace configured in the related `ClusterInstance` CR <4> Base64-encoded pull secret . Add the relative path to `example-sno-secret.yaml` to the `kustomization.yaml` file that you use to install the cluster. diff --git a/modules/ztp-creating-ztp-crs-for-multiple-managed-clusters.adoc b/modules/ztp-creating-ztp-crs-for-multiple-managed-clusters.adoc index 0e74611b5a79..f3e77aa892e6 100644 --- a/modules/ztp-creating-ztp-crs-for-multiple-managed-clusters.adoc +++ b/modules/ztp-creating-ztp-crs-for-multiple-managed-clusters.adoc @@ -4,16 +4,13 @@ :_mod-docs-content-type: CONCEPT [id="ztp-creating-ztp-crs-for-multiple-managed-clusters_{context}"] -= Installing managed clusters with SiteConfig resources and {rh-rhacm} += Installing managed clusters with ClusterInstance resources and {rh-rhacm} -{ztp-first} uses `SiteConfig` custom resources (CRs) in a Git repository to manage the processes that install {product-title} clusters. The `SiteConfig` CR contains cluster-specific parameters required for installation. It has options for applying select configuration CRs during installation including user defined extra manifests. - -The {ztp} plugin processes `SiteConfig` CRs to generate a collection of CRs on the hub cluster. This triggers the assisted service in {rh-rhacm-first} to install {product-title} on the bare-metal host. You can find installation status and error messages in these CRs on the hub cluster. +{ztp-first} uses `ClusterInstance` custom resources (CRs) in a Git repository to manage the processes that install {product-title} clusters. The `ClusterInstance` CR contains cluster-specific parameters required for installation. It has options for applying select configuration CRs during installation including user defined extra manifests. +The {ztp} plugin processes `ClusterInstance` CRs to generate a collection of CRs on the hub cluster. This triggers the assisted service in {rh-rhacm-first} to install {product-title} on the bare-metal host. 
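+
+For illustration only, the following minimal `ClusterInstance` sketch shows how these cluster-specific parameters fit together. All names and values are placeholders, and the complete field descriptions are in the ClusterInstance CR installation reference:
+
+[source,yaml]
+----
+apiVersion: siteconfig.open-cluster-management.io/v1alpha1
+kind: ClusterInstance
+metadata:
+  name: "example-sno"
+  namespace: "example-sno"
+spec:
+  clusterName: "example-sno"
+  baseDomain: "example.com"
+  clusterImageSetNameRef: "img-example" # placeholder ClusterImageSet name
+  pullSecretRef:
+    name: "pull-secret" # placeholder Secret in the ClusterInstance namespace
+  templateRefs:
+    - name: "cluster-templates-cm" # placeholder cluster-level template ConfigMap
+      namespace: "example-sno"
+  nodes:
+    - hostName: "example-node1.example.com"
+      role: "master"
+      bmcCredentialsName:
+        name: "example-node1-bmh-secret" # placeholder BMC credentials Secret
+      templateRefs:
+        - name: "node-templates-cm" # placeholder node-level template ConfigMap
+          namespace: "example-sno"
+# ...
+----
+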
You can find installation status and error messages in these CRs on the hub cluster. You can provision single clusters manually or in batches with {ztp}: -Provisioning a single cluster:: Create a single `SiteConfig` CR and related installation and configuration CRs for the cluster, and apply them in the hub cluster to begin cluster provisioning. This is a good way to test your CRs before deploying on a larger scale. - -Provisioning many clusters:: Install managed clusters in batches of up to 400 by defining `SiteConfig` and related CRs in a Git repository. ArgoCD uses the `SiteConfig` CRs to deploy the sites. The {rh-rhacm} policy generator creates the manifests and applies them to the hub cluster. This starts the cluster provisioning process. +Provisioning a single cluster:: Create a single `ClusterInstance` CR and related installation and configuration CRs for the cluster, and apply them in the hub cluster to begin cluster provisioning. This is a good way to test your CRs before deploying on a larger scale. -include::snippets/siteconfig-deprecation-notice.adoc[] +Provisioning many clusters:: Install managed clusters in batches of up to 400 by defining `ClusterInstance` and related CRs in a Git repository. Argo CD uses the `ClusterInstance` CRs to deploy the sites. The {rh-rhacm} policy generator creates the manifests and applies them to the hub cluster. This starts the cluster provisioning process. diff --git a/modules/ztp-deploying-a-site.adoc b/modules/ztp-deploying-a-site.adoc index dbab116a3b62..251c68f0cc26 100644 --- a/modules/ztp-deploying-a-site.adoc +++ b/modules/ztp-deploying-a-site.adoc @@ -4,16 +4,21 @@ :_mod-docs-content-type: PROCEDURE [id="ztp-deploying-a-site_{context}"] -= Deploying a managed cluster with SiteConfig and {ztp} += Deploying a managed cluster with ClusterInstance and {ztp} -Use the following procedure to create a `SiteConfig` custom resource (CR) and related files and initiate the {ztp-first} cluster deployment. +Use the following procedure to create a `ClusterInstance` custom resource (CR) and related files and initiate the {ztp-first} cluster deployment. -include::snippets/siteconfig-deprecation-notice.adoc[] +[NOTE] +==== +You require {rh-rhacm-first} version 2.12 or later to install the SiteConfig Operator and use the `ClusterInstance` CR. +==== .Prerequisites * You have installed the OpenShift CLI (`oc`). +* You have installed the SiteConfig Operator in the hub cluster. + * You have logged in to the hub cluster as a user with `cluster-admin` privileges. * You configured the hub cluster for generating the required installation and policy CRs. @@ -29,11 +34,11 @@ When you create the source repository, ensure that you patch the ArgoCD applicat + Network connectivity:: Your network requires DNS. Managed cluster hosts should be reachable from the hub cluster. Ensure that Layer 3 connectivity exists between the hub cluster and the managed cluster host. + -Baseboard Management Controller (BMC) details:: {ztp} uses BMC username and password details to connect to the BMC during cluster installation. The {ztp} plugin manages the `ManagedCluster` CRs on the hub cluster based on the `SiteConfig` CR in your site Git repo. You create individual `BMCSecret` CRs for each host manually. +Baseboard Management Controller (BMC) details:: {ztp} uses BMC username and password details to connect to the BMC during cluster installation. The {ztp} plugin manages the `ManagedCluster` CRs on the hub cluster based on the `ClusterInstance` CR in your site Git repository.
You create individual `BMCSecret` CRs for each host manually. .Procedure -. Create the required managed cluster secrets on the hub cluster. These resources must be in a namespace with a name matching the cluster name. For example, in `out/argocd/example/siteconfig/example-sno.yaml`, the cluster name and namespace is `example-sno`. +. Create the required managed cluster secrets on the hub cluster. These resources must be in a namespace with a name matching the cluster name. For example, in `out/argocd/example/clusterinstance/example-sno.yaml`, the cluster name and namespace is `example-sno`. .. Export the cluster namespace by running the following command: + @@ -53,12 +58,12 @@ $ oc create namespace $CLUSTERNS + [NOTE] ==== -The secrets are referenced from the `SiteConfig` custom resource (CR) by name. The namespace must match the `SiteConfig` namespace. +The secrets are referenced from the `ClusterInstance` custom resource (CR) by name. The namespace must match the `ClusterInstance` namespace. ==== -. Create a `SiteConfig` CR for your cluster in your local clone of the Git repository: +. Create a `ClusterInstance` CR for your cluster in your local clone of the Git repository: -.. Choose the appropriate example for your CR from the `out/argocd/example/siteconfig/` folder. +.. Choose the appropriate example for your CR from the `out/argocd/example/clusterinstance/` folder. The folder includes example files for single node, three-node, and standard clusters: + *** `example-sno.yaml` @@ -67,7 +72,7 @@ The folder includes example files for single node, three-node, and standard clus .. Change the cluster and host details in the example file to match the type of cluster you want. For example: + -.Example {sno} SiteConfig CR +.Example {sno} ClusterInstance CR [source,yaml] ---- include::snippets/ztp_example-sno.yaml[] @@ -80,14 +85,15 @@ For more information about BMC addressing, see the "Additional resources" sectio + [NOTE] ==== -To override the default `BareMetalHost` CR for a node, you can reference the override CR in the node-level `crTemplates` field in the `SiteConfig` CR. Ensure that you set the `argocd.argoproj.io/sync-wave: "3"` annotation in your override `BareMetalHost` CR. +To override the default `BareMetalHost` CR for a node, create a custom node template in a `ConfigMap` and reference it in the node-level `spec.nodes.templateRefs` field in the `ClusterInstance` CR. Ensure that you set the `argocd.argoproj.io/sync-wave: "3"` annotation in your override `BareMetalHost` CR. ==== +// TODO: Is this wave annotation still relevant? + .. You can inspect the default set of extra-manifest `MachineConfig` CRs in `out/argocd/extra-manifest`. It is automatically applied to the cluster when it is installed. -.. Optional: To provision additional install-time manifests on the provisioned cluster, create a directory in your Git repository, for example, `sno-extra-manifest/`, and add your custom manifest CRs to this directory. If your `SiteConfig.yaml` refers to this directory in the `extraManifestPath` field, any CRs in this referenced directory are appended to the default set of extra manifests. +.. Optional: To provision additional install-time manifests on the provisioned cluster, package your extra manifest CRs in a `ConfigMap` and reference it in the `extraManifestsRefs` field of the `ClusterInstance` CR. For more information, see "Customizing extra installation manifests in the {ztp} pipeline". 
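++
+A minimal sketch of one way to build that `ConfigMap` with Kustomize; the `sno-extra-manifests-cm` name and the `sno-extra-manifest/` directory are placeholders, and the crun files are the ones extracted from the `ztp-site-generate` container:
++
+[source,yaml]
+----
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+configMapGenerator:
+- name: sno-extra-manifests-cm
+  namespace: example-sno # must match the ClusterInstance namespace
+  files:
+  - sno-extra-manifest/enable-crun-master.yaml
+  - sno-extra-manifest/enable-crun-worker.yaml
+generatorOptions:
+  disableNameSuffixHash: true # keeps the ConfigMap name predictable
+----
++
+Reference `sno-extra-manifests-cm` in the `extraManifestsRefs` field of the `ClusterInstance` CR so that the bundled manifests are applied at install time.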
+ -.Enabling the crun OCI container runtime [IMPORTANT] ==== For optimal cluster performance, enable crun for master and worker nodes in {sno}, {sno} with additional worker nodes, {3no}, and standard clusters. @@ -95,12 +101,11 @@ For optimal cluster performance, enable crun for master and worker nodes in {sno Enable crun in a `ContainerRuntimeConfig` CR as an additional Day 0 install-time manifest to avoid the cluster having to reboot. The `enable-crun-master.yaml` and `enable-crun-worker.yaml` CR files are in the `out/source-crs/optional-extra-manifest/` folder that you can extract from the `ztp-site-generate` container. -For more information, see "Customizing extra installation manifests in the {ztp} pipeline". ==== -. Add the `SiteConfig` CR to the `kustomization.yaml` file in the `generators` section, similar to the example shown in `out/argocd/example/siteconfig/kustomization.yaml`. +. Add the `ClusterInstance` CR to the `kustomization.yaml` file in the `generators` section, similar to the example shown in `out/argocd/example/clusterinstance/kustomization.yaml`. -. Commit the `SiteConfig` CR and associated `kustomization.yaml` changes in your Git repository and push the changes. +. Commit the `ClusterInstance` CR and associated `kustomization.yaml` changes in your Git repository and push the changes. + The ArgoCD pipeline detects the changes and begins the managed cluster deployment. diff --git a/modules/ztp-deploying-user-defined-firmware-configuration-with-gitops-ztp.adoc b/modules/ztp-deploying-user-defined-firmware-configuration-with-gitops-ztp.adoc index 8f557ce3f367..f769e8239718 100644 --- a/modules/ztp-deploying-user-defined-firmware-configuration-with-gitops-ztp.adoc +++ b/modules/ztp-deploying-user-defined-firmware-configuration-with-gitops-ztp.adoc @@ -6,112 +6,136 @@ [id="ztp-deploying-user-defined-firmware-configuration-with-gitops-ztp_{context}"] = Deploying user-defined firmware to cluster hosts with {ztp} -You can deploy user-defined firmware settings to cluster hosts by configuring the `SiteConfig` custom resource (CR) to include a hardware profile that you want to apply during cluster host provisioning. +You can deploy user-defined firmware settings to cluster hosts by creating custom node templates that include `HostFirmwareSettings` CRs, and referencing them in the `ClusterInstance` CR. You can configure hardware profiles to apply to hosts in the following scenarios: -* All hosts site-wide -* Only cluster hosts that meet certain criteria -* Individual cluster hosts +* All hosts in the cluster +* Individual hosts in the cluster [IMPORTANT] ==== You can configure host hardware profiles to be applied in a hierarchy. -Cluster-level settings override site-wide settings. -Node level profiles override cluster and site-wide settings. +Node-level profiles override cluster-wide settings. ==== .Prerequisites * You have installed the OpenShift CLI (`oc`). -* You have installed {rh-rhacm-first} and logged in to the hub cluster as a user with `cluster-admin` privileges. +* You have installed {rh-rhacm-first} version 2.12 or later and logged in to the hub cluster as a user with `cluster-admin` privileges. -* You have provisioned a cluster that is managed by {rh-rhacm}. +* You have installed the SiteConfig Operator in the hub cluster. * You created a Git repository where you manage your custom site configuration data. The repository must be accessible from the hub cluster and be defined as a source repository for the Argo CD application. .Procedure -. 
Create the host firmware profile that contain the firmware settings you want to apply. +. Create the `HostFirmwareSettings` CR that contains the firmware settings you want to apply. For example, create the following YAML file: + -.host-firmware.profile +.host-firmware-settings.yaml [source,yaml] ---- -BootMode: Uefi -LogicalProc: Enabled -ProcVirtualization: Enabled +apiVersion: metal3.io/v1alpha1 +kind: HostFirmwareSettings +metadata: + name: "site1-sno-du" + namespace: "site1-sno-du" +spec: + settings: + BootMode: "Uefi" + LogicalProc: "Enabled" + ProcVirtualization: "Enabled" ---- -. Save the hardware profile YAML file relative to the `kustomization.yaml` file that you use to define how to provision the cluster, for example: +. Save the `HostFirmwareSettings` CR file relative to the `kustomization.yaml` file that you use to provision the cluster. +For example: + [source,terminal] ---- -example-ztp/install - └── site-install - ├── siteconfig-example.yaml - ├── kustomization.yaml - └── host-firmware.profile +site-configs/ + └── site1-sno-du/ + ├── clusterinstance-site1-sno-du.yaml + ├── kustomization.yaml + └── host-firmware-settings.yaml ---- -. Edit the `SiteConfig` CR to include the firmware profile that you want to apply in the cluster. +. Create a `ConfigMap` to store the `HostFirmwareSettings` CR. +You can use a `kustomization.yaml` file with `configMapGenerator` to create the `ConfigMap`. For example: + [source,yaml] ---- -apiVersion: ran.openshift.io/v1 -kind: SiteConfig -metadata: - name: "site-plan-cluster" - namespace: "example-cluster-namespace" -spec: - baseDomain: "example.com" - # ... - biosConfigRef: - filePath: "./host-firmware.profile" <1> +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - clusterinstance-site1-sno-du.yaml +configMapGenerator: + - name: host-firmware-settings-cm + namespace: site1-sno-du <1> + files: + - host-firmware-settings.yaml <2> +generatorOptions: + disableNameSuffixHash: true ---- -<1> Applies the hardware profile to all cluster hosts site-wide - -+ -[NOTE] -==== -Where possible, use a single `SiteConfig` CR per cluster. -==== +<1> The namespace must match the `ClusterInstance` namespace. +<2> The name of the `HostFirmwareSettings` CR. -. Optional. To apply a hardware profile to hosts in a specific cluster, update `clusters.biosConfigRef.filePath` with the hardware profile that you want to apply. +. To apply a hardware profile to all hosts in the cluster, reference the `ConfigMap` in the `spec.templateRefs` field of your `ClusterInstance` CR. For example: + [source,yaml] ---- -clusters: - - clusterName: "cluster-1" - # ... - biosConfigRef: - filePath: "./host-firmware.profile" <1> +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance +metadata: + name: "site1-sno-du" + namespace: "site1-sno-du" +spec: + clusterName: "site1-sno-du" + # ... + templateRefs: + - name: host-firmware-settings-cm <1> + namespace: site1-sno-du + nodes: + - hostName: "node1.example.com" + # ... ---- -<1> Applies to all hosts in the `cluster-1` cluster +<1> Applies the firmware profile to all hosts in the cluster. -. Optional. To apply a hardware profile to a specific host in the cluster, update `clusters.nodes.biosConfigRef.filePath` with the hardware profile that you want to apply. +. Optional: To apply a hardware profile to a specific host in the cluster, reference the `ConfigMap` in the `spec.nodes[].templateRefs` field. For example: + [source,yaml] ---- -clusters: - - clusterName: "cluster-1" - # ... 
- nodes: - - hostName: "compute-1.example.com" - # ... - bootMode: "UEFI" - biosConfigRef: - filePath: "./host-firmware.profile" <1> +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance +metadata: + name: "site1-sno-du" + namespace: "site1-sno-du" +spec: + clusterName: "site1-sno-du" + # ... + nodes: + - hostName: "node1.example.com" + # ... + templateRefs: + - name: host-firmware-node1-cm <1> + namespace: site1-sno-du + - hostName: "node2.example.com" + # ... ---- -<1> Applies the firmware profile to the `compute-1.example.com` host in the cluster +<1> Applies the firmware profile only to the `node1.example.com` host. ++ +[NOTE] +==== +Node-level `templateRefs` settings override cluster-level `templateRefs` settings. +==== -. Commit the `SiteConfig` CR and associated `kustomization.yaml` changes in your Git repository and push the changes. +. Commit the `ClusterInstance` CR, `ConfigMap`, and associated `kustomization.yaml` changes in your Git repository and push the changes. + -The ArgoCD pipeline detects the changes and begins the managed cluster deployment. +The Argo CD pipeline detects the changes and begins the managed cluster deployment. + [NOTE] ==== @@ -129,6 +153,8 @@ For example, run the following command: $ oc get hfs -n <managed-cluster-namespace> <managed-cluster-name> -o jsonpath='{.status.conditions[?(@.type=="Valid")].status}' ---- + +** where `<managed-cluster-namespace>` is the namespace of the managed cluster and `<managed-cluster-name>` is the name of the managed cluster. ++ .Example output [source,terminal] ---- diff --git a/modules/ztp-monitoring-installation-progress.adoc b/modules/ztp-monitoring-installation-progress.adoc index 56e8dba4b6de..e7474d1981cc 100644 --- a/modules/ztp-monitoring-installation-progress.adoc +++ b/modules/ztp-monitoring-installation-progress.adoc @@ -6,7 +6,7 @@ [id="ztp-monitoring-deployment-progress_{context}"] = Monitoring managed cluster installation progress -The ArgoCD pipeline uses the `SiteConfig` CR to generate the cluster configuration CRs and syncs it with the hub cluster. You can monitor the progress of the synchronization in the ArgoCD dashboard. +The Argo CD pipeline syncs the `ClusterInstance` CR from the Git repository to the hub cluster. The SiteConfig Operator then processes the `ClusterInstance` CR and generates the required cluster configuration CRs. You can monitor the progress of the cluster installation from the {rh-rhacm} dashboard or from the command line. .Prerequisites diff --git a/modules/ztp-preparing-the-hub-cluster-for-ztp.adoc b/modules/ztp-preparing-the-hub-cluster-for-ztp.adoc index f90995c6ab24..3171134e9a2c 100644 --- a/modules/ztp-preparing-the-hub-cluster-for-ztp.adoc +++ b/modules/ztp-preparing-the-hub-cluster-for-ztp.adoc @@ -10,7 +10,7 @@ You can configure the hub cluster with a set of ArgoCD applications that generat [NOTE] ==== -{rh-rhacm-first} uses `SiteConfig` CRs to generate the Day 1 managed cluster installation CRs for ArgoCD. Each ArgoCD application can manage a maximum of 300 `SiteConfig` CRs. +{rh-rhacm-first} uses `ClusterInstance` CRs to generate the Day 1 managed cluster installation CRs for ArgoCD. Each ArgoCD application can manage a maximum of 300 `ClusterInstance` CRs. ==== .Prerequisites @@ -37,7 +37,7 @@ You can configure the hub cluster with a set of ArgoCD applications that generat *** The `targetRevision` indicates which Git repository branch to monitor. -*** `path` specifies the path to the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` CRs, respectively.
+*** `path` specifies the path to the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` CRs, respectively. [start=2] include::snippets/ztp-patch-argocd-hub-cluster.adoc[] diff --git a/modules/ztp-preparing-the-ztp-git-repository-ver-ind.adoc b/modules/ztp-preparing-the-ztp-git-repository-ver-ind.adoc index 9aacf70fb696..90e798271baf 100644 --- a/modules/ztp-preparing-the-ztp-git-repository-ver-ind.adoc +++ b/modules/ztp-preparing-the-ztp-git-repository-ver-ind.adoc @@ -22,7 +22,7 @@ The following procedure assumes you are using `PolicyGenerator` resources instea .Procedure -. Create a directory structure with separate paths for the `SiteConfig` and `PolicyGenerator` CRs. +. Create a directory structure with separate paths for the `ClusterInstance` and `PolicyGenerator` CRs. . Within the `PolicyGenerator` directory, create a directory for each {product-title} version you want to make available. For each version, create the following resources: @@ -31,7 +31,7 @@ For each version, create the following resources: + If you want to work with user-provided CRs, you must create a separate directory for them. -. In the `/siteconfig` directory, create a subdirectory for each {product-title} version you want to make available. For each version, create at least one directory for reference CRs to be copied from the container. There is no restriction on the naming of directories or on the number of reference directories. If you want to work with custom manifests, you must create a separate directory for them. +. In the `/clusterinstance` directory, create a subdirectory for each {product-title} version you want to make available. For each version, create at least one directory for reference CRs to be copied from the container. There is no restriction on the naming of directories or on the number of reference directories. If you want to work with custom manifests, you must create a separate directory for them. + The following example describes a structure using user-provided manifests and CRs for different versions of {product-title}: + @@ -59,7 +59,7 @@ The following example describes a structure using user-provided manifests and CR │ └── source-crs/ <4> │ └── reference-crs/ <5> │ └── custom-crs/ <6> -└── siteconfig +└── clusterinstance ├── kustomization.yaml ├── version_4.13 │ ├── helix56-v413.yaml @@ -69,8 +69,8 @@ The following example describes a structure using user-provided manifests and CR └── version_4.14 ├── helix57-v414.yaml ├── kustomization.yaml - ├── extra-manifest/ <7> - └── custom-manifest/ <8> + ├── extra-manifest/ + └── custom-manifest/ ---- <1> Create a top-level `kustomization` YAML file. @@ -79,32 +79,56 @@ The following example describes a structure using user-provided manifests and CR <4> Create a `source-crs` directory for each version to contain reference CRs from the `ztp-site-generate` container. <5> Create the `reference-crs` directory for policy CRs that are extracted from the ZTP container. <6> Optional: Create a `custom-crs` directory for user-provided CRs. -<7> Create a directory within the custom `/siteconfig` directory to contain extra manifests from the `ztp-site-generate` container. +<7> Create a directory within the custom `/clusterinstance` directory to contain extra manifests from the `ztp-site-generate` container. <8> Create a folder to hold user-provided manifests. 
+ [NOTE] ==== -In the previous example, each version subdirectory in the custom `/siteconfig` directory contains two further subdirectories, one containing the reference manifests copied from the container, the other for custom manifests that you provide. +In the example directory structure, each version subdirectory in the custom `/clusterinstance` directory contains two further subdirectories, one containing the reference manifests copied from the container, the other for custom manifests that you provide. The names assigned to those directories are examples. -If you use user-provided CRs, the last directory listed under `extraManifests.searchPaths` in the `SiteConfig` CR must be the directory containing user-provided CRs. ==== -. Edit the `SiteConfig` CR to include the search paths of any directories you have created. -The first directory that is listed under `extraManifests.searchPaths` must be the directory containing the reference manifests. -Consider the order in which the directories are listed. -In cases where directories contain files with the same name, the file in the final directory takes precedence. +. Create ConfigMaps from the manifest directories and reference them in the `ClusterInstance` CR using the `extraManifestsRefs` field. + -.Example SiteConfig CR +.Example kustomization.yaml with configMapGenerator + [source,yaml] ---- -extraManifests: - searchPaths: - - extra-manifest/ <1> - - custom-manifest/ <2> +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +configMapGenerator: +- name: extra-manifests-cm + namespace: helix56-v413 + files: + - extra-manifest/workload-partitioning.yaml <1> + - extra-manifest/enable-crun-master.yaml + - custom-manifest/custom-config.yaml <2> + # ... + +generatorOptions: + disableNameSuffixHash: true +---- +<1> Extra manifest files from the `ztp-site-generate` container. +<2> User-provided custom manifest files. + +. Edit the `ClusterInstance` CR to reference the `ConfigMap` CR: ++ +.Example ClusterInstance CR ++ +[source,yaml] +---- +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance +metadata: + name: helix56-v413 + namespace: helix56-v413 +spec: + # ... + extraManifestsRefs: + - name: extra-manifests-cm <1> ---- -<1> The directory containing the reference manifests must be listed first under `extraManifests.searchPaths`. -<2> If you are using user-provided CRs, the last directory listed under `extraManifests.searchPaths` in the `SiteConfig` CR must be the directory containing those user-provided CRs. +<1> Reference the ConfigMap containing the extra manifests. . Edit the top-level `kustomization.yaml` file to control which {product-title} versions are active. The following is an example of a `kustomization.yaml` file at the top level: + diff --git a/modules/ztp-preparing-the-ztp-git-repository.adoc b/modules/ztp-preparing-the-ztp-git-repository.adoc index 7d16fcb99238..66868527ec23 100644 --- a/modules/ztp-preparing-the-ztp-git-repository.adoc +++ b/modules/ztp-preparing-the-ztp-git-repository.adoc @@ -16,12 +16,12 @@ Before you can use the {ztp-first} pipeline, you need to prepare the Git reposit .Procedure -. Create a directory structure with separate paths for the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` CRs. +. Create a directory structure with separate paths for the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` CRs. + [NOTE] ==== -Keep `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` CRs in separate directories. 
-Both the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` directories must contain a `kustomization.yaml` file that explicitly includes the files in that directory. +Keep `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` CRs in separate directories. +Both the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` directories must contain a `kustomization.yaml` file that explicitly includes the files in that directory. ==== . Export the `argocd` directory from the `ztp-site-generate` container image using the following commands: @@ -43,15 +43,15 @@ $ podman run --log-driver=none --rm registry.redhat.io/openshift4/ztp-site-gener . Check that the `out` directory contains the following subdirectories: + -* `out/extra-manifest` contains the source CR files that `SiteConfig` uses to generate extra manifest `configMap`. +* `out/extra-manifest` contains the source CR files that `ClusterInstance` uses to generate extra manifest `configMap`. * `out/source-crs` contains the source CR files that `PolicyGenerator` uses to generate the {rh-rhacm-first} policies. * `out/argocd/deployment` contains patches and YAML files to apply on the hub cluster for use in the next step of this procedure. -* `out/argocd/example` contains the examples for `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` files that represent the recommended configuration. +* `out/argocd/example/clusterinstance` contains the examples for `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` files that represent the recommended configuration. . Copy the `out/source-crs` folder and contents to the `PolicyGenerator` or `PolicyGentemplate` directory. . The out/extra-manifests directory contains the reference manifests for a RAN DU cluster. -Copy the `out/extra-manifests` directory into the `SiteConfig` folder. +Copy the `out/extra-manifests` directory into the `ClusterInstance` folder. This directory should contain CRs from the `ztp-site-generate` container only. Do not add user-provided CRs here. If you want to work with user-provided CRs you must create another directory for that content. @@ -66,7 +66,7 @@ example/ ├── policygentemplates <1> │ ├── kustomization.yaml │ └── source-crs/ - └── siteconfig + └── clusterinstance ├── extra-manifests └── kustomization.yaml ---- @@ -77,13 +77,13 @@ Equivalent and improved functionality is available by using {rh-rhacm-first} and The initial push to Git should include the `kustomization.yaml` files. You can use the directory structure under `out/argocd/example` as a reference for the structure and content of your Git repository. -That structure includes `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` reference CRs for single-node, three-node, and standard clusters. +That structure includes `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` reference CRs for single-node, three-node, and standard clusters. Remove references to cluster types that you are not using. For all cluster types, you must: * Add the `source-crs` subdirectory to the `acmpolicygenerator` or `policygentemplates` directory. -* Add the `extra-manifests` directory to the `siteconfig` directory. +* Add the `extra-manifests` directory to the `clusterinstance` directory. 
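For the single-node example shown next, the following sketch shows one way the `clusterinstance/kustomization.yaml` file might wire the copied `extra-manifests` content and any user-provided manifests into a `ConfigMap` that the `ClusterInstance` CR consumes through `extraManifestsRefs`; the file, namespace, and `ConfigMap` names are illustrative:

[source,yaml]
----
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

# List the ClusterInstance CR files for the site here, as described in
# "Required changes to the Git repository".

configMapGenerator:
- name: sno-extra-manifest-configmap          # matches spec.extraManifestsRefs in example-sno.yaml
  namespace: example-sno
  files:
  - extra-manifests/enable-crun-master.yaml   # copied from the ztp-site-generate container
  - custom-manifests/custom-config.yaml       # user-provided manifest
  # ...

generatorOptions:
  disableNameSuffixHash: true                 # keep the generated ConfigMap name stable
----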
The following example describes a set of CRs for a network of single-node clusters: @@ -98,7 +98,7 @@ example/ │ ├── kustomization.yaml │ ├── source-crs/ │ └── ns.yaml - └── siteconfig + └── clusterinstance ├── example-sno.yaml ├── extra-manifests/ <1> ├── custom-manifests/ <2> diff --git a/modules/ztp-required-changes-to-the-git-repository.adoc b/modules/ztp-required-changes-to-the-git-repository.adoc index 451214d7f3b0..34fc02b84d8f 100644 --- a/modules/ztp-required-changes-to-the-git-repository.adoc +++ b/modules/ztp-required-changes-to-the-git-repository.adoc @@ -19,7 +19,7 @@ All `PolicyGenerator` files must be created in a `Namespace` prefixed with `ztp` * Add the `kustomization.yaml` file to the repository: + -All `SiteConfig` and `PolicyGenerator` CRs must be included in a `kustomization.yaml` file under their respective directory trees. For example: +All `ClusterInstance` and `PolicyGenerator` CRs must be included in a `kustomization.yaml` file under their respective directory trees. For example: + [source,terminal] ---- @@ -33,7 +33,7 @@ All `SiteConfig` and `PolicyGenerator` CRs must be included in a `kustomization. │ ├── group-du-sno-ranGen-ns.yaml │ ├── group-du-sno-ranGen.yaml │ └── kustomization.yaml -└── siteconfig +└── clusterinstance ├── site1.yaml ├── site2.yaml └── kustomization.yaml @@ -41,7 +41,7 @@ All `SiteConfig` and `PolicyGenerator` CRs must be included in a `kustomization. + [NOTE] ==== -The files listed in the `generator` sections must contain either `SiteConfig` or `{policy-gen-cr}` CRs only. If your existing YAML files contain other CRs, for example, `Namespace`, these other CRs must be pulled out into separate files and listed in the `resources` section. +The files listed in the `generator` sections must contain either `ClusterInstance` or `{policy-gen-cr}` CRs only. If your existing YAML files contain other CRs, for example, `Namespace`, these other CRs must be pulled out into separate files and listed in the `resources` section. ==== + The `PolicyGenerator` kustomization file must contain all `PolicyGenerator` YAML files in the `generator` section and `Namespace` CRs in the `resources` section. For example: @@ -64,7 +64,7 @@ resources: - site2-ns.yaml ---- + -The `SiteConfig` kustomization file must contain all `SiteConfig` YAML files in the `generator` section and any other CRs in the resources: +The `ClusterInstance` kustomization file must contain all `ClusterInstance` YAML files in the `generator` section and any other CRs in the resources: + [source,terminal] ---- @@ -82,11 +82,11 @@ In {product-title} 4.10 and later, the `pre-sync.yaml` and `post-sync.yaml` file + [NOTE] ==== -There is a set of `pre-sync.yaml` and `post-sync.yaml` files under both the `SiteConfig` and `{policy-gen-cr}` trees. +There is a set of `pre-sync.yaml` and `post-sync.yaml` files under both the `ClusterInstance` and `{policy-gen-cr}` trees. ==== * Review and incorporate recommended changes + Each release may include additional recommended changes to the configuration applied to deployed clusters. Typically these changes result in lower CPU use by the OpenShift platform, additional features, or improved tuning of the platform. + -Review the reference `SiteConfig` and `PolicyGenerator` CRs applicable to the types of cluster in your network. These examples can be found in the `argocd/example` directory extracted from the {ztp} container. +Review the reference `ClusterInstance` and `PolicyGenerator` CRs applicable to the types of cluster in your network. 
These examples can be found in the `argocd/example` directory extracted from the {ztp} container. diff --git a/modules/ztp-site-cleanup.adoc b/modules/ztp-site-cleanup.adoc index eba3ea3d1cad..339484666454 100644 --- a/modules/ztp-site-cleanup.adoc +++ b/modules/ztp-site-cleanup.adoc @@ -16,9 +16,9 @@ You can remove a managed site and the associated installation and configuration .Procedure -. Remove a site and the associated CRs by removing the associated `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` files from the `kustomization.yaml` file. +. Remove a site and the associated CRs by removing the associated `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` files from the `kustomization.yaml` file. -. Add the following `syncOptions` field to your `SiteConfig` application. +. Add the following `syncOptions` field to the ArgoCD application that manages the target site. + [source,yaml] ---- @@ -31,6 +31,6 @@ spec: + When you run the {ztp} pipeline again, the generated CRs are removed. -. Optional: If you want to permanently remove a site, you should also remove the `SiteConfig` and site-specific `PolicyGenerator` or `PolicyGentemplate` files from the Git repository. +. Optional: If you want to permanently remove a site, you should also remove the `ClusterInstance` and site-specific `PolicyGenerator` or `PolicyGentemplate` files from the Git repository. -. Optional: If you want to remove a site temporarily, for example when redeploying a site, you can leave the `SiteConfig` and site-specific `PolicyGenerator` or `PolicyGentemplate` CRs in the Git repository. \ No newline at end of file +. Optional: If you want to remove a site temporarily, for example when redeploying a site, you can leave the `ClusterInstance` and site-specific `PolicyGenerator` or `PolicyGentemplate` CRs in the Git repository. \ No newline at end of file diff --git a/modules/ztp-sno-accelerated-ztp.adoc b/modules/ztp-sno-accelerated-ztp.adoc index f4c57c4c8ee8..e0ac3465b049 100644 --- a/modules/ztp-sno-accelerated-ztp.adoc +++ b/modules/ztp-sno-accelerated-ztp.adoc @@ -22,11 +22,11 @@ Accelerated provisioning of {ztp} is supported only when installing {sno} with A -You can activate accelerated ZTP using the `spec.clusters.clusterLabels.accelerated-ztp` label, as in the following example: +You can activate accelerated ZTP by setting the `accelerated-ztp` label under the `spec.extraLabels.ManagedCluster` field, as in the following example: -.Example Accelerated ZTP `SiteConfig` CR. +.Example Accelerated ZTP `ClusterInstance` CR [source,yaml] ---- -apiVersion: ran.openshift.io/v2 -kind: SiteConfig +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance metadata: name: "example-sno" namespace: "example-sno" @@ -36,14 +36,13 @@ spec: name: "assisted-deployment-pull-secret" clusterImageSetNameRef: "openshift-4.20" sshPublicKey: "ssh-rsa AAAA..." - clusters: - # ... - clusterLabels: - common: true - group-du-sno: "" - sites : "example-sno" - accelerated-ztp: full - + extraLabels: + ManagedCluster: # <-- Resource type as outer key + common: "true" + group-du-sno: "" + sites: "example-sno" + accelerated-ztp: full # <-- Accelerated ZTP label +# ... ---- You can use `accelerated-ztp: full` to fully automate the accelerated process. @@ -74,7 +73,7 @@ Accelerated ZTP uses an additional `ConfigMap` to create the resources derived f The standard `ConfigMap` includes manifests that the {ztp} workflow uses to customize cluster installs. {cgu-operator} detects that the `accelerated-ztp` label is set and then creates a second `ConfigMap`.
-As part of accelerated ZTP, the `SiteConfig` generator adds a reference to that second `ConfigMap` using the naming convention `-aztp`. +As part of accelerated ZTP, the SiteConfig Operator adds a reference to that second `ConfigMap` using the naming convention `-aztp`. After {cgu-operator} creates that second `ConfigMap`, it finds all policies bound to the managed cluster and extracts the {ztp} profile information. {cgu-operator} adds the {ztp} profile information to the `-aztp` `ConfigMap` custom resource (CR) and applies the CR to the hub cluster API. diff --git a/modules/ztp-troubleshooting-ztp-gitops-installation-crs.adoc b/modules/ztp-troubleshooting-ztp-gitops-installation-crs.adoc index 70344519f167..45599efb69c1 100644 --- a/modules/ztp-troubleshooting-ztp-gitops-installation-crs.adoc +++ b/modules/ztp-troubleshooting-ztp-gitops-installation-crs.adoc @@ -6,7 +6,7 @@ [id="ztp-troubleshooting-ztp-gitops-installation-crs_{context}"] = Troubleshooting {ztp} by validating the installation CRs -The ArgoCD pipeline uses the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` custom resources (CRs) to generate the cluster configuration CRs and {rh-rhacm-first} policies. Use the following steps to troubleshoot issues that might occur during this process. +The ArgoCD pipeline uses the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` custom resources (CRs) to generate the cluster configuration CRs and {rh-rhacm-first} policies. Use the following steps to troubleshoot issues that might occur during this process. .Prerequisites @@ -23,9 +23,9 @@ The ArgoCD pipeline uses the `SiteConfig` and `PolicyGenerator` or `PolicyGentem $ oc get AgentClusterInstall -n ---- + -If no object is returned, use the following steps to troubleshoot the ArgoCD pipeline flow from `SiteConfig` files to the installation CRs. +If no object is returned, use the following steps to troubleshoot the ArgoCD pipeline flow from `ClusterInstance` files to the installation CRs. -. Verify that the `ManagedCluster` CR was generated using the `SiteConfig` CR on the hub cluster: +. Verify that the `ManagedCluster` CR was generated using the `ClusterInstance` CR on the hub cluster: + [source,terminal] ---- @@ -39,6 +39,7 @@ $ oc get managedcluster $ oc get applications.argoproj.io -n openshift-gitops clusters -o yaml ---- +// TODO: Can we get example errors for ClusterInstance, or just remove? .. To identify error logs for the managed cluster, inspect the `status.operationState.syncResult.resources` field. 
For example, if an invalid value is assigned to the `extraManifestPath` in the `SiteConfig` CR, an error similar to the following is generated: + [source,text] diff --git a/snippets/ztp_example-sno.yaml b/snippets/ztp_example-sno.yaml index 98f57b14d09b..a124e6ee1333 100644 --- a/snippets/ztp_example-sno.yaml +++ b/snippets/ztp_example-sno.yaml @@ -1,154 +1,152 @@ -# example-node1-bmh-secret & assisted-deployment-pull-secret need to be created under same namespace example-sno +# example-node1-bmh-secret & assisted-deployment-pull-secret need to be created under same namespace example-ai-sno --- -apiVersion: ran.openshift.io/v1 -kind: SiteConfig +apiVersion: siteconfig.open-cluster-management.io/v1alpha1 +kind: ClusterInstance metadata: - name: "example-sno" - namespace: "example-sno" + name: "example-ai-sno" + namespace: "example-ai-sno" spec: baseDomain: "example.com" pullSecretRef: name: "assisted-deployment-pull-secret" - clusterImageSetNameRef: "openshift-4.18" + clusterImageSetNameRef: "openshift-4.21" sshPublicKey: "ssh-rsa AAAA..." - clusters: - - clusterName: "example-sno" - networkType: "OVNKubernetes" - # installConfigOverrides is a generic way of passing install-config - # parameters through the siteConfig. The 'capabilities' field configures - # the composable openshift feature. In this 'capabilities' setting, we - # remove all the optional set of components. - # Notes: - # - OperatorLifecycleManager is needed for 4.15 and later - # - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier - # - Ingress is needed for 4.16 and later - installConfigOverrides: | + clusterName: "example-ai-sno" + networkType: "OVNKubernetes" + # installConfigOverrides is a generic way of passing install-config + # parameters through the siteConfig. The 'capabilities' field configures + # the composable openshift feature. In this 'capabilities' setting, we + # remove all the optional set of components. + # Notes: + # - OperatorLifecycleManager is needed for 4.15 and later + # - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier + # - Ingress is needed for 4.16 and later + installConfigOverrides: | + { + "capabilities": { + "baselineCapabilitySet": "None", + "additionalEnabledCapabilities": [ + "NodeTuning", + "OperatorLifecycleManager", + "Ingress" + ] + } + } + # Include references to extraManifest ConfigMaps. + extraManifestsRefs: + - name: sno-extra-manifest-configmap + extraLabels: + ManagedCluster: + # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples + du-profile: "latest" + # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates: + # ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true' + common: "true" + # ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""' + group-du-sno: "" + # ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"' + # Normally this should match or contain the cluster name so it only applies to a single cluster + sites : "example-sno" + clusterNetwork: + - cidr: 1001:1::/48 + hostPrefix: 64 + machineNetwork: + - cidr: 1111:2222:3333:4444::/64 + serviceNetwork: + - cidr: 1001:2::/112 + additionalNTPSources: + - 1111:2222:3333:4444::2 + # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate + # please see Workload Partitioning Feature for a complete guide. 
+ cpuPartitioningMode: AllNodes + templateRefs: + - name: ai-cluster-templates-v1 + namespace: open-cluster-management + nodes: + - hostName: "example-node1.example.com" + role: "master" + bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1" + bmcCredentialsName: + name: "example-node1-bmh-secret" + bootMACAddress: "AA:BB:CC:DD:EE:11" + # Use UEFISecureBoot to enable secure boot, UEFI to disable. + bootMode: "UEFISecureBoot" + rootDeviceHints: + deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0" + # disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. See DiskPartitionContainer.md in argocd folder for more details + ignitionConfigOverride: | { - "capabilities": { - "baselineCapabilitySet": "None", - "additionalEnabledCapabilities": [ - "NodeTuning", - "OperatorLifecycleManager", - "Ingress" - ] - } - } - # It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+. - # The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest. - # extraManifestPath: sno-extra-manifest - clusterLabels: - # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples - du-profile: "latest" - # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates: - # ../acmpolicygenerator/common-ranGen.yaml will apply to all clusters with 'common: true' - common: true - # ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""' - group-du-sno: "" - # ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"' - # Normally this should match or contain the cluster name so it only applies to a single cluster - sites: "example-sno" - clusterNetwork: - - cidr: 1001:1::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 1111:2222:3333:4444::/64 - serviceNetwork: - - 1001:2::/112 - additionalNTPSources: - - 1111:2222:3333:4444::2 - # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate - # please see Workload Partitioning Feature for a complete guide. - cpuPartitioningMode: AllNodes - # Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster: - #crTemplates: - # KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml" - nodes: - - hostName: "example-node1.example.com" - role: "master" - # Optionally; This can be used to configure desired BIOS setting on a host: - #biosConfigRef: - # filePath: "example-hw.profile" - bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1" - bmcCredentialsName: - name: "example-node1-bmh-secret" - bootMACAddress: "AA:BB:CC:DD:EE:11" - # Use UEFISecureBoot to enable secure boot. - bootMode: "UEFISecureBoot" - rootDeviceHints: - deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0" - #crTemplates: - # BareMetalHost: "bmhOverride.yaml" - # disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. 
See DiskPartitionContainer.md for more details - ignitionConfigOverride: | - { - "ignition": { - "version": "3.2.0" - }, - "storage": { - "disks": [ + "ignition": { + "version": "3.2.0" + }, + "storage": { + "disks": [ + { + "device": "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0", + "partitions": [ { - "device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62", - "partitions": [ - { - "label": "var-lib-containers", - "sizeMiB": 0, - "startMiB": 250000 - } - ], - "wipeTable": false + "label": "var-lib-containers", + "sizeMiB": 0, + "startMiB": 250000 } ], - "filesystems": [ - { - "device": "/dev/disk/by-partlabel/var-lib-containers", - "format": "xfs", - "mountOptions": [ - "defaults", - "prjquota" - ], - "path": "/var/lib/containers", - "wipeFilesystem": true - } - ] - }, - "systemd": { - "units": [ - { - "contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target", - "enabled": true, - "name": "var-lib-containers.mount" - } - ] + "wipeTable": false } - } - nodeNetwork: - interfaces: - - name: eno1 - macAddress: "AA:BB:CC:DD:EE:11" + ], + "filesystems": [ + { + "device": "/dev/disk/by-partlabel/var-lib-containers", + "format": "xfs", + "mountOptions": [ + "defaults", + "prjquota" + ], + "path": "/var/lib/containers", + "wipeFilesystem": true + } + ] + }, + "systemd": { + "units": [ + { + "contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target", + "enabled": true, + "name": "var-lib-containers.mount" + } + ] + } + } + nodeNetwork: + interfaces: + - name: eno1 + macAddress: "AA:BB:CC:DD:EE:11" + config: + interfaces: + - name: eno1 + type: ethernet + state: up + ipv4: + enabled: false + ipv6: + enabled: true + address: + # For SNO sites with static IP addresses, the node-specific, + # API and Ingress IPs should all be the same and configured on + # the interface + - ip: 1111:2222:3333:4444::aaaa:1 + prefix-length: 64 + dns-resolver: + config: + search: + - example.com + server: + - 1111:2222:3333:4444::2 + routes: config: - interfaces: - - name: eno1 - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - enabled: true - address: - # For SNO sites with static IP addresses, the node-specific, - # API and Ingress IPs should all be the same and configured on - # the interface - - ip: 1111:2222:3333:4444::aaaa:1 - prefix-length: 64 - dns-resolver: - config: - search: - - example.com - server: - - 1111:2222:3333:4444::2 - routes: - config: - - destination: ::/0 - next-hop-interface: eno1 - next-hop-address: 1111:2222:3333:4444::1 - table-id: 254 + - destination: ::/0 + next-hop-interface: eno1 + next-hop-address: 1111:2222:3333:4444::1 + table-id: 254 + templateRefs: + - name: ai-node-templates-v1 + namespace: open-cluster-management \ No newline at end of file
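The example snippet assumes that the `assisted-deployment-pull-secret` and `example-node1-bmh-secret` secrets already exist in the `example-ai-sno` namespace, as noted in the comment at the top of the file. A minimal sketch of those secrets with placeholder values follows; the BMC credentials secret conventionally carries `username` and `password` keys:

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: assisted-deployment-pull-secret
  namespace: example-ai-sno
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: <base64-encoded-pull-secret>
---
apiVersion: v1
kind: Secret
metadata:
  name: example-node1-bmh-secret
  namespace: example-ai-sno
type: Opaque
data:
  username: <base64-encoded-BMC-username>
  password: <base64-encoded-BMC-password>
----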