Topology-ocm-byop
Parameters
Template
The following tabs display the definition's CUE template and the rendered YAML. The rendered YAML is the output of the CUE template when the definition is applied to a cluster.
import (
"encoding/yaml"
"strings"
)
"topology-ocm-byop": {
description: "The OCM bring-your-own-placement (BYOP) topology policy enables multi-cluster-aware Workload deployment via the creation of a ManifestWorkReplicaSet targeting a preexisting Placement."
annotations: {}
labels: {
"policydefinition.spectrocloud.com/type": "topology"
}
attributes: {}
type: "policy"
}
template: {
output: {
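// Alias for the optional user-supplied ManifestWorkSpec template; the guards below copy only the fields that were actually provided.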
_template: parameter.manifestWorkReplicaSet.manifestWorkTemplate
apiVersion: "work.open-cluster-management.io/v1alpha1"
kind: "ManifestWorkReplicaSet"
metadata: {
labels: {
"wl.spectrocloud.com/name": context.workloadName
"wl.spectrocloud.com/component": context.name
}
namespace: parameter.placementNamespace
}
spec: {
cascadeDeletionPolicy: "Foreground"
manifestWorkTemplate: {
manifestConfigs: [
{
resourceIdentifier: {
group: "spectrocloud.com"
name: context.workloadName
namespace: context.namespace
resource: "workloads"
}
feedbackRules: [
{
type: "JSONPaths"
jsonPaths: [
{
name: "phase"
path: ".status.phase"
},
{
name: "priorityPhases"
path: ".status.priorityPhases[*]"
},
{
name: "conditionReasons"
path: ".status.conditions[*].reason"
},
{
name: "conditionStatuses"
path: ".status.conditions[*].status"
},
{
name: "conditionTypes"
path: ".status.conditions[*].type"
},
{
name: "conditionTransitionTimes"
path: ".status.conditions[*].lastTransitionTime"
},
{
name: "components"
path: ".status.components"
},
{
name: "definitionOutputs"
path: ".status.definitionOutputs[*]"
},
{
name: "objectOutputs"
path: ".status.objectOutputs"
},
]
},
]
},
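// Any manifestConfigs supplied through the manifestWorkTemplate parameter are appended after the built-in Workload feedback configuration above.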
if _template.manifestConfigs != _|_ for mc in _template.manifestConfigs {
mc
},
]
if _template.deleteOption != _|_ {
deleteOption: _template.deleteOption
}
if _template.executor != _|_ {
executor: _template.executor
}
workload: {
manifests: [for v in context.workloadYamls {yaml.Unmarshal(v)}]
}
}
placementRefs: parameter.manifestWorkReplicaSet.placementRefs
}
}
#resourceId: {
// +usage=Enter API group name of the Kubernetes resource [[EmptyTip: Core group]]
group?: string
// +usage=Enter the name of the Kubernetes resource
name: string
// +usage=Enter the namespace of the Kubernetes resource [[EmptyTip: Cluster-scoped resource]]
namespace?: string
// +usage=Enter the Kubernetes resource type
resource: string
}
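// Illustrative example (resource names are hypothetical): identifying a Deployment
// named "frontend" in the "web" namespace would look like
//   {group: "apps", name: "frontend", namespace: "web", resource: "deployments"}
// Leaving group empty targets the core API group; omitting namespace targets a cluster-scoped resource.
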
#feedbackRule: {
// +usage=Define JSON paths to retrieve selected fields from the resource's status [[RequiredIf: .type=="JSONPaths"]] [[ShowIf: .type=="JSONPaths"]] [[ItemTitleFrom: $.name]]
jsonPaths?: [...{
// +usage=Enter alias name for this feedback field
name: string
// +usage=Enter JSON path to a field under status [[Tooltip: The path must resolve to a valid field. If it points to a non-existent field, no feedback is reported and the StatusFeedbackSynced condition is set to false. See kubectl JSONPath documentation (https://kubernetes.io/docs/reference/kubectl/jsonpath).]]
path: string
// +usage=Enter the API version of the Kubernetes resource [[EmptyTip: semantically latest API version]]
version?: string
}]
// +usage=Select the type of feedback to collect from the resource status [[Tooltip: WellKnownStatus publishes common status fields for specific resource types, including Kubernetes resources like Deployment, Job, Pod, and DaemonSet, and Open Cluster Management resources like ManifestWork. If the expected status fields are not present, no values will be reported. JSONPaths collects and publishes status fields based on one or more specified JSON paths.]]
type: *"WellKnownStatus" | "JSONPaths"
}
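// Illustrative example (field names are hypothetical): a JSONPaths rule that reports
// a Deployment's ready replica count could be written as
//   {type: "JSONPaths", jsonPaths: [{name: "readyReplicas", path: ".status.readyReplicas"}]}
// With the default WellKnownStatus type, jsonPaths is not needed.
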
#rolloutConfig: {
// +usage=Set maximum failure threshold as a percentage or number (e.g., 5, 25%, 100) [[Tooltip: Rollout stops when the number of failed clusters meets or exceeds this threshold. For Progressive: threshold is based on total clusters. For ProgressivePerGroup: based on current group size. This does not apply to MandatoryDecisionGroups, which always tolerate zero failures. A failure means the cluster reaches failed or timeout status (i.e., does not become successful within the ProgressDeadline). The default is that no failures are tolerated. Pattern: ^((100|[0-9]{1,2})%|[0-9]+)$]]
maxFailures: *"0" | =~"^((100|[0-9]{1,2})%|[0-9]+)$"
// +usage=Set minimum success duration before proceeding (e.g., 2h, 90m, 360s) [[Tooltip: "Soak time": the minimum time to wait from the start of each rollout before moving to the next phase. Applies only if a successful state is reached and MaxFailures is not breached. Default is 0, meaning the rollout proceeds immediately after a successful state is reached. Pattern: ^(([0-9])+[h|m|s])$]]
minSuccessTime: *"0" | =~"^(([0-9])+[h|m|s])$"
// +usage=Set progress timeout duration (e.g., 2h, 90m, 360s) [[Tooltip: Defines how long the workload applier controller waits for the workload to reach a successful state in the cluster. If the workload does not reach a successful state within ProgressDeadline, the applier stops waiting, the workload is treated as "timeout", and it is counted toward MaxFailures. Once MaxFailures is breached, the rollout stops. The default is "None", meaning the workload applier waits for a successful state indefinitely. Pattern: ^(([0-9])+[h|m|s])|None$]]
progressDeadline: *"None" | =~"^(([0-9])+[h|m|s])|None$"
}
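// Illustrative values (hypothetical): tolerate up to 10% failed clusters, hold each
// step for at least 5 minutes after success, and count a cluster as timed out after 30 minutes:
//   maxFailures:      "10%"
//   minSuccessTime:   "5m"
//   progressDeadline: "30m"
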
#mandatoryDecisionGroup: {
// +usage=Enter decision group index [[Tooltip: Must match an existing placementDecision's label value for key cluster.open-cluster-management.io/decision-group-index]]
groupIndex: int
// +usage=Enter decision group name [[Tooltip: Must match an existing placementDecision's label value for key cluster.open-cluster-management.io/decision-group-name]]
groupName: string
}
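// Illustrative example (group name is hypothetical): {groupIndex: 0, groupName: "canary"}
// must match the cluster.open-cluster-management.io/decision-group-index and
// decision-group-name label values on an existing PlacementDecision.
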
// +usage=Configure ManifestWork replication across clusters [[Tooltip: This resource creates ManifestWorks in the namespaces of selected ManagedClusters based on PlacementDecisions. When the ManifestWorkReplicaSet is deleted, the associated ManifestWorks are also removed. It continuously updates the per-cluster ManifestWorks to reflect changes in PlacementDecisions. Supports 0 to many ManagedClusters.]]
#manifestWorkReplicaSet: {
// +usage=Configure the ManifestWorkSpec used to generate a ManifestWork for each cluster [[Tooltip: This template defines the structure of each per-cluster ManifestWork based on the ManifestWorkSpec schema]]
manifestWorkTemplate?: {
// +usage=Configure resource deletion strategy [[Tooltip: Defines how resources in the ManifestWork are handled when it is deleted. If not set, the Foreground deletion strategy is applied by default.]]
deleteOption?: {
// +usage=Select a deletion propagation policy [[Tooltip: Foreground waits until all dependent resources are deleted. Orphan leaves resources on the cluster after the ManifestWork is deleted. SelectivelyOrphan keeps only the specified resources and is typically used to transfer ownership between ManifestWorks. For example, you can 1. create manifestwork/2 to manage a resource, 2. update manifestwork/1 to selectively orphan it, and 3. remove the resource from manifestwork/1 without disruption as manifestwork/2 takes over.]]
propagationPolicy: *"Foreground" | "Orphan" | "SelectivelyOrphan"
// +usage=Configure selective orphaning rules [[ShowIf: .propagationPolicy=="SelectivelyOrphan"]]
selectivelyOrphans?: {
// +usage=Define orphaning rules for selective deletion [[Tooltip: Each rule identifies a specific resource included in this ManifestWork that should be left on the cluster (orphaned) when the ManifestWork is deleted.]] [[ItemTitleFrom: $.name]]
orphaningRules!: [...#resourceId]
}
}
// +usage=Configure executor settings for the work agent [[Tooltip: The executor identity allows the work agent to perform pre-request processing, such as verifying it has permission to apply workloads to the local managed cluster. If not set, no additional actions are performed before applying resources (supported for backward compatibility).]]
executor?: {
// +usage=Configure the subject identity used by the work agent to apply resources to the local cluster
subject: {
// +usage=Configure the service account used by the work agent [[ShowIf: .type=="ServiceAccount"]] [[RequiredIf: .type=="ServiceAccount"]]
serviceAccount?: {
// +usage=Enter the name of the service account [[Tooltip: Must consist of lower case alphanumeric characters, hyphens, or periods. Must start and end with an alphanumeric character. Maximum 253 characters. Pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$]]
name: =~"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$" & strings.MinRunes(1) & strings.MaxRunes(253)
// +usage=Enter the namespace of the service account [[Tooltip: Must consist of lower case alphanumeric characters, hyphens, or periods. Must start and end with an alphanumeric character. Maximum 253 characters. Pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$]]
namespace: =~"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$" & strings.MinRunes(1) & strings.MaxRunes(253)
}
// +usage=Select subject identity type
type: "ServiceAccount"
}
}
// +usage=Configure manifest resource settings [[Tooltip: Defines feedback rules and update strategies for workload resources. Each configuration must specify either feedbackRules, updateStrategy, or both to be meaningful.]] [[ItemTitleFrom: $.resourceIdentifier.name]]
manifestConfigs?: [...{
// +usage=Specify the group, resource, name, and namespace of a resource [[Tooltip: Rules will only be executed if the resource was created by this manifest work]]
resourceIdentifier: #resourceId
// +usage=Define resource status feedback rules [[Tooltip: Feedback rules determine which status fields are reported from the managed resource. If not set, no feedback will be collected.]]
feedbackRules?: [...#feedbackRule]
// +usage=Configure how this manifest should be updated on the cluster [[Tooltip: If not set, no update strategy will be applied]]
updateStrategy?: {
// +usage=Configure server-side apply settings [[ShowIf: .type=="ServerSideApply"]]
serverSideApply?: {
// +usage=Enter the name of the field manager used to apply the resource [[Tooltip: Defaults to work-agent, but can be any name with work-agent as the prefix. Pattern: ^work-agent(-[a-zA-Z0-9]+)*$]]
fieldManager: *"work-agent" | =~"^work-agent(-[a-zA-Z0-9]+)*$"
// +usage=Set whether to force apply the resource
force: bool
}
// +usage=Select update strategy type [[Tooltip: Update – Updates the resource using a standard update call. CreateOnly – Creates the resource once; no updates after creation. ReadOnly – Checks only for the existence of the resource via metadata; does not create or update. Feedback via statusFeedbackRules is still allowed. ServerSideApply – Uses server-side apply with work-controller as the field manager. On conflict, the Applied condition will be False with reason ApplyConflict.]]
type: *"Update" | "CreateOnly" | "ReadOnly" | "ServerSideApply"
}
}]
}
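// Illustrative example (resource names are hypothetical): a manifestWorkTemplate that
// orphans resources on deletion and applies a ConfigMap with server-side apply:
//   manifestWorkTemplate: {
//     deleteOption: {propagationPolicy: "Orphan"}
//     manifestConfigs: [{
//       resourceIdentifier: {name: "app-config", namespace: "web", resource: "configmaps"}
//       updateStrategy: {type: "ServerSideApply", serverSideApply: {fieldManager: "work-agent", force: false}}
//     }]
//   }
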
// +usage=Configure placement references to distribute the workload [[Tooltip: References existing Placement resources; PlacementDecisions derived from them determine the target clusters for deploying ManifestWorks.]] [[ItemTitleFrom: $.name]]
placementRefs: [...{
// +usage=Enter the name of the Placement resource [[Tooltip: Must exist in the specified placement namespace]]
name: string
// +usage=Configure workload rollout strategy
rolloutStrategy: {
// +usage=Configure rollout for all clusters simultaneously [[ShowIf: .type=="All"]]
all?: {
// +usage=Set progress timeout duration (e.g., 2h, 90m, 360s) [[Tooltip: Defines how long the workload applier controller waits for the workload to reach a successful state in the cluster. If the workload does not reach a successful state within ProgressDeadline, the applier stops waiting, the workload is treated as "timeout", and it is counted toward MaxFailures. Once MaxFailures is breached, the rollout stops. The default is "None", meaning the workload applier waits for a successful state indefinitely. Pattern: ^(([0-9])+[h|m|s])|None$]]
progressDeadline: *"None" | =~"^(([0-9])+[h|m|s])|None$"
}
// +usage=Configure progressive rollout strategy [[ShowIf: .type=="Progressive"]]
progressive?: {
#rolloutConfig
// +usage=Define mandatory decision groups that must succeed first [[Tooltip: These groups are applied before others. If they don't reach a successful state, the rollout fails. GroupName or GroupIndex must match the decisionGroups defined in the placement's decisionStrategy.]] [[ItemTitleFrom: $.groupName]]
mandatoryDecisionGroups: [...#mandatoryDecisionGroup]
// +usage=Set maximum concurrent cluster deployments as a number or percentage (e.g., 5, 25%, 100) [[Tooltip: Maximum number of clusters to deploy the workload to at the same time. If not set, the value is determined from clustersPerDecisionGroup in the placement's DecisionStrategy. Pattern: ^((100|[0-9]{1,2})%|[0-9]+)$]]
maxConcurrency?: =~"^((100|[0-9]{1,2})%|[0-9]+)$"
}
// +usage=Configure progressive per group rollout strategy [[ShowIf: .type=="ProgressivePerGroup"]]
progressivePerGroup?: {
#rolloutConfig
// +usage=Specify decision groups that must succeed before others [[Tooltip: These groups are rolled out first. If any of them fail to reach a successful state, the rollout is halted. GroupName or GroupIndex must match the decisionGroups defined in the placement's decisionStrategy.]] [[ItemTitleFrom: $.groupName]]
mandatoryDecisionGroups: [...#mandatoryDecisionGroup]
}
// +usage=Select rollout strategy type [[Tooltip: All: Apply workload to all clusters at once; Progressive: Apply workload progressively per cluster; ProgressivePerGroup: Apply workload to clusters progressively per group]]
type: *"All" | "Progressive" | "ProgressivePerGroup"
}
}]
}
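// Illustrative example (placement name is hypothetical): a placementRefs entry that
// references an existing Placement and rolls out to at most 2 clusters at a time:
//   placementRefs: [{
//     name: "byop-placement"
//     rolloutStrategy: {
//       type: "Progressive"
//       progressive: {maxConcurrency: "2", mandatoryDecisionGroups: []}
//     }
//   }]
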
parameter: {
// +usage=Defines resources and lifecycle configuration for applying workloads to selected clusters
manifestWorkReplicaSet: #manifestWorkReplicaSet
// +usage=Enter namespace containing the existing Placement resource [[Tooltip: The ManifestWorkReplicaSet will be created in this namespace and reference Placement resources from here]]
placementNamespace: string
}
}
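As a minimal usage sketch, the policy's parameters could be filled in as follows to deploy a Workload's manifests to every cluster selected by an existing Placement. All names and values shown here (the "placements" namespace, the "all-edge-clusters" Placement, and the 15m deadline) are hypothetical and must match resources that already exist on your hub cluster.

parameter: {
    placementNamespace: "placements"
    manifestWorkReplicaSet: {
        placementRefs: [{
            name: "all-edge-clusters"
            rolloutStrategy: {
                type: "All"
                all: {progressDeadline: "15m"}
            }
        }]
    }
}

Switching type to "Progressive" or "ProgressivePerGroup" and filling in the corresponding block enables the staged rollout behavior described in the schema above.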