Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 14 additions & 3 deletions controllers/workspace/devworkspace_controller.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright (c) 2019-2025 Red Hat, Inc.
// Copyright (c) 2019-2026 Red Hat, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
Expand Down Expand Up @@ -454,8 +454,19 @@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request
return r.failWorkspace(workspace, fmt.Sprintf("Failed to mount SSH askpass script to workspace: %s", err), metrics.ReasonWorkspaceEngineFailure, reqLogger, &reconcileStatus), nil
}

// Add automount resources into devfile containers
err = automount.ProvisionAutoMountResourcesInto(devfilePodAdditions, clusterAPI, workspace.Namespace, home.PersistUserHomeEnabled(workspace))
var workspaceDeployment *appsv1.Deployment

isWorkspaceRunning := workspace.Status.Phase == dw.DevWorkspaceStatusRunning
if isWorkspaceRunning {
// Fetch the existing deployment to determine whether automount resources with
// `controller.devfile.io/mount-on-start=true` can be mounted without a restart.
// Only needed when the workspace is already running; skip otherwise to reduce API calls.
if workspaceDeployment, err = wsprovision.GetClusterDeployment(workspace, clusterAPI); err != nil {
return reconcile.Result{}, err
}
}
Comment thread
tolusha marked this conversation as resolved.

err = automount.ProvisionAutoMountResourcesInto(devfilePodAdditions, clusterAPI, workspace.Namespace, home.PersistUserHomeEnabled(workspace), isWorkspaceRunning, workspaceDeployment)
if shouldReturn, reconcileResult, reconcileErr := r.checkDWError(workspace, err, "Failed to process automount resources", metrics.ReasonBadRequest, reqLogger, &reconcileStatus); shouldReturn {
return reconcileResult, reconcileErr
}
Expand Down
8 changes: 7 additions & 1 deletion pkg/constants/attributes.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright (c) 2019-2025 Red Hat, Inc.
// Copyright (c) 2019-2026 Red Hat, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
Expand Down Expand Up @@ -171,4 +171,10 @@ const (
// controller.devfile.io/restore-source-image: "registry.example.com/backups/my-workspace:20241111-123456"
//
WorkspaceRestoreSourceImageAttribute = "controller.devfile.io/restore-source-image"

// MountOnStartAttribute is an attribute applied to Kubernetes resources to indicate that they should only
// be mounted to a workspace when it starts. When this attribute is set to "true", newly created
// resources will not be automatically mounted to running workspaces, preventing unwanted workspace
// restarts.
MountOnStartAttribute = "controller.devfile.io/mount-on-start"
Comment thread
tolusha marked this conversation as resolved.
)
107 changes: 99 additions & 8 deletions pkg/provision/automount/common.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright (c) 2019-2025 Red Hat, Inc.
// Copyright (c) 2019-2026 Red Hat, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
Expand All @@ -24,6 +24,7 @@ import (

"github.com/devfile/devworkspace-operator/pkg/constants"
"github.com/devfile/devworkspace-operator/pkg/dwerrors"
appsv1 "k8s.io/api/apps/v1"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"

"github.com/devfile/devworkspace-operator/apis/controller/v1alpha1"
Expand All @@ -42,8 +43,15 @@ type Resources struct {
EnvFromSource []corev1.EnvFromSource
}

func ProvisionAutoMountResourcesInto(podAdditions *v1alpha1.PodAdditions, api sync.ClusterAPI, namespace string, persistentHome bool) error {
resources, err := getAutomountResources(api, namespace)
func ProvisionAutoMountResourcesInto(
podAdditions *v1alpha1.PodAdditions,
api sync.ClusterAPI,
namespace string,
persistentHome bool,
isWorkspaceRunning bool,
workspaceDeployment *appsv1.Deployment,
) error {
resources, err := getAutomountResources(api, namespace, isWorkspaceRunning, workspaceDeployment)

if err != nil {
return err
Expand Down Expand Up @@ -76,18 +84,23 @@ func ProvisionAutoMountResourcesInto(podAdditions *v1alpha1.PodAdditions, api sy
return nil
}

func getAutomountResources(api sync.ClusterAPI, namespace string) (*Resources, error) {
gitCMAutoMountResources, err := ProvisionGitConfiguration(api, namespace)
func getAutomountResources(
api sync.ClusterAPI,
namespace string,
isWorkspaceRunning bool,
workspaceDeployment *appsv1.Deployment,
) (*Resources, error) {
gitCMAutoMountResources, err := ProvisionGitConfiguration(api, namespace, isWorkspaceRunning, workspaceDeployment)
if err != nil {
return nil, err
}

cmAutoMountResources, err := getDevWorkspaceConfigmaps(namespace, api)
cmAutoMountResources, err := getDevWorkspaceConfigmaps(namespace, api, isWorkspaceRunning, workspaceDeployment)
if err != nil {
return nil, err
}

secretAutoMountResources, err := getDevWorkspaceSecrets(namespace, api)
secretAutoMountResources, err := getDevWorkspaceSecrets(namespace, api, isWorkspaceRunning, workspaceDeployment)
if err != nil {
return nil, err
}
Expand All @@ -104,7 +117,7 @@ func getAutomountResources(api sync.ClusterAPI, namespace string) (*Resources, e
}
dropItemsFieldFromVolumes(mergedResources.Volumes)

pvcAutoMountResources, err := getAutoMountPVCs(namespace, api)
pvcAutoMountResources, err := getAutoMountPVCs(namespace, api, isWorkspaceRunning, workspaceDeployment)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -354,3 +367,81 @@ func sortConfigmaps(cms []corev1.ConfigMap) {
return cms[i].Name < cms[j].Name
})
}

// isMountOnStart reports whether the object carries the
// controller.devfile.io/mount-on-start annotation set to "true".
func isMountOnStart(obj k8sclient.Object) bool {
	annotations := obj.GetAnnotations()
	value, found := annotations[constants.MountOnStartAttribute]
	return found && value == "true"
}

// isAllowedToMount checks whether an automount resource can be added to the workspace pod.
// Resources marked with the mount-on-start annotation are only eligible while the workspace
// is not yet running, or when they are already present in the current deployment.
func isAllowedToMount(
	obj k8sclient.Object,
	automountResource Resources,
	isWorkspaceRunning bool,
	workspaceDeployment *appsv1.Deployment,
) bool {
	switch {
	case workspaceDeployment == nil:
		// No existing deployment to compare against — every resource is eligible.
		return true
	case !isMountOnStart(obj):
		// Resources without mount-on-start are always eligible.
		return true
	case !isWorkspaceRunning:
		// The pod will be (re)created with these resources included.
		return true
	default:
		// Workspace is already running — only allow resources that are
		// already part of the deployment.
		return existsInDeployment(automountResource, workspaceDeployment)
	}
}
Comment thread
tolusha marked this conversation as resolved.

// existsInDeployment reports whether any part of the automount resource — a
// volume or an envFrom source — is already present in the workspace deployment.
func existsInDeployment(automountResource Resources, workspaceDeployment *appsv1.Deployment) bool {
	if isVolumeMountExistsInDeployment(automountResource, workspaceDeployment) {
		return true
	}
	return isEnvFromSourceExistsInDeployment(automountResource, workspaceDeployment)
}

// isVolumeMountExistsInDeployment returns true if any volume from the automount resource
// is already present in the workspace deployment's pod spec. Volumes are matched by name
// only, ignoring VolumeSource — if a name is reused after deleting the old resource, the
// deletion triggers reconciliation and a workspace restart before the new resource is mounted.
func isVolumeMountExistsInDeployment(automountResource Resources, workspaceDeployment *appsv1.Deployment) bool {
	deployedVolumes := workspaceDeployment.Spec.Template.Spec.Volumes
	deployedNames := make(map[string]struct{}, len(deployedVolumes))
	for _, volume := range deployedVolumes {
		deployedNames[volume.Name] = struct{}{}
	}

	for _, volume := range automountResource.Volumes {
		if _, found := deployedNames[volume.Name]; found {
			return true
		}
	}

	return false
}
Comment thread
tolusha marked this conversation as resolved.

// isEnvFromSourceExistsInDeployment returns true if any EnvFromSource from the automount
// resource is already referenced by a container of the workspace deployment. Sources are
// matched by the name of the referenced ConfigMap or Secret.
func isEnvFromSourceExistsInDeployment(automountResource Resources, workspaceDeployment *appsv1.Deployment) bool {
	for _, envFrom := range automountResource.EnvFromSource {
		for _, container := range workspaceDeployment.Spec.Template.Spec.Containers {
			for _, existing := range container.EnvFrom {
				sameConfigMap := envFrom.ConfigMapRef != nil && existing.ConfigMapRef != nil &&
					envFrom.ConfigMapRef.Name == existing.ConfigMapRef.Name
				sameSecret := envFrom.SecretRef != nil && existing.SecretRef != nil &&
					envFrom.SecretRef.Name == existing.SecretRef.Name

				if sameConfigMap || sameSecret {
					return true
				}
			}
		}
	}

	return false
}
4 changes: 2 additions & 2 deletions pkg/provision/automount/common_persistenthome_test.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright (c) 2019-2025 Red Hat, Inc.
// Copyright (c) 2019-2026 Red Hat, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
Expand Down Expand Up @@ -56,7 +56,7 @@ func TestProvisionAutomountResourcesIntoPersistentHomeEnabled(t *testing.T) {
Client: fake.NewClientBuilder().WithObjects(tt.Input.allObjects...).Build(),
}

err := ProvisionAutoMountResourcesInto(podAdditions, testAPI, testNamespace, true)
err := ProvisionAutoMountResourcesInto(podAdditions, testAPI, testNamespace, true, false, nil)

if !assert.NoError(t, err, "Unexpected error") {
return
Expand Down
Loading
Loading