Skip to content

Commit cd8ec7a

Browse files
committed
fixup! Add WorkspaceWithConfig struct, use it in controller
1 parent 24a9138 commit cd8ec7a

File tree

11 files changed

+195
-197
lines changed

11 files changed

+195
-197
lines changed

controllers/workspace/devworkspace_controller.go

Lines changed: 90 additions & 91 deletions
Large diffs are not rendered by default.

controllers/workspace/status.go

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -63,20 +63,20 @@ var clock kubeclock.Clock = &kubeclock.RealClock{}
6363
// updateWorkspaceStatus updates the current workspace's status field with conditions and phase from the passed in status.
6464
// Parameters for result and error are returned unmodified, unless error is nil and another error is encountered while
6565
// updating the status.
66-
func (r *DevWorkspaceReconciler) updateWorkspaceStatus(workspaceWithConfig *common.DevWorkspaceWithConfig, logger logr.Logger, status *currentStatus, reconcileResult reconcile.Result, reconcileError error) (reconcile.Result, error) {
67-
syncConditions(&workspaceWithConfig.Status, status)
68-
oldPhase := workspaceWithConfig.Status.Phase
69-
workspaceWithConfig.Status.Phase = status.phase
66+
func (r *DevWorkspaceReconciler) updateWorkspaceStatus(workspace *common.DevWorkspaceWithConfig, logger logr.Logger, status *currentStatus, reconcileResult reconcile.Result, reconcileError error) (reconcile.Result, error) {
67+
syncConditions(&workspace.Status, status)
68+
oldPhase := workspace.Status.Phase
69+
workspace.Status.Phase = status.phase
7070

71-
infoMessage := getInfoMessage(&workspaceWithConfig.DevWorkspace, status)
72-
if warn := conditions.GetConditionByType(workspaceWithConfig.Status.Conditions, conditions.DevWorkspaceWarning); warn != nil && warn.Status == corev1.ConditionTrue {
71+
infoMessage := getInfoMessage(&workspace.DevWorkspace, status)
72+
if warn := conditions.GetConditionByType(workspace.Status.Conditions, conditions.DevWorkspaceWarning); warn != nil && warn.Status == corev1.ConditionTrue {
7373
infoMessage = fmt.Sprintf("%s %s", warningPresentInfoMessage, infoMessage)
7474
}
75-
if workspaceWithConfig.Status.Message != infoMessage {
76-
workspaceWithConfig.Status.Message = infoMessage
75+
if workspace.Status.Message != infoMessage {
76+
workspace.Status.Message = infoMessage
7777
}
7878

79-
err := r.Status().Update(context.TODO(), &workspaceWithConfig.DevWorkspace)
79+
err := r.Status().Update(context.TODO(), &workspace.DevWorkspace)
8080
if err != nil {
8181
if k8sErrors.IsConflict(err) {
8282
logger.Info("Failed to update workspace status due to conflict; retrying")
@@ -87,7 +87,7 @@ func (r *DevWorkspaceReconciler) updateWorkspaceStatus(workspaceWithConfig *comm
8787
}
8888
}
8989
} else {
90-
updateMetricsForPhase(workspaceWithConfig, oldPhase, status.phase, logger)
90+
updateMetricsForPhase(workspace, oldPhase, status.phase, logger)
9191
}
9292

9393
return reconcileResult, reconcileError
@@ -224,15 +224,15 @@ func getInfoMessage(workspace *dw.DevWorkspace, status *currentStatus) string {
224224
// updateMetricsForPhase increments DevWorkspace startup metrics based on phase transitions in a DevWorkspace. It avoids
225225
// incrementing the underlying metrics where possible (e.g. reconciling an already running workspace) by only incrementing
226226
counters when the new phase is different from the current one in the DevWorkspace.
227-
func updateMetricsForPhase(workspaceWithConfig *common.DevWorkspaceWithConfig, oldPhase, newPhase dw.DevWorkspacePhase, logger logr.Logger) {
227+
func updateMetricsForPhase(workspace *common.DevWorkspaceWithConfig, oldPhase, newPhase dw.DevWorkspacePhase, logger logr.Logger) {
228228
if oldPhase == newPhase {
229229
return
230230
}
231231
switch newPhase {
232232
case dw.DevWorkspaceStatusRunning:
233-
metrics.WorkspaceRunning(workspaceWithConfig, logger)
233+
metrics.WorkspaceRunning(workspace, logger)
234234
case dw.DevWorkspaceStatusFailed:
235-
metrics.WorkspaceFailed(&workspaceWithConfig.DevWorkspace, logger)
235+
metrics.WorkspaceFailed(&workspace.DevWorkspace, logger)
236236
}
237237
}
238238

@@ -266,17 +266,17 @@ func checkForStartTimeout(workspace *dw.DevWorkspace, config v1alpha1.OperatorCo
266266
// configured progress timeout. If the workspace is not in the Failing state or does not have a DevWorkspaceFailed
267267
// condition set, returns false. Otherwise, returns true if the workspace has timed out. Returns an error if
268268
// timeout is configured with an unparsable duration.
269-
func checkForFailingTimeout(workspaceWithConfig *common.DevWorkspaceWithConfig) (isTimedOut bool, err error) {
270-
if workspaceWithConfig.Status.Phase != devworkspacePhaseFailing {
269+
func checkForFailingTimeout(workspace *common.DevWorkspaceWithConfig) (isTimedOut bool, err error) {
270+
if workspace.Status.Phase != devworkspacePhaseFailing {
271271
return false, nil
272272
}
273-
timeout, err := time.ParseDuration(workspaceWithConfig.Config.Workspace.ProgressTimeout)
273+
timeout, err := time.ParseDuration(workspace.Config.Workspace.ProgressTimeout)
274274
if err != nil {
275275
return false, fmt.Errorf("invalid duration specified for timeout: %w", err)
276276
}
277277
currTime := clock.Now()
278278
failedTime := time.Time{}
279-
for _, condition := range workspaceWithConfig.Status.Conditions {
279+
for _, condition := range workspace.Status.Conditions {
280280
if condition.Type == dw.DevWorkspaceFailedStart {
281281
failedTime = condition.LastTransitionTime.Time
282282
}

pkg/library/defaults/helper.go

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,16 +22,16 @@ import (
2222
// Overwrites the content of the workspace's Template Spec with the workspace config's Template Spec,
2323
// with the exception of the workspace's projects.
2424
// If the workspace's Template Spec defined any projects, they are preserved.
25-
func ApplyDefaultTemplate(workspaceWithConfig *common.DevWorkspaceWithConfig) {
26-
if workspaceWithConfig.Config.Workspace.DefaultTemplate == nil {
25+
func ApplyDefaultTemplate(workspace *common.DevWorkspaceWithConfig) {
26+
if workspace.Config.Workspace.DefaultTemplate == nil {
2727
return
2828
}
29-
defaultCopy := workspaceWithConfig.Config.Workspace.DefaultTemplate.DeepCopy()
30-
originalProjects := workspaceWithConfig.Spec.Template.Projects
31-
workspaceWithConfig.Spec.Template.DevWorkspaceTemplateSpecContent = *defaultCopy
32-
workspaceWithConfig.Spec.Template.Projects = append(workspaceWithConfig.Spec.Template.Projects, originalProjects...)
29+
defaultCopy := workspace.Config.Workspace.DefaultTemplate.DeepCopy()
30+
originalProjects := workspace.Spec.Template.Projects
31+
workspace.Spec.Template.DevWorkspaceTemplateSpecContent = *defaultCopy
32+
workspace.Spec.Template.Projects = append(workspace.Spec.Template.Projects, originalProjects...)
3333
}
3434

35-
func NeedsDefaultTemplate(workspaceWithConfig *common.DevWorkspaceWithConfig) bool {
36-
return len(workspaceWithConfig.Spec.Template.Components) == 0 && workspaceWithConfig.Config.Workspace.DefaultTemplate != nil
35+
func NeedsDefaultTemplate(workspace *common.DevWorkspaceWithConfig) bool {
36+
return len(workspace.Spec.Template.Components) == 0 && workspace.Config.Workspace.DefaultTemplate != nil
3737
}

pkg/library/projects/clone.go

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -32,16 +32,15 @@ const (
3232
projectClonerContainerName = "project-clone"
3333
)
3434

35-
func GetProjectCloneInitContainer(workspaceWithConfig *common.DevWorkspaceWithConfig) (*corev1.Container, error) {
35+
func GetProjectCloneInitContainer(workspace *common.DevWorkspaceWithConfig) (*corev1.Container, error) {
3636

37-
workspace := workspaceWithConfig.Spec.Template
38-
if len(workspace.Projects) == 0 {
37+
if len(workspace.Spec.Template.Projects) == 0 {
3938
return nil, nil
4039
}
41-
if workspace.Attributes.GetString(constants.ProjectCloneAttribute, nil) == constants.ProjectCloneDisable {
40+
if workspace.Spec.Template.Attributes.GetString(constants.ProjectCloneAttribute, nil) == constants.ProjectCloneDisable {
4241
return nil, nil
4342
}
44-
if !hasContainerComponents(&workspace) {
43+
if !hasContainerComponents(&workspace.Spec.Template) {
4544
// Avoid adding project-clone init container when DevWorkspace does not define any containers
4645
return nil, nil
4746
}
@@ -94,7 +93,7 @@ func GetProjectCloneInitContainer(workspaceWithConfig *common.DevWorkspaceWithCo
9493
MountPath: constants.DefaultProjectsSourcesRoot,
9594
},
9695
},
97-
ImagePullPolicy: corev1.PullPolicy(workspaceWithConfig.Config.Workspace.ImagePullPolicy),
96+
ImagePullPolicy: corev1.PullPolicy(workspace.Config.Workspace.ImagePullPolicy),
9897
}, nil
9998
}
10099

pkg/provision/storage/asyncStorage.go

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -46,39 +46,39 @@ func (*AsyncStorageProvisioner) NeedsStorage(workspace *dw.DevWorkspaceTemplateS
4646
return needsStorage(workspace)
4747
}
4848

49-
func (p *AsyncStorageProvisioner) ProvisionStorage(podAdditions *v1alpha1.PodAdditions, workspaceWithConfig *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) error {
49+
func (p *AsyncStorageProvisioner) ProvisionStorage(podAdditions *v1alpha1.PodAdditions, workspace *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) error {
5050
if err := checkConfigured(); err != nil {
5151
return &ProvisioningError{
5252
Message: fmt.Sprintf("%s. Contact an administrator to resolve this issue.", err.Error()),
5353
}
5454
}
5555

56-
numWorkspaces, _, err := p.getAsyncWorkspaceCount(workspaceWithConfig.Namespace, clusterAPI)
56+
numWorkspaces, _, err := p.getAsyncWorkspaceCount(workspace.Namespace, clusterAPI)
5757
if err != nil {
5858
return err
5959
}
6060
// If there is more than one started workspace using async storage, then we fail starting additional ones
6161
// Note we need to check phase so as to not accidentally fail an already-running workspace when a second one
6262
// is created.
63-
if numWorkspaces > 1 && workspaceWithConfig.Status.Phase != dw.DevWorkspaceStatusRunning {
63+
if numWorkspaces > 1 && workspace.Status.Phase != dw.DevWorkspaceStatusRunning {
6464
return &ProvisioningError{
65-
Message: fmt.Sprintf("cannot provision storage for workspace %s", workspaceWithConfig.Name),
65+
Message: fmt.Sprintf("cannot provision storage for workspace %s", workspace.Name),
6666
Err: fmt.Errorf("at most one workspace using async storage can be running in a namespace"),
6767
}
6868
}
6969

7070
// Add ephemeral volumes
71-
if err := addEphemeralVolumesFromWorkspace(&workspaceWithConfig.DevWorkspace, podAdditions); err != nil {
71+
if err := addEphemeralVolumesFromWorkspace(&workspace.DevWorkspace, podAdditions); err != nil {
7272
return err
7373
}
7474

7575
// If persistent storage is not needed, we're done
76-
if !p.NeedsStorage(&workspaceWithConfig.Spec.Template) {
76+
if !p.NeedsStorage(&workspace.Spec.Template) {
7777
return nil
7878
}
7979

8080
// Sync SSH keypair to cluster
81-
secret, configmap, err := asyncstorage.GetOrCreateSSHConfig(&workspaceWithConfig.DevWorkspace, clusterAPI)
81+
secret, configmap, err := asyncstorage.GetOrCreateSSHConfig(&workspace.DevWorkspace, clusterAPI)
8282
if err != nil {
8383
if errors.Is(err, asyncstorage.NotReadyError) {
8484
return &NotReadyError{
@@ -89,12 +89,12 @@ func (p *AsyncStorageProvisioner) ProvisionStorage(podAdditions *v1alpha1.PodAdd
8989
return err
9090
}
9191

92-
pvcName, err := checkForExistingCommonPVC(workspaceWithConfig.Namespace, clusterAPI)
92+
pvcName, err := checkForExistingCommonPVC(workspace.Namespace, clusterAPI)
9393
if err != nil {
9494
return err
9595
}
9696

97-
pvcTerminating, err := checkPVCTerminating(pvcName, workspaceWithConfig.Namespace, clusterAPI, workspaceWithConfig.Config)
97+
pvcTerminating, err := checkPVCTerminating(pvcName, workspace.Namespace, clusterAPI, workspace.Config)
9898
if err != nil {
9999
return err
100100
} else if pvcTerminating {
@@ -106,15 +106,15 @@ func (p *AsyncStorageProvisioner) ProvisionStorage(podAdditions *v1alpha1.PodAdd
106106

107107
if pvcName != "" {
108108
// Create common PVC if needed
109-
clusterPVC, err := syncCommonPVC(workspaceWithConfig.Namespace, clusterAPI, workspaceWithConfig.Config)
109+
clusterPVC, err := syncCommonPVC(workspace.Namespace, clusterAPI, workspace.Config)
110110
if err != nil {
111111
return err
112112
}
113113
pvcName = clusterPVC.Name
114114
}
115115

116116
// Create async server deployment
117-
deploy, err := asyncstorage.SyncWorkspaceSyncDeploymentToCluster(workspaceWithConfig.Namespace, configmap, pvcName, clusterAPI, workspaceWithConfig.Config)
117+
deploy, err := asyncstorage.SyncWorkspaceSyncDeploymentToCluster(workspace.Namespace, configmap, pvcName, clusterAPI, workspace.Config)
118118
if err != nil {
119119
if errors.Is(err, asyncstorage.NotReadyError) {
120120
return &NotReadyError{
@@ -157,40 +157,40 @@ func (p *AsyncStorageProvisioner) ProvisionStorage(podAdditions *v1alpha1.PodAdd
157157
return err
158158
}
159159

160-
volumes, err := p.addVolumesForAsyncStorage(podAdditions, &workspaceWithConfig.DevWorkspace)
160+
volumes, err := p.addVolumesForAsyncStorage(podAdditions, &workspace.DevWorkspace)
161161
if err != nil {
162162
return err
163163
}
164164

165165
sshSecretVolume := asyncstorage.GetVolumeFromSecret(secret)
166-
asyncSidecar := asyncstorage.GetAsyncSidecar(workspaceWithConfig.Status.DevWorkspaceId, sshSecretVolume.Name, volumes)
166+
asyncSidecar := asyncstorage.GetAsyncSidecar(workspace.Status.DevWorkspaceId, sshSecretVolume.Name, volumes)
167167
podAdditions.Containers = append(podAdditions.Containers, *asyncSidecar)
168168
podAdditions.Volumes = append(podAdditions.Volumes, *sshSecretVolume)
169169

170170
return nil
171171
}
172172

173-
func (p *AsyncStorageProvisioner) CleanupWorkspaceStorage(workspaceWithConfig *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) error {
173+
func (p *AsyncStorageProvisioner) CleanupWorkspaceStorage(workspace *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) error {
174174
// TODO: This approach relies on there being a maximum of one workspace running per namespace.
175-
asyncDeploy, err := asyncstorage.GetWorkspaceSyncDeploymentCluster(workspaceWithConfig.Namespace, clusterAPI)
175+
asyncDeploy, err := asyncstorage.GetWorkspaceSyncDeploymentCluster(workspace.Namespace, clusterAPI)
176176
if err != nil {
177177
if k8sErrors.IsNotFound(err) {
178-
return runCommonPVCCleanupJob(workspaceWithConfig, clusterAPI)
178+
return runCommonPVCCleanupJob(workspace, clusterAPI)
179179
} else {
180180
return err
181181
}
182182
}
183183

184184
// Check if another workspace is currently using the async server
185-
numWorkspaces, totalWorkspaces, err := p.getAsyncWorkspaceCount(workspaceWithConfig.Namespace, clusterAPI)
185+
numWorkspaces, totalWorkspaces, err := p.getAsyncWorkspaceCount(workspace.Namespace, clusterAPI)
186186
if err != nil {
187187
return err
188188
}
189189
switch numWorkspaces {
190190
case 0:
191191
// no problem
192192
case 1:
193-
if workspaceWithConfig.Spec.Started {
193+
if workspace.Spec.Started {
194194
// This is the only workspace using the async server, we can safely stop it
195195
break
196196
}
@@ -219,12 +219,12 @@ func (p *AsyncStorageProvisioner) CleanupWorkspaceStorage(workspaceWithConfig *c
219219
}
220220

221221
// Clean up PVC using usual job
222-
err = runCommonPVCCleanupJob(workspaceWithConfig, clusterAPI)
222+
err = runCommonPVCCleanupJob(workspace, clusterAPI)
223223
if err != nil {
224224
return err
225225
}
226226

227-
retry, err := asyncstorage.RemoveAuthorizedKeyFromConfigMap(&workspaceWithConfig.DevWorkspace, clusterAPI)
227+
retry, err := asyncstorage.RemoveAuthorizedKeyFromConfigMap(&workspace.DevWorkspace, clusterAPI)
228228
if err != nil {
229229
return &ProvisioningError{
230230
Message: "Failed to remove authorized key from async storage configmap",

pkg/provision/storage/cleanup.go

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -113,30 +113,30 @@ func runCommonPVCCleanupJob(workspace *common.DevWorkspaceWithConfig, clusterAPI
113113
}
114114
}
115115

116-
func getSpecCommonPVCCleanupJob(workspaceWithConfig *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) (*batchv1.Job, error) {
117-
workspaceId := workspaceWithConfig.Status.DevWorkspaceId
116+
func getSpecCommonPVCCleanupJob(workspace *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) (*batchv1.Job, error) {
117+
workspaceId := workspace.Status.DevWorkspaceId
118118

119-
pvcName, err := checkForExistingCommonPVC(workspaceWithConfig.Namespace, clusterAPI)
119+
pvcName, err := checkForExistingCommonPVC(workspace.Namespace, clusterAPI)
120120
if err != nil {
121121
return nil, err
122122
}
123123
if pvcName == "" {
124-
pvcName = workspaceWithConfig.Config.Workspace.PVCName
124+
pvcName = workspace.Config.Workspace.PVCName
125125
}
126126

127127
jobLabels := map[string]string{
128128
constants.DevWorkspaceIDLabel: workspaceId,
129-
constants.DevWorkspaceNameLabel: workspaceWithConfig.Name,
130-
constants.DevWorkspaceCreatorLabel: workspaceWithConfig.Labels[constants.DevWorkspaceCreatorLabel],
129+
constants.DevWorkspaceNameLabel: workspace.Name,
130+
constants.DevWorkspaceCreatorLabel: workspace.Labels[constants.DevWorkspaceCreatorLabel],
131131
}
132-
if restrictedAccess, needsRestrictedAccess := workspaceWithConfig.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation]; needsRestrictedAccess {
132+
if restrictedAccess, needsRestrictedAccess := workspace.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation]; needsRestrictedAccess {
133133
jobLabels[constants.DevWorkspaceRestrictedAccessAnnotation] = restrictedAccess
134134
}
135135

136136
job := &batchv1.Job{
137137
ObjectMeta: metav1.ObjectMeta{
138138
Name: common.PVCCleanupJobName(workspaceId),
139-
Namespace: workspaceWithConfig.Namespace,
139+
Namespace: workspace.Namespace,
140140
Labels: jobLabels,
141141
},
142142
Spec: batchv1.JobSpec{
@@ -148,7 +148,7 @@ func getSpecCommonPVCCleanupJob(workspaceWithConfig *common.DevWorkspaceWithConf
148148
},
149149
Spec: corev1.PodSpec{
150150
RestartPolicy: "Never",
151-
SecurityContext: wsprovision.GetDevWorkspaceSecurityContext(workspaceWithConfig.Config),
151+
SecurityContext: wsprovision.GetDevWorkspaceSecurityContext(workspace.Config),
152152
Volumes: []corev1.Volume{
153153
{
154154
Name: pvcName,
@@ -191,7 +191,7 @@ func getSpecCommonPVCCleanupJob(workspaceWithConfig *common.DevWorkspaceWithConf
191191
},
192192
}
193193

194-
podTolerations, nodeSelector, err := nsconfig.GetNamespacePodTolerationsAndNodeSelector(workspaceWithConfig.Namespace, clusterAPI)
194+
podTolerations, nodeSelector, err := nsconfig.GetNamespacePodTolerationsAndNodeSelector(workspace.Namespace, clusterAPI)
195195
if err != nil {
196196
return nil, err
197197
}
@@ -202,16 +202,16 @@ func getSpecCommonPVCCleanupJob(workspaceWithConfig *common.DevWorkspaceWithConf
202202
job.Spec.Template.Spec.NodeSelector = nodeSelector
203203
}
204204

205-
if err := controllerutil.SetControllerReference(workspaceWithConfig, job, clusterAPI.Scheme); err != nil {
205+
if err := controllerutil.SetControllerReference(workspace, job, clusterAPI.Scheme); err != nil {
206206
return nil, err
207207
}
208208
return job, nil
209209
}
210210

211-
func commonPVCExists(workspaceWithConfig *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) (bool, error) {
211+
func commonPVCExists(workspace *common.DevWorkspaceWithConfig, clusterAPI sync.ClusterAPI) (bool, error) {
212212
namespacedName := types.NamespacedName{
213-
Name: workspaceWithConfig.Config.Workspace.PVCName,
214-
Namespace: workspaceWithConfig.Namespace,
213+
Name: workspace.Config.Workspace.PVCName,
214+
Namespace: workspace.Namespace,
215215
}
216216
err := clusterAPI.Client.Get(clusterAPI.Ctx, namespacedName, &corev1.PersistentVolumeClaim{})
217217
if err != nil {

0 commit comments

Comments
 (0)