
Commit 98fc425

Get logger from context
Signed-off-by: Yi Chen <[email protected]>
1 parent 8dd8db4 commit 98fc425
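
This commit replaces package-level and glog-based logging in the SparkApplication controller with request-scoped loggers retrieved from the context via controller-runtime's log package, and threads ctx through helpers so client calls stop using context.TODO(). A minimal sketch of the pattern, assuming the standard controller-runtime API (the ExampleReconciler type and its body are illustrative, not code from this repository):

package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// ExampleReconciler is illustrative only; the real reconciler lives in
// internal/controller/sparkapplication/controller.go.
type ExampleReconciler struct{}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// controller-runtime injects a request-scoped logger into ctx; fetch it
	// (optionally attaching key/value pairs) instead of using a global logger.
	logger := log.FromContext(ctx, "name", req.Name, "namespace", req.Namespace)
	logger.Info("reconciling SparkApplication")

	// Pass ctx down so helpers can call log.FromContext(ctx) themselves and so
	// client reads/writes use the request context rather than context.TODO().
	return ctrl.Result{}, nil
}

Because the logger already carries the name and namespace keys, call sites in the diffs below can drop those fields from their Info messages.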

File tree (11 files changed, +131 −98 lines)


Makefile

Lines changed: 1 addition & 1 deletion

@@ -172,7 +172,7 @@ override LDFLAGS += \
 .PHONY: build-operator
 build-operator: ## Build Spark operator.
 	echo "Building spark-operator binary..."
-	go build -o $(SPARK_OPERATOR) -ldflags '${LDFLAGS}' cmd/operator/main.go
+	CGO_ENABLED=0 go build -o $(SPARK_OPERATOR) -ldflags '${LDFLAGS}' cmd/operator/main.go
 
 .PHONY: clean
 clean: ## Clean binaries.

go.mod

Lines changed: 1 addition & 1 deletion

@@ -3,6 +3,7 @@ module github.com/kubeflow/spark-operator/v2
 go 1.24.1
 
 require (
+	github.com/go-logr/logr v1.4.2
 	github.com/golang/glog v1.2.4
 	github.com/google/uuid v1.6.0
 	github.com/onsi/ginkgo/v2 v2.22.0
@@ -66,7 +67,6 @@ require (
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-errors/errors v1.5.1 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect

internal/controller/sparkapplication/controller.go

Lines changed: 61 additions & 46 deletions
Large diffs are not rendered by default.

internal/controller/sparkapplication/driveringress.go

Lines changed: 24 additions & 18 deletions

@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
 	"github.com/kubeflow/spark-operator/v2/pkg/common"
@@ -83,18 +84,19 @@ func getDriverIngressURL(ingressURLFormat string, app *v1beta2.SparkApplication)
 	return parsedURL, nil
 }
 
-func (r *Reconciler) createDriverIngress(app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) {
+func (r *Reconciler) createDriverIngress(ctx context.Context, app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) {
 	if driverIngressConfiguration.ServicePort == nil {
 		return nil, fmt.Errorf("cannot create Driver Ingress for application %s/%s due to empty ServicePort on driverIngressConfiguration", app.Namespace, app.Name)
 	}
 	ingressName := fmt.Sprintf("%s-ing-%d", app.Name, *driverIngressConfiguration.ServicePort)
 	if util.IngressCapabilities.Has("networking.k8s.io/v1") {
-		return r.createDriverIngressV1(app, service, ingressName, ingressURL, ingressClassName, []networkingv1.IngressTLS{}, map[string]string{})
+		return r.createDriverIngressV1(ctx, app, service, ingressName, ingressURL, ingressClassName, []networkingv1.IngressTLS{}, map[string]string{})
 	}
-	return r.createDriverIngressLegacy(app, service, ingressName, ingressURL)
+	return r.createDriverIngressLegacy(ctx, app, service, ingressName, ingressURL)
 }
 
-func (r *Reconciler) createDriverIngressV1(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, ingressClassName string, defaultIngressTLS []networkingv1.IngressTLS, defaultIngressAnnotations map[string]string) (*SparkIngress, error) {
+func (r *Reconciler) createDriverIngressV1(ctx context.Context, app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, ingressClassName string, defaultIngressTLS []networkingv1.IngressTLS, defaultIngressAnnotations map[string]string) (*SparkIngress, error) {
+	logger := log.FromContext(ctx)
 	ingressResourceAnnotations := util.GetWebUIIngressAnnotations(app)
 	if len(ingressResourceAnnotations) == 0 && len(defaultIngressAnnotations) != 0 {
 		ingressResourceAnnotations = defaultIngressAnnotations
@@ -160,17 +162,17 @@ func (r *Reconciler) createDriverIngressV1(app *v1beta2.SparkApplication, servic
 		ingress.Spec.IngressClassName = &ingressClassName
 	}
 
-	if err := r.client.Create(context.TODO(), ingress); err != nil {
+	if err := r.client.Create(ctx, ingress); err != nil {
 		if !errors.IsAlreadyExists(err) {
 			return nil, fmt.Errorf("failed to create ingress %s/%s: %v", ingress.Namespace, ingress.Name, err)
 		}
 
-		if err := r.client.Update(context.TODO(), ingress); err != nil {
+		if err := r.client.Update(ctx, ingress); err != nil {
 			return nil, fmt.Errorf("failed to update ingress %s/%s: %v", ingress.Namespace, ingress.Name, err)
 		}
-		logger.Info("Updated networking.v1/Ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name)
+		logger.Info("Updated networking.v1/Ingress for SparkApplication", "ingressName", ingress.Name)
 	} else {
-		logger.Info("Created networking.v1/Ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name)
+		logger.Info("Created networking.v1/Ingress for SparkApplication", "ingressName", ingress.Name)
 	}
 
 	return &SparkIngress{
@@ -182,7 +184,8 @@ func (r *Reconciler) createDriverIngressV1(app *v1beta2.SparkApplication, servic
 	}, nil
 }
 
-func (r *Reconciler) createDriverIngressLegacy(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL) (*SparkIngress, error) {
+func (r *Reconciler) createDriverIngressLegacy(ctx context.Context, app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL) (*SparkIngress, error) {
+	logger := log.FromContext(ctx)
 	ingressResourceAnnotations := util.GetWebUIIngressAnnotations(app)
 	// var ingressTLSHosts networkingv1.IngressTLS[]
 	// That we convert later for extensionsv1beta1, but return as is in SparkIngress.
@@ -236,17 +239,17 @@ func (r *Reconciler) createDriverIngressLegacy(app *v1beta2.SparkApplication, se
 	if len(ingressTLSHosts) != 0 {
 		ingress.Spec.TLS = convertIngressTLSHostsToLegacy(ingressTLSHosts)
 	}
-	if err := r.client.Create(context.TODO(), ingress); err != nil {
+	if err := r.client.Create(ctx, ingress); err != nil {
 		if !errors.IsAlreadyExists(err) {
 			return nil, fmt.Errorf("failed to create ingress %s/%s: %v", ingress.Namespace, ingress.Name, err)
 		}
 
-		if err := r.client.Update(context.TODO(), ingress); err != nil {
+		if err := r.client.Update(ctx, ingress); err != nil {
 			return nil, fmt.Errorf("failed to update ingress %s/%s: %v", ingress.Namespace, ingress.Name, err)
 		}
-		logger.Info("Updated extensions.v1beta1/Ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name)
+		logger.Info("Updated extensions.v1beta1/Ingress for SparkApplication", "ingressName", ingress.Name)
 	} else {
-		logger.Info("Created extensions.v1beta1/Ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name)
+		logger.Info("Created extensions.v1beta1/Ingress for SparkApplication", "ingressName", ingress.Name)
 	}
 
 	return &SparkIngress{
@@ -269,6 +272,7 @@ func convertIngressTLSHostsToLegacy(ingressTLSHosts []networkingv1.IngressTLS) [
 }
 
 func (r *Reconciler) createDriverIngressService(
+	ctx context.Context,
 	app *v1beta2.SparkApplication,
 	portName string,
 	port int32,
@@ -278,6 +282,7 @@ func (r *Reconciler) createDriverIngressService(
 	serviceAnnotations map[string]string,
 	serviceLabels map[string]string,
 ) (*SparkService, error) {
+	logger := log.FromContext(ctx)
 	service := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: serviceName,
@@ -312,18 +317,18 @@ func (r *Reconciler) createDriverIngressService(
 		service.ObjectMeta.Annotations = serviceAnnotations
 	}
 
-	if err := r.client.Create(context.TODO(), service); err != nil {
+	if err := r.client.Create(ctx, service); err != nil {
 		if !errors.IsAlreadyExists(err) {
 			return nil, err
 		}
 
 		// Update the service if it already exists.
-		if err := r.client.Update(context.TODO(), service); err != nil {
+		if err := r.client.Update(ctx, service); err != nil {
 			return nil, err
 		}
-		logger.Info("Updated service for SparkApplication", "name", app.Name, "namespace", app.Namespace, "serviceName", service.Name)
+		logger.Info("Updated service for SparkApplication", "name", service.Name)
 	} else {
-		logger.Info("Created service for SparkApplication", "name", app.Name, "namespace", app.Namespace, "serviceName", service.Name)
+		logger.Info("Created service for SparkApplication", "name", service.Name)
 	}
 
 	return &SparkService{
@@ -390,6 +395,7 @@ func getDriverIngressServiceLabels(driverIngressConfiguration *v1beta2.DriverIng
 }
 
 func (r *Reconciler) createDriverIngressServiceFromConfiguration(
+	ctx context.Context,
 	app *v1beta2.SparkApplication,
 	driverIngressConfiguration *v1beta2.DriverIngressConfiguration,
 ) (*SparkService, error) {
@@ -402,5 +408,5 @@ func (r *Reconciler) createDriverIngressServiceFromConfiguration(
 	serviceType := getDriverIngressServiceType(driverIngressConfiguration)
 	serviceAnnotations := getDriverIngressServiceAnnotations(driverIngressConfiguration)
 	serviceLabels := getDriverIngressServiceLabels(driverIngressConfiguration)
-	return r.createDriverIngressService(app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels)
+	return r.createDriverIngressService(ctx, app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels)
 }

internal/controller/sparkapplication/driveringress_test.go

Lines changed: 7 additions & 5 deletions

@@ -17,18 +17,20 @@ limitations under the License.
 package sparkapplication
 
 import (
+	"context"
 	"fmt"
 	"net/url"
 	"testing"
 
-	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
-	"github.com/kubeflow/spark-operator/v2/pkg/common"
-	"github.com/kubeflow/spark-operator/v2/pkg/util"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
+	"github.com/kubeflow/spark-operator/v2/pkg/common"
+	"github.com/kubeflow/spark-operator/v2/pkg/util"
 )
 
 func TestCreateDriverIngressService(t *testing.T) {
@@ -51,7 +53,7 @@ func TestCreateDriverIngressService(t *testing.T) {
 	}
 
 	ingressOptions := tc.app.Spec.DriverIngressOptions[0]
-	ingressConfig, err := reconciler.createDriverIngressServiceFromConfiguration(tc.app, &ingressOptions)
+	ingressConfig, err := reconciler.createDriverIngressServiceFromConfiguration(context.TODO(), tc.app, &ingressOptions)
 
 	if tc.expectError {
 		assert.Error(t, err, "Expected an error but got none")
@@ -89,7 +91,7 @@ func TestCreateDriverIngressService(t *testing.T) {
 	// Test ingress creation
 	driverUrl, err := url.Parse("http://localhost")
 	assert.NoError(t, err, "Failed to parse driver ingress url")
-	_, err = reconciler.createDriverIngress(tc.app, &ingressOptions, *ingressConfig, driverUrl, "ingressClass")
+	_, err = reconciler.createDriverIngress(context.TODO(), tc.app, &ingressOptions, *ingressConfig, driverUrl, "ingressClass")
 
 	if tc.expectError {
 		assert.Error(t, err, "Expected an error but got none")

internal/controller/sparkapplication/event_filter.go

Lines changed: 6 additions & 2 deletions

@@ -25,8 +25,10 @@ import (
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 
+	"github.com/go-logr/logr"
 	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
 	"github.com/kubeflow/spark-operator/v2/pkg/util"
 )
@@ -116,6 +118,7 @@ type EventFilter struct {
 	client     client.Client
 	recorder   record.EventRecorder
 	namespaces map[string]bool
+	logger     logr.Logger
 }
 
 var _ predicate.Predicate = &EventFilter{}
@@ -134,6 +137,7 @@ func NewSparkApplicationEventFilter(client client.Client, recorder record.EventR
 		client:     client,
 		recorder:   recorder,
 		namespaces: nsMap,
+		logger:     log.Log.WithName(""),
 	}
 }
 
@@ -172,9 +176,9 @@ func (f *EventFilter) Update(e event.UpdateEvent) bool {
 	if !equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) {
 		// Force-set the application status to Invalidating which handles clean-up and application re-run.
 		newApp.Status.AppState.State = v1beta2.ApplicationStateInvalidating
-		logger.Info("Updating SparkApplication status", "name", newApp.Name, "namespace", newApp.Namespace, " oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State)
+		f.logger.Info("Updating SparkApplication status", "name", newApp.Name, "namespace", newApp.Namespace, " oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State)
 		if err := f.client.Status().Update(context.TODO(), newApp); err != nil {
-			logger.Error(err, "Failed to update application status", "application", newApp.Name)
+			f.logger.Error(err, "Failed to update application status", "application", newApp.Name)
 			f.recorder.Eventf(
 				newApp,
 				corev1.EventTypeWarning,
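
Note that EventFilter differs from the event handlers: predicate.Predicate methods such as Update(e event.UpdateEvent) bool receive no context, so this file stores a logger on the struct at construction time instead of calling log.FromContext. A minimal sketch of that fallback, with illustrative type and function names (only the log.Log.WithName call mirrors the diff above):

package example

import (
	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

type exampleFilter struct {
	logger logr.Logger
}

func newExampleFilter() *exampleFilter {
	// No per-request context exists in predicate methods, so derive the logger
	// once from the global log.Log; the commit uses an empty name, but a
	// descriptive name would also work.
	return &exampleFilter{logger: log.Log.WithName("")}
}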

internal/controller/sparkapplication/event_handler.go

Lines changed: 14 additions & 5 deletions

@@ -26,6 +26,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
 	"github.com/kubeflow/spark-operator/v2/internal/metrics"
@@ -53,6 +54,7 @@ func NewSparkPodEventHandler(client client.Client, metrics *metrics.SparkExecuto
 
 // Create implements handler.EventHandler.
 func (h *SparkPodEventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.TypedRateLimitingInterface[ctrl.Request]) {
+	logger := log.FromContext(ctx)
 	pod, ok := event.Object.(*corev1.Pod)
 	if !ok {
 		return
@@ -81,6 +83,7 @@ func (h *SparkPodEventHandler) Update(ctx context.Context, event event.UpdateEve
 		return
 	}
 
+	logger := log.FromContext(ctx)
 	logger.Info("Spark pod updated", "name", newPod.Name, "namespace", newPod.Namespace, "oldPhase", oldPod.Status.Phase, "newPhase", newPod.Status.Phase)
 	h.enqueueSparkAppForUpdate(ctx, newPod, queue)
 
@@ -96,7 +99,8 @@ func (h *SparkPodEventHandler) Delete(ctx context.Context, event event.DeleteEve
 		return
 	}
 
-	logger.Info("Spark pod deleted", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase)
+	logger := log.FromContext(ctx, "pod", pod.Name, "phase", pod.Status.Phase)
+	logger.Info("Spark pod deleted")
 	h.enqueueSparkAppForUpdate(ctx, pod, queue)
 
 	if h.metrics != nil && util.IsExecutorPod(pod) {
@@ -111,7 +115,8 @@ func (h *SparkPodEventHandler) Generic(ctx context.Context, event event.GenericE
 		return
 	}
 
-	logger.Info("Spark pod generic event ", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase)
+	logger := log.FromContext(ctx, "pod", pod.Name, "phase", pod.Status.Phase)
+	logger.Info("Spark pod generic event ")
 	h.enqueueSparkAppForUpdate(ctx, pod, queue)
 }
 
@@ -165,7 +170,8 @@ func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queu
 		return
 	}
 
-	logger.Info("SparkApplication created", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+	logger := log.FromContext(ctx, "namespace", app.Namespace, "name", app.Name)
+	logger.Info("SparkApplication created")
 	queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}})
 
 	if h.metrics != nil {
@@ -185,6 +191,7 @@ func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queu
 		return
 	}
 
+	logger := log.FromContext(ctx)
 	logger.Info("SparkApplication updated", "name", oldApp.Name, "namespace", oldApp.Namespace, "oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State)
 	queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: newApp.Name, Namespace: newApp.Namespace}})
 
@@ -200,7 +207,8 @@ func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queu
 		return
 	}
 
-	logger.Info("SparkApplication deleted", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+	logger := log.FromContext(ctx, "name", app.Name, "namespace", app.Namespace)
+	logger.Info("SparkApplication deleted", "state", app.Status.AppState.State)
	queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}})
 
 	if h.metrics != nil {
@@ -215,6 +223,7 @@ func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, qu
 		return
 	}
 
-	logger.Info("SparkApplication generic event", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+	logger := log.FromContext(ctx, "name", app.Name, "namespace", app.Namespace)
+	logger.Info("SparkApplication generic event", "state", app.Status.AppState.State)
 	queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}})
 }
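
The handler changes above rely on the variadic form of log.FromContext(ctx, keysAndValues...), which attaches the given key/value pairs to the returned logger; the subsequent Info calls therefore only pass the message and any event-specific fields such as state.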

internal/controller/sparkapplication/monitoring_config.go

Lines changed: 8 additions & 10 deletions

@@ -20,43 +20,42 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/golang/glog"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/retry"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	"github.com/kubeflow/spark-operator/v2/api/v1beta2"
 	"github.com/kubeflow/spark-operator/v2/pkg/common"
 	"github.com/kubeflow/spark-operator/v2/pkg/util"
 )
 
-func configPrometheusMonitoring(app *v1beta2.SparkApplication, client client.Client) error {
+func configPrometheusMonitoring(ctx context.Context, app *v1beta2.SparkApplication, client client.Client) error {
+	logger := log.FromContext(ctx)
 	port := common.DefaultPrometheusJavaAgentPort
 	if app.Spec.Monitoring.Prometheus != nil && app.Spec.Monitoring.Prometheus.Port != nil {
 		port = *app.Spec.Monitoring.Prometheus.Port
 	}
 
 	// If one or both of the metricsPropertiesFile and Prometheus.ConfigFile are not set
 	if !util.HasMetricsPropertiesFile(app) || !util.HasPrometheusConfigFile(app) {
-		logger.V(1).Info("Creating a ConfigMap for metrics and Prometheus configurations")
 		configMapName := util.GetPrometheusConfigMapName(app)
 		configMap := buildPrometheusConfigMap(app, configMapName)
 		key := types.NamespacedName{Namespace: configMap.Namespace, Name: configMap.Name}
 		if retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 			cm := &corev1.ConfigMap{}
-			if err := client.Get(context.TODO(), key, cm); err != nil {
+			if err := client.Get(ctx, key, cm); err != nil {
 				if errors.IsNotFound(err) {
-					return client.Create(context.TODO(), configMap)
+					return client.Create(ctx, configMap)
 				}
 				return err
 			}
 			cm.Data = configMap.Data
-			return client.Update(context.TODO(), cm)
+			return client.Update(ctx, cm)
 		}); retryErr != nil {
-			logger.Error(retryErr, "Failed to create/update Prometheus ConfigMap for SparkApplication", "name", app.Name, "ConfigMap name", configMap.Name, "namespace", app.Namespace)
 			return retryErr
 		}
 	}
@@ -74,9 +73,8 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, client client.Cli
 
 	if util.HasPrometheusConfigFile(app) {
 		configFile := *app.Spec.Monitoring.Prometheus.ConfigFile
-		glog.V(2).Infof("Overriding the default Prometheus configuration with config file %s in the Spark image.", configFile)
-		javaOption = fmt.Sprintf("-javaagent:%s=%d:%s", app.Spec.Monitoring.Prometheus.JmxExporterJar,
-			port, configFile)
+		logger.V(1).Info("Overriding the default Prometheus configuration with config file in the Spark image.", "configFile", configFile)
+		javaOption = fmt.Sprintf("-javaagent:%s=%d:%s", app.Spec.Monitoring.Prometheus.JmxExporterJar, port, configFile)
 	}
 
 	/* work around for push gateway issue: https://github.com/prometheus/pushgateway/issues/97 */

internal/controller/sparkapplication/monitoring_config_test.go

Lines changed: 1 addition & 1 deletion

@@ -46,7 +46,7 @@ func TestConfigPrometheusMonitoring(t *testing.T) {
 	fakeClient := fake.NewFakeClient()
 
 	testFn := func(test testcase, t *testing.T) {
-		err := configPrometheusMonitoring(test.app, fakeClient)
+		err := configPrometheusMonitoring(context.TODO(), test.app, fakeClient)
 		assert.NoError(t, err, "failed to configure Prometheus monitoring")
 
 		configMapName := util.GetPrometheusConfigMapName(test.app)
