@@ -1,7 +1,9 @@
 package parquetconverter
 
 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -414,3 +416,78 @@ func (m *mockTenantLimits) ByUserID(userID string) *validation.Limits {
 func (m *mockTenantLimits) AllByUserID() map[string]*validation.Limits {
 	return m.limits
 }
+
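+// TestConverter_SkipBlocksWithExistingValidMarker verifies that a block which
+// already carries a converter marker at the current version is skipped by the
+// converter instead of being converted a second time.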
+func TestConverter_SkipBlocksWithExistingValidMarker(t *testing.T) {
+	cfg := prepareConfig()
+	user := "user"
+	ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil)
+	t.Cleanup(func() { assert.NoError(t, closer.Close()) })
+	dir := t.TempDir()
+
+	cfg.Ring.InstanceID = "parquet-converter-1"
+	cfg.Ring.InstanceAddr = "1.2.3.4"
+	cfg.Ring.KVStore.Mock = ringStore
+	bucketClient, err := filesystem.NewBucket(t.TempDir())
+	require.NoError(t, err)
+	userBucket := bucket.NewPrefixedBucketClient(bucketClient, user)
+	limits := &validation.Limits{}
+	flagext.DefaultValues(limits)
+	limits.ParquetConverterEnabled = true
+
+	c, logger, _ := prepare(t, cfg, objstore.WithNoopInstr(bucketClient), limits, nil)
+
+	ctx := context.Background()
+
+	lbls := labels.FromStrings("__name__", "test")
+
+	// Create a block
+	rnd := rand.New(rand.NewSource(time.Now().Unix()))
+	blockID, err := e2e.CreateBlock(ctx, rnd, dir, []labels.Labels{lbls}, 2, 0, 2*time.Hour.Milliseconds(), time.Minute.Milliseconds(), 10)
+	require.NoError(t, err)
+
+	// Upload the block to the bucket
+	blockDir := fmt.Sprintf("%s/%s", dir, blockID.String())
+	b, err := tsdb.OpenBlock(nil, blockDir, nil, nil)
+	require.NoError(t, err)
+	err = block.Upload(ctx, logger, userBucket, b.Dir(), metadata.NoneFunc)
+	require.NoError(t, err)
+
+	// Write a converter mark with version 1 to simulate an already converted block.
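+	// The converter treats a marker at the current mark version as proof that
+	// the block was already converted, so it must leave this block untouched.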
+	markerV1 := parquet.ConverterMark{
+		Version: parquet.ParquetConverterMarkVersion1,
+	}
+	markerBytes, err := json.Marshal(markerV1)
+	require.NoError(t, err)
+	markerPath := path.Join(blockID.String(), parquet.ConverterMarkerFileName)
+	err = userBucket.Upload(ctx, markerPath, bytes.NewReader(markerBytes))
+	require.NoError(t, err)
+
+	// Verify the marker exists with version 1
+	marker, err := parquet.ReadConverterMark(ctx, blockID, userBucket, logger)
+	require.NoError(t, err)
+	require.Equal(t, parquet.ParquetConverterMarkVersion1, marker.Version)
+
+	// Start the converter
+	err = services.StartAndAwaitRunning(ctx, c)
+	require.NoError(t, err)
+	defer services.StopAndAwaitTerminated(ctx, c) // nolint:errcheck
+
+	// Wait a bit for the converter to process blocks.
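+	// A fixed sleep is coarse, but the test asserts stability (the marker
+	// staying at version 1) rather than a state change, so it is sufficient.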
+	time.Sleep(5 * time.Second)
+
+	// Verify the marker version is still 1 (i.e., the block was not converted again)
+	markerAfter, err := parquet.ReadConverterMark(ctx, blockID, userBucket, logger)
+	require.NoError(t, err)
+	require.Equal(t, parquet.ParquetConverterMarkVersion1, markerAfter.Version, "block with existing marker version 1 should not be converted again")
+
+	// Verify that no conversion happened by checking the convertedBlocks metric.
+	// It should be 0 since the block was already converted.
+	assert.Equal(t, 0.0, testutil.ToFloat64(c.metrics.convertedBlocks.WithLabelValues(user)))
+}