diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java index 47f965909645..b77d8916b205 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppCommonConfig.java @@ -429,6 +429,12 @@ public CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode) { return this; } + @Override + public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) { + setProperty("pipe_enable_memory_checked", String.valueOf(isPipeEnableMemoryCheck)); + return this; + } + @Override public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) { setProperty("pipe_air_gap_receiver_enabled", String.valueOf(isPipeAirGapReceiverEnabled)); diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java index 151e13f1290f..2461c1e6ba2d 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/cluster/config/MppSharedCommonConfig.java @@ -438,6 +438,13 @@ public CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode) { return this; } + @Override + public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) { + dnConfig.setIsPipeEnableMemoryCheck(isPipeEnableMemoryCheck); + cnConfig.setIsPipeEnableMemoryCheck(isPipeEnableMemoryCheck); + return this; + } + @Override public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) { dnConfig.setPipeAirGapReceiverEnabled(isPipeAirGapReceiverEnabled); diff --git 
a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java index 9e82aaf9ebf3..e1de42382b6a 100644 --- a/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/remote/config/RemoteCommonConfig.java @@ -308,6 +308,11 @@ public CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode) { return this; } + @Override + public CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck) { + return this; + } + @Override public CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled) { return this; diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java index 639c61427838..09a4adce9440 100644 --- a/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/CommonConfig.java @@ -138,6 +138,8 @@ CommonConfig setEnableAutoLeaderBalanceForIoTConsensus( CommonConfig setSchemaRegionPerDataNode(double schemaRegionPerDataNode); + CommonConfig setIsPipeEnableMemoryCheck(boolean isPipeEnableMemoryCheck); + CommonConfig setPipeAirGapReceiverEnabled(boolean isPipeAirGapReceiverEnabled); CommonConfig setDriverTaskExecutionTimeSliceInMs(long driverTaskExecutionTimeSliceInMs); diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java index 5b4ae76332f5..13131dc617f1 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadTsFileIT.java @@ -36,6 +36,7 @@ import org.junit.After; import org.junit.Assert; import 
org.junit.Before; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -901,6 +902,7 @@ public void testLoadLocally() throws Exception { } @Test + @Ignore("Load with conversion is currently banned") public void testLoadWithConvertOnTypeMismatch() throws Exception { List> measurementSchemas = diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java index 1817efdaa1d3..59478fc36e67 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/AbstractPipeDualAutoIT.java @@ -48,13 +48,15 @@ protected void setupConfig() { .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java index 65eb2458df77..9da5591bb2f7 100644 --- 
a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java @@ -59,14 +59,16 @@ public void setUp() { .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS); + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS); + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java index d2ca8ce138c2..c8d5bc0b4717 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java @@ -74,7 +74,8 @@ public void setUp() { .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS); + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() @@ -84,7 
+85,8 @@ public void setUp() { .setSchemaReplicationFactor(3) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS); + .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java index 972131500693..2114ed37c7f1 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java @@ -66,14 +66,17 @@ public void setUp() { .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); + receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setPipeAirGapReceiverEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java index 7fbb0f3a7179..9a8ec2fc18e3 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeDataSinkIT.java @@ -459,7 +459,6 @@ public void testAsyncLoadTsFileWithoutVerify() throws Exception { testLoadTsFileWithoutVerify("async"); } - @Test private void testLoadTsFileWithoutVerify(final String loadTsFileStrategy) throws Exception { final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java index 0b4636c0d137..4b33de0a31c9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeExtractorIT.java @@ -71,13 +71,15 @@ public void setUp() { // Disable sender compaction for tsfile determination in loose range test .setEnableSeqSpaceCompaction(false) .setEnableUnseqSpaceCompaction(false) - .setEnableCrossSpaceCompaction(false); + .setEnableCrossSpaceCompaction(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java index addf0314b400..771d50c97c68 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeIdempotentIT.java @@ -65,13 +65,15 @@ public void setUp() { // of the tested idempotent sql. .setDefaultSchemaRegionGroupNumPerDatabase(1) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java index 125fd6972b17..13a63a585a8d 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProcessorIT.java @@ -59,13 +59,15 @@ public void setUp() { .setAutoCreateSchemaEnabled(true) .setTimestampPrecision("ms") .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); 
receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java index c5d41f001cb8..1f6467685257 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeProtocolIT.java @@ -74,7 +74,8 @@ private void innerSetUp( .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setDataRegionConsensusProtocolClass(dataRegionConsensus) .setSchemaReplicationFactor(schemaRegionReplicationFactor) - .setDataReplicationFactor(dataRegionReplicationFactor); + .setDataReplicationFactor(dataRegionReplicationFactor) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() @@ -83,7 +84,8 @@ private void innerSetUp( .setSchemaRegionConsensusProtocolClass(schemaRegionConsensus) .setDataRegionConsensusProtocolClass(dataRegionConsensus) .setSchemaReplicationFactor(schemaRegionReplicationFactor) - .setDataReplicationFactor(dataRegionReplicationFactor); + .setDataReplicationFactor(dataRegionReplicationFactor) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java index 563377c927ea..c71e212b629a 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeWithLoadIT.java @@ -61,13 +61,15 @@ public void setUp() { // Disable sender compaction to test mods .setEnableSeqSpaceCompaction(false) .setEnableUnseqSpaceCompaction(false) - .setEnableCrossSpaceCompaction(false); + .setEnableCrossSpaceCompaction(false) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(true) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java index 3e9f51111656..b7091a1db329 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/AbstractPipeDualManualIT.java @@ -48,13 +48,15 @@ protected void setupConfig() { .getCommonConfig() .setAutoCreateSchemaEnabled(false) .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); receiverEnv .getConfig() .getCommonConfig() .setAutoCreateSchemaEnabled(false) 
.setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) - .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS); + .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) + .setIsPipeEnableMemoryCheck(false); // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java index 9af757acbdee..fc2b32e145bf 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionISessionIT.java @@ -44,6 +44,7 @@ import org.apache.tsfile.write.record.Tablet; import org.apache.tsfile.write.schema.IMeasurementSchema; import org.apache.tsfile.write.schema.MeasurementSchema; +import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -76,6 +77,7 @@ public void insertTablet() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertTabletReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -94,6 +96,7 @@ public void insertAlignedTablet() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertAlignedTabletReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -103,6 +106,7 @@ public void insertAlignedTabletReceiveByTsFile() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertRecordsReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, 
ISession receiverSession, Tablet tablet) -> { @@ -137,6 +141,7 @@ public void insertRecord() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertRecordReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -177,6 +182,7 @@ public void insertAlignedRecord() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertAlignedRecordReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -225,6 +231,7 @@ public void insertAlignedRecords() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertAlignedRecordsReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -253,6 +260,7 @@ public void insertStringRecordsOfOneDevice() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertStringRecordsOfOneDeviceReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -281,6 +289,7 @@ public void insertAlignedStringRecordsOfOneDevice() { } @Test + @Ignore("The receiver conversion is currently banned, will ignore conflict") public void insertAlignedStringRecordsOfOneDeviceReceiveByTsFile() { prepareTypeConversionTest( (ISession senderSession, ISession receiverSession, Tablet tablet) -> { @@ -381,13 +390,12 @@ private void createDataPipe(String diff, boolean isTSFile) { String sql = String.format( "create pipe test%s" - + " with source ('source'='iotdb-source','source.path'='root.test.**','realtime.mode'='%s','realtime.enable'='%s','history.enable'='%s')" + + " with source ('source'='iotdb-source','source.path'='root.test.**','realtime.mode'='%s','realtime.enable'='%s','history.enable'='true')" + " with processor 
('processor'='do-nothing-processor')" + " with sink ('node-urls'='%s:%s','batch.enable'='false','sink.format'='%s')", diff, isTSFile ? "file" : "forced-log", !isTSFile, - isTSFile, receiverEnv.getIP(), receiverEnv.getPort(), isTSFile ? "tsfile" : "tablet"); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java index 4ebce6b978c5..24b40d2e0c9c 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeTypeConversionIT.java @@ -234,7 +234,7 @@ private void createDataPipe() { String sql = String.format( "create pipe test" - + " with source ('source'='iotdb-source','source.path'='root.test.**','realtime.mode'='forced-log','realtime.enable'='true','history.enable'='false')" + + " with source ('source'='iotdb-source','source.path'='root.test.**','realtime.mode'='forced-log')" + " with processor ('processor'='do-nothing-processor')" + " with sink ('node-urls'='%s:%s','batch.enable'='false','sink.format'='tablet')", receiverEnv.getIP(), receiverEnv.getPort()); diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java index 4c5135764886..9a484426d762 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/single/AbstractPipeSingleIT.java @@ -36,6 +36,7 @@ public void setUp() { env.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); // 10 min, assert that the operations will not time out env.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + env.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); env.initClusterEnvironment(); } 
diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java index de17dec1ed42..8156139b84ce 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/cluster/IoTDBSubscriptionRestartIT.java @@ -82,7 +82,8 @@ public void setUp() throws Exception { .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS) .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS) .setSchemaReplicationFactor(3) - .setDataReplicationFactor(2); + .setDataReplicationFactor(2) + .setIsPipeEnableMemoryCheck(false); EnvFactory.getEnv().initClusterEnvironment(3, 3); } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java index 594f9efe6915..5e61607ff32b 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java @@ -58,6 +58,9 @@ protected void setUpConfig() { // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); receiverEnv.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); + + senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); + receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); } @Override diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java index 
6f8cba43e792..5ad46b10977c 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java @@ -117,8 +117,11 @@ static final class SubscriptionInfo { protected void setUpConfig() { super.setUpConfig(); + senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); + // Enable air gap receiver receiverEnv.getConfig().getCommonConfig().setPipeAirGapReceiverEnabled(true); + receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); } @Override diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java index 4327b7a00f04..1939a986c68f 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java @@ -68,7 +68,9 @@ protected void setUpConfig() { // Set timestamp precision to nanosecond senderEnv.getConfig().getCommonConfig().setTimestampPrecision("ns"); + senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); receiverEnv.getConfig().getCommonConfig().setTimestampPrecision("ns"); + receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); } @Test diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java index b046f09a6da9..37971833b97a 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java 
@@ -91,6 +91,9 @@ protected void setUpConfig() { .setPipeHeartbeatIntervalSecondsForCollectingPipeMeta(30); senderEnv.getConfig().getCommonConfig().setPipeMetaSyncerInitialSyncDelayMinutes(1); senderEnv.getConfig().getCommonConfig().setPipeMetaSyncerSyncIntervalMinutes(1); + senderEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); + + receiverEnv.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); } @Test diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java index 3f4150519287..7b5640234980 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/AbstractSubscriptionLocalIT.java @@ -33,7 +33,12 @@ public void setUp() throws Exception { super.setUp(); // enable subscription - EnvFactory.getEnv().getConfig().getCommonConfig().setSubscriptionEnabled(true); + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSubscriptionEnabled(true) + .setIsPipeEnableMemoryCheck(false); + EnvFactory.getEnv().initClusterEnvironment(); } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java index ee4a33ca2ed7..e758bcb5a171 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/triple/AbstractSubscriptionTripleIT.java @@ -71,11 +71,14 @@ protected void setUpConfig() { receiver2.getConfig().getCommonConfig().setDnConnectionTimeoutMs(600000); // reduce tsfile batch memory usage - 
sender.getConfig().getCommonConfig().setSubscriptionPrefetchTsFileBatchMaxDelayInMs(500); sender .getConfig() .getCommonConfig() + .setIsPipeEnableMemoryCheck(false) + .setSubscriptionPrefetchTsFileBatchMaxDelayInMs(500) .setSubscriptionPrefetchTsFileBatchMaxSizeInBytes(32 * 1024); + receiver1.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); + receiver2.getConfig().getCommonConfig().setIsPipeEnableMemoryCheck(false); } @Override diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java index 6ad0c843e274..115dafbc5dc2 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportTsFileTestIT.java @@ -53,7 +53,11 @@ public class ExportTsFileTestIT extends AbstractScriptIT { @BeforeClass public static void setUp() throws Exception { // enable subscription - EnvFactory.getEnv().getConfig().getCommonConfig().setSubscriptionEnabled(true); + EnvFactory.getEnv() + .getConfig() + .getCommonConfig() + .setSubscriptionEnabled(true) + .setIsPipeEnableMemoryCheck(false); EnvFactory.getEnv().initClusterEnvironment(); ip = EnvFactory.getEnv().getIP(); diff --git a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java index 3d0418672427..4c7fffcfba51 100644 --- a/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java +++ b/iotdb-api/pipe-api/src/main/java/org/apache/iotdb/pipe/api/event/dml/insertion/TsFileInsertionEvent.java @@ -21,6 +21,8 @@ import org.apache.iotdb.pipe.api.event.Event; +import java.io.File; + /** * {@link TsFileInsertionEvent} is used to define the event of writing TsFile. 
Event data stores in * disks, which is compressed and encoded, and requires IO cost for computational processing. @@ -34,4 +36,12 @@ public interface TsFileInsertionEvent extends Event, AutoCloseable { * @return {@code Iterable} the list of {@link TabletInsertionEvent} */ Iterable toTabletInsertionEvents(); + + /** + * Get the file that stores the data of this {@link TsFileInsertionEvent}. The file is compressed + * and encoded, and requires IO cost for computational processing. + * + * @return the file that stores the data of this {@link TsFileInsertionEvent} + */ + File getTsFile(); } diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java index 81f136fadd0a..34f9872bb10a 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/TSStatusCode.java @@ -259,6 +259,7 @@ public enum TSStatusCode { PIPE_RECEIVER_USER_CONFLICT_EXCEPTION(1810), PIPE_CONFIG_RECEIVER_HANDSHAKE_NEEDED(1811), PIPE_TRANSFER_SLICE_OUT_OF_ORDER(1812), + PIPE_PUSH_META_TIMEOUT(1813), // Subscription SUBSCRIPTION_VERSION_ERROR(1900), diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java index 9aea3edfa38b..1ac64eb246f0 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeSubtaskExecutor.java @@ -28,7 +28,7 @@ public class PipeConfigNodeSubtaskExecutor extends PipeSubtaskExecutor { private static final int THREAD_NUM = 1; private PipeConfigNodeSubtaskExecutor() { - super(THREAD_NUM, 
ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL, true); + super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL.getName(), true); } /** @@ -36,7 +36,7 @@ private PipeConfigNodeSubtaskExecutor() { */ @TestOnly public PipeConfigNodeSubtaskExecutor(final Object ignored) { - super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL, true); + super(THREAD_NUM, ThreadName.PIPE_CONFIGNODE_EXECUTOR_POOL.getName(), true); } private static class PipeSchemaSubtaskExecutorHolder { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java index ae20141c81f9..b79fd8da94f5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/agent/task/PipeConfigNodeTaskAgent.java @@ -46,11 +46,13 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.stream.Collectors; public class PipeConfigNodeTaskAgent extends PipeTaskAgent { @@ -248,4 +250,10 @@ protected void collectPipeMetaListInternal( resp.setPipeRemainingEventCountList(pipeRemainingEventCountList); resp.setPipeRemainingTimeList(pipeRemainingTimeList); } + + @Override + public void runPipeTasks( + final Collection pipeTasks, final Consumer runSingle) { + pipeTasks.forEach(runSingle); + } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java index 3ab75270b0bb..eb4ffc48c791 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/pipe/AbstractOperatePipeProcedureV2.java @@ -422,12 +422,20 @@ public static Map pushPipeMetaToDataNodes( * @return Error messages for the given pipe after pushing pipe meta */ public static String parsePushPipeMetaExceptionForPipe( - String pipeName, Map respMap) { + final String pipeName, final Map respMap) { final StringBuilder exceptionMessageBuilder = new StringBuilder(); - for (Map.Entry respEntry : respMap.entrySet()) { - int dataNodeId = respEntry.getKey(); - TPushPipeMetaResp resp = respEntry.getValue(); + for (final Map.Entry respEntry : respMap.entrySet()) { + final int dataNodeId = respEntry.getKey(); + final TPushPipeMetaResp resp = respEntry.getValue(); + + if (resp.getStatus().getCode() == TSStatusCode.PIPE_PUSH_META_TIMEOUT.getStatusCode()) { + exceptionMessageBuilder.append( + String.format( + "DataNodeId: %s, Message: Timeout to wait for lock while processing pushPipeMeta on dataNodes.", + dataNodeId)); + continue; + } if (resp.getStatus().getCode() == TSStatusCode.PIPE_PUSH_META_ERROR.getStatusCode()) { if (!resp.isSetExceptionMessages()) { @@ -438,7 +446,7 @@ public static String parsePushPipeMetaExceptionForPipe( continue; } - AtomicBoolean hasException = new AtomicBoolean(false); + final AtomicBoolean hasException = new AtomicBoolean(false); resp.getExceptionMessages() .forEach( diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java index c2c04f945299..8854ee647f99 100644 
--- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/pipe/connector/PipeConfigNodeThriftRequestTest.java @@ -44,7 +44,6 @@ public void testPipeTransferConfigHandshakeReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getTimestampPrecision(), deserializeReq.getTimestampPrecision()); } @@ -57,7 +56,6 @@ public void testPipeTransferConfigPlanReq() { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); } @Test @@ -72,7 +70,6 @@ public void testPipeTransferConfigSnapshotPieceReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileName(), deserializeReq.getFileName()); Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset()); @@ -95,7 +92,6 @@ public void testPipeTransferConfigSnapshotSealReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileNames(), deserializeReq.getFileNames()); Assert.assertEquals(req.getFileLengths(), deserializeReq.getFileLengths()); diff --git a/iotdb-core/datanode/src/assembly/resources/tools/tsfile/mark-is-generated-by-pipe.bat b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/mark-is-generated-by-pipe.bat new file mode 
100644 index 000000000000..7d2b867bba9a --- /dev/null +++ b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/mark-is-generated-by-pipe.bat @@ -0,0 +1,59 @@ +@REM +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM + +@echo off +echo ```````````````````````````````````````````````````````````````````````` +echo Starting Validating the isGeneratedByPipe Mark in TsFile Resources +echo ```````````````````````````````````````````````````````````````````````` + +if "%OS%" == "Windows_NT" setlocal + +pushd %~dp0..\.. 
+if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% +popd + +if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.db.tools.validate.TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool +if NOT DEFINED JAVA_HOME goto :err + +@REM ----------------------------------------------------------------------------- +@REM ***** CLASSPATH library setting ***** +@REM Ensure that any user defined CLASSPATH variables are not used on startup +set CLASSPATH="%IOTDB_HOME%\lib\*" + +goto okClasspath + +:append +set CLASSPATH=%CLASSPATH%;%1 +goto :eof + +@REM ----------------------------------------------------------------------------- +:okClasspath + +"%JAVA_HOME%\bin\java" -cp "%CLASSPATH%" %MAIN_CLASS% %* + +goto finally + +:err +echo JAVA_HOME environment variable must be set! +pause + +@REM ----------------------------------------------------------------------------- +:finally + +ENDLOCAL \ No newline at end of file diff --git a/iotdb-core/datanode/src/assembly/resources/tools/tsfile/mark-is-generated-by-pipe.sh b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/mark-is-generated-by-pipe.sh new file mode 100644 index 000000000000..daabae671f3e --- /dev/null +++ b/iotdb-core/datanode/src/assembly/resources/tools/tsfile/mark-is-generated-by-pipe.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# + +echo ------------------------------------------------------------------------------------ +echo Starting Validating the isGeneratedByPipe Mark in TsFile Resources +echo ------------------------------------------------------------------------------------ + +source "$(dirname "$0")/../../sbin/iotdb-common.sh" +#get_iotdb_include and checkAllVariables is in iotdb-common.sh +VARS=$(get_iotdb_include "$*") +checkAllVariables +export IOTDB_HOME="${IOTDB_HOME}/.." +eval set -- "$VARS" + +if [ -n "$JAVA_HOME" ]; then + for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do + if [ -x "$java" ]; then + JAVA="$java" + break + fi + done +else + JAVA=java +fi + +CLASSPATH="" +for f in ${IOTDB_HOME}/lib/*.jar; do + CLASSPATH=${CLASSPATH}":"$f +done + +MAIN_CLASS=org.apache.iotdb.db.tools.validate.TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool + +"$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" "$@" +exit $? 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java index 446a428eee76..890af218a594 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java @@ -314,6 +314,8 @@ public class IoTDBConfig { private String extPipeDir = IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.EXT_PIPE_FOLDER_NAME; + private int pipeTaskThreadCount = 5; + /** External lib directory for MQTT, stores user-uploaded JAR files */ private String mqttDir = IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.MQTT_FOLDER_NAME; @@ -1046,6 +1048,8 @@ public class IoTDBConfig { /** Policy of DataNodeSchemaCache eviction */ private String dataNodeSchemaCacheEvictionPolicy = "FIFO"; + private int schemaThreadCount = 5; + private String readConsistencyLevel = "strong"; /** Maximum execution time of a DriverTask */ @@ -1147,8 +1151,6 @@ public class IoTDBConfig { private long loadTsFileTabletConversionBatchMemorySizeInBytes = 4096 * 1024; - private int loadTsFileTabletConversionThreadCount = 5; - private long loadChunkMetadataMemorySizeInBytes = 33554432; // 32MB private long loadMemoryAllocateRetryIntervalMs = 1000L; @@ -3345,6 +3347,14 @@ public void setExtPipeDir(String extPipeDir) { this.extPipeDir = extPipeDir; } + public int getPipeTaskThreadCount() { + return pipeTaskThreadCount; + } + + public void setPipeTaskThreadCount(int pipeTaskThreadCount) { + this.pipeTaskThreadCount = pipeTaskThreadCount; + } + public void setPartitionCacheSize(int partitionCacheSize) { this.partitionCacheSize = partitionCacheSize; } @@ -3481,6 +3491,14 @@ public void setDataNodeSchemaCacheEvictionPolicy(String dataNodeSchemaCacheEvict this.dataNodeSchemaCacheEvictionPolicy = dataNodeSchemaCacheEvictionPolicy; } + public int getSchemaThreadCount() { + return schemaThreadCount; 
+ } + + public void setSchemaThreadCount(int schemaThreadCount) { + this.schemaThreadCount = schemaThreadCount; + } + public String getReadConsistencyLevel() { return readConsistencyLevel; } @@ -4026,14 +4044,6 @@ public void setLoadTsFileTabletConversionBatchMemorySizeInBytes( loadTsFileTabletConversionBatchMemorySizeInBytes; } - public int getLoadTsFileTabletConversionThreadCount() { - return loadTsFileTabletConversionThreadCount; - } - - public void setLoadTsFileTabletConversionThreadCount(int loadTsFileTabletConversionThreadCount) { - this.loadTsFileTabletConversionThreadCount = loadTsFileTabletConversionThreadCount; - } - public long getLoadChunkMetadataMemorySizeInBytes() { return loadChunkMetadataMemorySizeInBytes; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java index 25f3095d9a05..7fbd9ce2ed79 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java @@ -955,6 +955,10 @@ public void loadProperties(TrimProperties properties) throws BadNodeUrlException } conf.setExtPipeDir(properties.getProperty("ext_pipe_dir", conf.getExtPipeDir()).trim()); + conf.setPipeTaskThreadCount( + Integer.parseInt( + properties.getProperty( + "pipe_task_thread_count", Integer.toString(conf.getPipeTaskThreadCount()).trim()))); // At the same time, set TSFileConfig List fsTypes = new ArrayList<>(); @@ -1091,6 +1095,11 @@ public void loadProperties(TrimProperties properties) throws BadNodeUrlException properties.getProperty( "datanode_schema_cache_eviction_policy", conf.getDataNodeSchemaCacheEvictionPolicy())); + conf.setSchemaThreadCount( + Integer.parseInt( + properties.getProperty( + "schema_thread_count", Integer.toString(conf.getSchemaThreadCount())))); + loadIoTConsensusProps(properties); loadPipeConsensusProps(properties); @@ 
-2386,11 +2395,6 @@ private void loadLoadTsFileProps(TrimProperties properties) throws IOException { properties.getProperty( "load_tsfile_tablet_conversion_batch_memory_size_in_bytes", String.valueOf(conf.getLoadTsFileTabletConversionBatchMemorySizeInBytes())))); - conf.setLoadTsFileTabletConversionThreadCount( - Integer.parseInt( - properties.getProperty( - "load_tsfile_tablet_conversion_thread_count", - String.valueOf(conf.getLoadTsFileTabletConversionThreadCount())))); conf.setLoadChunkMetadataMemorySizeInBytes( Long.parseLong( Optional.ofNullable( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java index 96e2e4e72994..089d4e2fda72 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeAgentLauncher.java @@ -36,7 +36,6 @@ import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; import org.apache.iotdb.db.protocol.client.ConfigNodeInfo; import org.apache.iotdb.db.service.ResourcesInformationHolder; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache; import org.apache.iotdb.pipe.api.exception.PipeException; import org.apache.iotdb.rpc.TSStatusCode; @@ -161,7 +160,6 @@ public static synchronized void launchPipeTaskAgent() { try (final ConfigNodeClient configNodeClient = ConfigNodeClientManager.getInstance().borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { final TGetAllPipeInfoResp getAllPipeInfoResp = configNodeClient.getAllPipeInfo(); - WALInsertNodeCache.init(); PipeTabletEventBatch.init(); if (getAllPipeInfoResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { throw new StartupException("Failed to get pipe task meta from config node."); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java index 5a1f9d8b8456..afb05fc2c39b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/runtime/PipeDataNodeRuntimeAgent.java @@ -35,7 +35,6 @@ import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.event.common.terminate.PipeTerminateEvent; import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningQueue; import org.apache.iotdb.db.pipe.resource.PipeDataNodeHardlinkOrCopiedFileDirStartupCleaner; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; @@ -86,15 +85,6 @@ public synchronized void start() throws StartupException { PipeConfig.getInstance().printAllConfigs(); PipeAgentLauncher.launchPipeTaskAgent(); - registerPeriodicalJob( - "PipeTaskAgent#restartAllStuckPipes", - PipeDataNodeAgent.task()::restartAllStuckPipes, - PipeConfig.getInstance().getPipeStuckRestartIntervalSeconds()); - registerPeriodicalJob( - "PipeTaskAgent#flushDataRegionIfNeeded", - PipeTerminateEvent::flushDataRegionIfNeeded, - PipeConfig.getInstance().getPipeFlushAfterLastTerminateSeconds()); - pipePeriodicalJobExecutor.start(); if (PipeConfig.getInstance().getPipeEventReferenceTrackingEnabled()) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java index b840b9274aa4..f800c15c3f8a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/PipeDataNodeTaskAgent.java @@ -21,6 +21,10 @@ import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.concurrent.IoTThreadFactory; +import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.consensus.DataRegionId; import org.apache.iotdb.commons.consensus.SchemaRegionId; import org.apache.iotdb.commons.consensus.index.ProgressIndex; @@ -36,8 +40,7 @@ import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; -import org.apache.iotdb.commons.service.metric.MetricService; -import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; import org.apache.iotdb.consensus.exception.ConsensusException; import org.apache.iotdb.consensus.pipe.consensuspipe.ConsensusPipeName; import org.apache.iotdb.db.conf.IoTDBConfig; @@ -47,12 +50,10 @@ import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeBuilder; import org.apache.iotdb.db.pipe.agent.task.builder.PipeDataNodeTaskBuilder; import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; -import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener; import org.apache.iotdb.db.pipe.extractor.schemaregion.SchemaRegionListeningFilter; import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; -import 
org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; import org.apache.iotdb.db.protocol.client.ConfigNodeClient; @@ -62,10 +63,7 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.pipe.PipeOperateSchemaQueueNode; import org.apache.iotdb.db.schemaengine.SchemaEngine; import org.apache.iotdb.db.storageengine.StorageEngine; -import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager; import org.apache.iotdb.db.subscription.agent.SubscriptionAgent; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.metrics.utils.SystemMetric; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp; import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; import org.apache.iotdb.mpp.rpc.thrift.TPushPipeMetaRespExceptionMessage; @@ -83,6 +81,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -91,21 +90,52 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE; +import static 
org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATH_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_PATTERN_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.EXTRACTOR_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_END_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_HISTORY_START_TIME_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_PATH_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_PATTERN_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_REALTIME_ENABLE_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant.SOURCE_START_TIME_KEY; + public class PipeDataNodeTaskAgent extends PipeTaskAgent { private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataNodeTaskAgent.class); protected static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); - private 
static final AtomicLong LAST_FORCED_RESTART_TIME = - new AtomicLong(System.currentTimeMillis()); - private static final Map PIPE_NAME_TO_LAST_RESTART_TIME_MAP = - new ConcurrentHashMap<>(); + private final ExecutorService pipeExecutor = + new WrappedThreadPoolExecutor( + 0, + IoTDBDescriptor.getInstance().getConfig().getPipeTaskThreadCount(), + 0L, + TimeUnit.SECONDS, + new ArrayBlockingQueue<>( + IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount()), + new IoTThreadFactory(ThreadName.PIPE_PARALLEL_EXECUTION_POOL.getName()), + ThreadName.PIPE_PARALLEL_EXECUTION_POOL.getName(), + new ThreadPoolExecutor.CallerRunsPolicy()); ////////////////////////// Pipe Task Management Entry ////////////////////////// @@ -120,54 +150,6 @@ protected Map buildPipeTasks(final PipeMeta pipeMetaFromConfi return new PipeDataNodeBuilder(pipeMetaFromConfigNode).build(); } - ////////////////////////// Manage by Pipe Name ////////////////////////// - - @Override - protected void startPipe(final String pipeName, final long creationTime) { - final PipeMeta existedPipeMeta = pipeMetaKeeper.getPipeMeta(pipeName); - final PipeStatus status = existedPipeMeta.getRuntimeMeta().getStatus().get(); - if (PipeStatus.STOPPED.equals(status) || status == null) { - restartPipeToReloadResourceIfNeeded(existedPipeMeta); - } - - super.startPipe(pipeName, creationTime); - } - - private void restartPipeToReloadResourceIfNeeded(final PipeMeta pipeMeta) { - if (System.currentTimeMillis() - pipeMeta.getStaticMeta().getCreationTime() - < PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs()) { - return; - } - - final AtomicLong lastRestartTime = - PIPE_NAME_TO_LAST_RESTART_TIME_MAP.get(pipeMeta.getStaticMeta().getPipeName()); - if (lastRestartTime != null - && System.currentTimeMillis() - lastRestartTime.get() - < PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs()) { - LOGGER.info( - "Skipping reload resource for stopped pipe {} before starting it because reloading resource is too 
frequent.", - pipeMeta.getStaticMeta().getPipeName()); - return; - } - - if (PIPE_NAME_TO_LAST_RESTART_TIME_MAP.isEmpty()) { - LOGGER.info( - "Flushing storage engine before restarting pipe {}.", - pipeMeta.getStaticMeta().getPipeName()); - final long currentTime = System.currentTimeMillis(); - StorageEngine.getInstance().syncCloseAllProcessor(); - WALManager.getInstance().syncDeleteOutdatedFilesInWALNodes(); - LOGGER.info( - "Finished flushing storage engine, time cost: {} ms.", - System.currentTimeMillis() - currentTime); - } - - restartStuckPipe(pipeMeta); - LOGGER.info( - "Reloaded resource for stopped pipe {} before starting it.", - pipeMeta.getStaticMeta().getPipeName()); - } - ///////////////////////// Manage by regionGroupId ///////////////////////// @Override @@ -383,9 +365,8 @@ public void stopAllPipesWithCriticalException() { ///////////////////////// Heartbeat ///////////////////////// public void collectPipeMetaList(final TDataNodeHeartbeatResp resp) throws TException { - // Try the lock instead of directly acquire it to prevent the block of the cluster heartbeat - // 10s is the half of the HEARTBEAT_TIMEOUT_TIME defined in class BaseNodeCache in ConfigNode - if (!tryReadLockWithTimeOut(10)) { + if (!tryReadLockWithTimeOut( + CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) { return; } try { @@ -559,226 +540,6 @@ protected void collectPipeMetaListInternal( PipeInsertionDataNodeListener.getInstance().listenToHeartbeat(true); } - ///////////////////////// Restart Logic ///////////////////////// - - public void restartAllStuckPipes() { - final List removedPipeName = removeOutdatedPipeInfoFromLastRestartTimeMap(); - if (!removedPipeName.isEmpty()) { - final long currentTime = System.currentTimeMillis(); - LOGGER.info( - "Pipes {} now can dynamically adjust their extraction strategies. 
" - + "Start to flush storage engine to trigger the adjustment.", - removedPipeName); - StorageEngine.getInstance().syncCloseAllProcessor(); - WALManager.getInstance().syncDeleteOutdatedFilesInWALNodes(); - LOGGER.info( - "Finished flushing storage engine, time cost: {} ms.", - System.currentTimeMillis() - currentTime); - LOGGER.info("Skipping restarting pipes this round because of the dynamic flushing."); - return; - } - - if (!tryWriteLockWithTimeOut(5)) { - return; - } - - final Set stuckPipes; - try { - stuckPipes = findAllStuckPipes(); - } finally { - releaseWriteLock(); - } - - // If the pipe has been restarted recently, skip it. - stuckPipes.removeIf( - pipeMeta -> { - final AtomicLong lastRestartTime = - PIPE_NAME_TO_LAST_RESTART_TIME_MAP.get(pipeMeta.getStaticMeta().getPipeName()); - return lastRestartTime != null - && System.currentTimeMillis() - lastRestartTime.get() - < PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs(); - }); - - // Restart all stuck pipes. - // Note that parallelStream cannot be used here. The method PipeTaskAgent#dropPipe also uses - // parallelStream. If parallelStream is used here, the subtasks generated inside the dropPipe - // may not be scheduled by the worker thread of ForkJoinPool because of less available threads, - // and the parent task will wait for the completion of the subtasks in ForkJoinPool forever, - // causing the deadlock. 
- stuckPipes.forEach(this::restartStuckPipe); - } - - private List removeOutdatedPipeInfoFromLastRestartTimeMap() { - final List removedPipeName = new ArrayList<>(); - PIPE_NAME_TO_LAST_RESTART_TIME_MAP - .entrySet() - .removeIf( - entry -> { - final AtomicLong lastRestartTime = entry.getValue(); - final boolean shouldRemove = - lastRestartTime == null - || PipeConfig.getInstance().getPipeStuckRestartMinIntervalMs() - <= System.currentTimeMillis() - lastRestartTime.get(); - if (shouldRemove) { - removedPipeName.add(entry.getKey()); - } - return shouldRemove; - }); - return removedPipeName; - } - - private Set findAllStuckPipes() { - final Set stuckPipes = new HashSet<>(); - - if (System.currentTimeMillis() - LAST_FORCED_RESTART_TIME.get() - > PipeConfig.getInstance().getPipeSubtaskExecutorForcedRestartIntervalMs()) { - LAST_FORCED_RESTART_TIME.set(System.currentTimeMillis()); - for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { - stuckPipes.add(pipeMeta); - } - if (!stuckPipes.isEmpty()) { - LOGGER.warn( - "All {} pipe(s) will be restarted because of forced restart policy.", - stuckPipes.size()); - } - return stuckPipes; - } - - final long totalLinkedButDeletedTsFileResourceRamSize = - PipeDataNodeResourceManager.tsfile().getTotalLinkedButDeletedTsFileResourceRamSize(); - final long totalInsertNodeFloatingMemoryUsageInBytes = getAllFloatingMemoryUsageInByte(); - final long totalFloatingMemorySizeInBytes = - PipeMemoryManager.getTotalFloatingMemorySizeInBytes(); - if (totalInsertNodeFloatingMemoryUsageInBytes + totalLinkedButDeletedTsFileResourceRamSize - >= totalFloatingMemorySizeInBytes) { - for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { - stuckPipes.add(pipeMeta); - } - if (!stuckPipes.isEmpty()) { - LOGGER.warn( - "All {} pipe(s) will be restarted because linked but deleted tsFiles' resource size {} and all insertNode's size {} exceeds limit {}.", - stuckPipes.size(), - totalLinkedButDeletedTsFileResourceRamSize, - 
totalInsertNodeFloatingMemoryUsageInBytes, - totalFloatingMemorySizeInBytes); - } - return stuckPipes; - } - - final Map taskId2ExtractorMap = - PipeDataRegionExtractorMetrics.getInstance().getExtractorMap(); - for (final PipeMeta pipeMeta : pipeMetaKeeper.getPipeMetaList()) { - final String pipeName = pipeMeta.getStaticMeta().getPipeName(); - final List extractors = - taskId2ExtractorMap.values().stream() - .filter(e -> e.getPipeName().equals(pipeName) && e.shouldExtractInsertion()) - .collect(Collectors.toList()); - - if (extractors.isEmpty()) { - continue; - } - - // Extractors of this pipe might not pin too much MemTables, - // still need to check if linked-and-deleted TsFile count exceeds limit. - // Typically, if deleted tsFiles are too abundant all pipes may need to restart. - if ((CONFIG.isEnableSeqSpaceCompaction() - || CONFIG.isEnableUnseqSpaceCompaction() - || CONFIG.isEnableCrossSpaceCompaction()) - && mayDeletedTsFileSizeReachDangerousThreshold()) { - LOGGER.warn( - "Pipe {} needs to restart because too many TsFiles are out-of-date.", - pipeMeta.getStaticMeta()); - stuckPipes.add(pipeMeta); - continue; - } - - // Try to restart the stream mode pipes for releasing memTables. - if (extractors.get(0).isStreamMode()) { - if (extractors.stream().anyMatch(IoTDBDataRegionExtractor::hasConsumedAllHistoricalTsFiles) - && (mayMemTablePinnedCountReachDangerousThreshold() - || mayWalSizeReachThrottleThreshold())) { - // Extractors of this pipe may be stuck and is pinning too many MemTables. - LOGGER.warn( - "Pipe {} needs to restart because too many memTables are pinned or the WAL size is too large. 
mayMemTablePinnedCountReachDangerousThreshold: {}, mayWalSizeReachThrottleThreshold: {}", - pipeMeta.getStaticMeta(), - mayMemTablePinnedCountReachDangerousThreshold(), - mayWalSizeReachThrottleThreshold()); - stuckPipes.add(pipeMeta); - } - } - } - - return stuckPipes; - } - - private boolean mayDeletedTsFileSizeReachDangerousThreshold() { - try { - final long linkedButDeletedTsFileSize = - PipeDataNodeResourceManager.tsfile().getTotalLinkedButDeletedTsfileSize(); - final double totalDisk = - MetricService.getInstance() - .getAutoGauge( - SystemMetric.SYS_DISK_TOTAL_SPACE.toString(), - MetricLevel.CORE, - Tag.NAME.toString(), - // This "system" should stay the same with the one in - // DataNodeInternalRPCServiceImpl. - "system") - .getValue(); - return linkedButDeletedTsFileSize > 0 - && totalDisk > 0 - && linkedButDeletedTsFileSize - > PipeConfig.getInstance().getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage() - * totalDisk; - } catch (final Exception e) { - LOGGER.warn("Failed to judge if deleted TsFile size reaches dangerous threshold.", e); - return false; - } - } - - private boolean mayMemTablePinnedCountReachDangerousThreshold() { - return PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() != Integer.MAX_VALUE - && PipeDataNodeResourceManager.wal().getPinnedWalCount() - >= 5 - * PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() - * StorageEngine.getInstance().getDataRegionNumber(); - } - - private boolean mayWalSizeReachThrottleThreshold() { - return 3 * WALManager.getInstance().getTotalDiskUsage() > 2 * CONFIG.getThrottleThreshold(); - } - - private void restartStuckPipe(final PipeMeta pipeMeta) { - LOGGER.warn( - "Pipe {} will be restarted because it is stuck or has encountered issues such as data backlog or being stopped for too long.", - pipeMeta.getStaticMeta()); - acquireWriteLock(); - try { - final long startTime = System.currentTimeMillis(); - final PipeMeta originalPipeMeta = pipeMeta.deepCopy4TaskAgent(); - 
handleDropPipe(pipeMeta.getStaticMeta().getPipeName()); - - final long restartTime = System.currentTimeMillis(); - PIPE_NAME_TO_LAST_RESTART_TIME_MAP - .computeIfAbsent(pipeMeta.getStaticMeta().getPipeName(), k -> new AtomicLong(restartTime)) - .set(restartTime); - handleSinglePipeMetaChanges(originalPipeMeta); - - LOGGER.warn( - "Pipe {} was restarted because of stuck or data backlog, time cost: {} ms.", - originalPipeMeta.getStaticMeta(), - System.currentTimeMillis() - startTime); - } catch (final Exception e) { - LOGGER.warn("Failed to restart stuck pipe {}.", pipeMeta.getStaticMeta(), e); - } finally { - releaseWriteLock(); - } - } - - public boolean isPipeTaskCurrentlyRestarted(final String pipeName) { - return PIPE_NAME_TO_LAST_RESTART_TIME_MAP.containsKey(pipeName); - } - ///////////////////////// Terminate Logic ///////////////////////// public void markCompleted(final String pipeName, final int regionId) { @@ -822,6 +583,24 @@ public boolean hasPipeReleaseRegionRelatedResource(final int consensusGroupId) { } } + @Override + public void runPipeTasks( + final Collection pipeTasks, final Consumer runSingle) { + final Set> pipeFuture = new HashSet<>(); + + pipeTasks.forEach( + pipeTask -> pipeFuture.add(pipeExecutor.submit(() -> runSingle.accept(pipeTask)))); + + for (final Future future : pipeFuture) { + try { + future.get(); + } catch (final ExecutionException | InterruptedException e) { + LOGGER.warn("Exception occurs when executing pipe task: ", e); + throw new PipeException(e.toString()); + } + } + } + ///////////////////////// Shutdown Logic ///////////////////////// public void persistAllProgressIndex() { @@ -885,4 +664,203 @@ public Map getAllConsensusPipe() { releaseReadLock(); } } + + @Override + protected void calculateMemoryUsage( + final PipeParameters extractorParameters, + final PipeParameters processorParameters, + final PipeParameters connectorParameters) { + if (!PipeConfig.getInstance().isPipeEnableMemoryCheck()) { + return; + } + + 
calculateInsertNodeQueueMemory(extractorParameters, processorParameters, connectorParameters); + + long needMemory = 0; + + needMemory += + calculateTsFileParserMemory(extractorParameters, processorParameters, connectorParameters); + needMemory += + calculateSinkBatchMemory(extractorParameters, processorParameters, connectorParameters); + needMemory += + calculateSendTsFileReadBufferMemory( + extractorParameters, processorParameters, connectorParameters); + + PipeMemoryManager pipeMemoryManager = PipeDataNodeResourceManager.memory(); + final long freeMemorySizeInBytes = pipeMemoryManager.getFreeMemorySizeInBytes(); + final long reservedMemorySizeInBytes = + (long) + (PipeMemoryManager.getTotalMemorySizeInBytes() + * PipeConfig.getInstance().getReservedMemoryPercentage()); + if (freeMemorySizeInBytes < needMemory + reservedMemorySizeInBytes) { + final String message = + String.format( + "Not enough memory for pipe. Need memory: %d bytes, free memory: %d bytes, reserved memory: %d bytes, total memory: %d bytes", + needMemory, + freeMemorySizeInBytes, + reservedMemorySizeInBytes, + PipeMemoryManager.getTotalMemorySizeInBytes()); + LOGGER.warn(message); + throw new PipeException(message); + } + } + + private void calculateInsertNodeQueueMemory( + final PipeParameters extractorParameters, + final PipeParameters processorParameters, + final PipeParameters connectorParameters) { + + // Realtime extractor is enabled by default, so we only need to check the source realtime + if (!extractorParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY), + EXTRACTOR_REALTIME_ENABLE_DEFAULT_VALUE)) { + return; + } + + // If the realtime mode is batch or file, we do not need to allocate memory + final String realtimeMode = + extractorParameters.getStringByKeys( + PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_KEY, + PipeExtractorConstant.SOURCE_REALTIME_MODE_KEY); + if 
(PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_BATCH_MODE_VALUE.equals(realtimeMode) + || PipeExtractorConstant.EXTRACTOR_REALTIME_MODE_FILE_VALUE.equals(realtimeMode)) { + return; + } + + final long allocatedMemorySizeInBytes = this.getAllFloatingMemoryUsageInByte(); + final long remainingMemory = + PipeMemoryManager.getTotalFloatingMemorySizeInBytes() - allocatedMemorySizeInBytes; + if (remainingMemory < PipeConfig.getInstance().PipeInsertNodeQueueMemory()) { + final String message = + String.format( + "Not enough memory for pipe. Need Floating memory: %d bytes, free Floating memory: %d bytes", + PipeConfig.getInstance().PipeInsertNodeQueueMemory(), remainingMemory); + LOGGER.warn(message); + throw new PipeException(message); + } + } + + private long calculateTsFileParserMemory( + final PipeParameters extractorParameters, + final PipeParameters processorParameters, + final PipeParameters connectorParameters) { + + // If the extractor is not history, we do not need to allocate memory + boolean isExtractorHistory = + extractorParameters.getBooleanOrDefault( + SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE) + || extractorParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY), + EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE); + + // If the extractor is history, and has start/end time, we need to allocate memory + boolean isTSFileParser = + isExtractorHistory + && extractorParameters.hasAnyAttributes( + EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY); + + isTSFileParser = + isTSFileParser + || (isExtractorHistory + && extractorParameters.hasAnyAttributes( + EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY)); + + // if the extractor has start/end time, we need to allocate memory + isTSFileParser = + isTSFileParser + || extractorParameters.hasAnyAttributes( + SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY); + + isTSFileParser = + isTSFileParser + || 
extractorParameters.hasAnyAttributes(SOURCE_END_TIME_KEY, EXTRACTOR_END_TIME_KEY); + + // If the extractor has pattern or path, we need to allocate memory + isTSFileParser = + isTSFileParser + || extractorParameters.hasAnyAttributes(EXTRACTOR_PATTERN_KEY, SOURCE_PATTERN_KEY); + + isTSFileParser = + isTSFileParser || extractorParameters.hasAnyAttributes(EXTRACTOR_PATH_KEY, SOURCE_PATH_KEY); + + // If the extractor is not hybrid, we do need to allocate memory + isTSFileParser = + isTSFileParser + || !PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE.equals( + connectorParameters.getStringOrDefault( + Arrays.asList( + PipeConnectorConstant.CONNECTOR_FORMAT_KEY, + PipeConnectorConstant.SINK_FORMAT_KEY), + PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE)); + + if (!isTSFileParser) { + return 0; + } + + return PipeConfig.getInstance().getTsFileParserMemory(); + } + + private long calculateSinkBatchMemory( + final PipeParameters extractorParameters, + final PipeParameters processorParameters, + final PipeParameters connectorParameters) { + + // If the connector format is tsfile , we need to use batch + boolean needUseBatch = + PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE.equals( + connectorParameters.getStringOrDefault( + Arrays.asList( + PipeConnectorConstant.CONNECTOR_FORMAT_KEY, + PipeConnectorConstant.SINK_FORMAT_KEY), + PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE)); + + if (needUseBatch) { + return PipeConfig.getInstance().getSinkBatchMemoryTsFile(); + } + + // If the connector is batch mode, we need to use batch + needUseBatch = + connectorParameters.getBooleanOrDefault( + Arrays.asList( + PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_KEY, + PipeConnectorConstant.SINK_IOTDB_BATCH_MODE_ENABLE_KEY), + PipeConnectorConstant.CONNECTOR_IOTDB_BATCH_MODE_ENABLE_DEFAULT_VALUE); + + if (!needUseBatch) { + return 0; + } + + return PipeConfig.getInstance().getSinkBatchMemoryInsertNode(); + } + + private long calculateSendTsFileReadBufferMemory( 
+ final PipeParameters extractorParameters, + final PipeParameters processorParameters, + final PipeParameters connectorParameters) { + // If the extractor is history enable, we need to transfer tsfile + boolean needTransferTsFile = + extractorParameters.getBooleanOrDefault( + SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE) + || extractorParameters.getBooleanOrDefault( + Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY), + EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE); + + String format = + connectorParameters.getStringOrDefault( + Arrays.asList( + PipeConnectorConstant.CONNECTOR_FORMAT_KEY, PipeConnectorConstant.SINK_FORMAT_KEY), + PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE); + + // If the connector format is tsfile and hybrid, we need to transfer tsfile + needTransferTsFile = + needTransferTsFile + || PipeConnectorConstant.CONNECTOR_FORMAT_HYBRID_VALUE.equals(format) + || PipeConnectorConstant.CONNECTOR_FORMAT_TS_FILE_VALUE.equals(format); + + if (!needTransferTsFile) { + return 0; + } + + return PipeConfig.getInstance().getSendTsFileReadBuffer(); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java index 5e7901cb7c77..f84cb73fd692 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/builder/PipeDataNodeTaskBuilder.java @@ -27,7 +27,6 @@ import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; import org.apache.iotdb.db.pipe.agent.task.PipeDataNodeTask; -import org.apache.iotdb.db.pipe.agent.task.execution.PipeConnectorSubtaskExecutor; import org.apache.iotdb.db.pipe.agent.task.execution.PipeProcessorSubtaskExecutor; 
import org.apache.iotdb.db.pipe.agent.task.execution.PipeSubtaskExecutorManager; import org.apache.iotdb.db.pipe.agent.task.stage.PipeTaskConnectorStage; @@ -42,7 +41,6 @@ import org.slf4j.LoggerFactory; import java.util.Arrays; -import java.util.EnumMap; import java.util.HashMap; import java.util.Map; @@ -64,19 +62,8 @@ public class PipeDataNodeTaskBuilder { private final int regionId; private final PipeTaskMeta pipeTaskMeta; - private static final PipeProcessorSubtaskExecutor PROCESSOR_EXECUTOR; - private static final Map CONNECTOR_EXECUTOR_MAP; - - static { - PROCESSOR_EXECUTOR = PipeSubtaskExecutorManager.getInstance().getProcessorExecutor(); - CONNECTOR_EXECUTOR_MAP = new EnumMap<>(PipeType.class); - CONNECTOR_EXECUTOR_MAP.put( - PipeType.USER, PipeSubtaskExecutorManager.getInstance().getConnectorExecutor()); - CONNECTOR_EXECUTOR_MAP.put( - PipeType.SUBSCRIPTION, PipeSubtaskExecutorManager.getInstance().getSubscriptionExecutor()); - CONNECTOR_EXECUTOR_MAP.put( - PipeType.CONSENSUS, PipeSubtaskExecutorManager.getInstance().getConsensusExecutor()); - } + private static final PipeProcessorSubtaskExecutor PROCESSOR_EXECUTOR = + PipeSubtaskExecutorManager.getInstance().getProcessorExecutor(); protected final Map systemParameters = new HashMap<>(); @@ -117,7 +104,7 @@ public PipeDataNodeTask build() { pipeStaticMeta.getCreationTime(), connectorParameters, regionId, - CONNECTOR_EXECUTOR_MAP.get(pipeType)); + PipeSubtaskExecutorManager.getInstance().getSubscriptionExecutor()); } else { // user pipe or consensus pipe connectorStage = new PipeTaskConnectorStage( @@ -125,7 +112,9 @@ public PipeDataNodeTask build() { pipeStaticMeta.getCreationTime(), connectorParameters, regionId, - CONNECTOR_EXECUTOR_MAP.get(pipeType)); + pipeType.equals(PipeType.USER) + ? PipeSubtaskExecutorManager.getInstance().getConnectorExecutorSupplier() + : PipeSubtaskExecutorManager.getInstance().getConsensusExecutorSupplier()); } // The processor connects the extractor and connector. 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeConnectorSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeConnectorSubtaskExecutor.java index fcb4888dbde5..0850c81a59d6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeConnectorSubtaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeConnectorSubtaskExecutor.java @@ -23,16 +23,20 @@ import org.apache.iotdb.commons.pipe.agent.task.execution.PipeSubtaskExecutor; import org.apache.iotdb.commons.pipe.config.PipeConfig; +import java.util.concurrent.atomic.AtomicInteger; + public class PipeConnectorSubtaskExecutor extends PipeSubtaskExecutor { + private static final AtomicInteger id = new AtomicInteger(0); public PipeConnectorSubtaskExecutor() { super( PipeConfig.getInstance().getPipeSubtaskExecutorMaxThreadNum(), - ThreadName.PIPE_CONNECTOR_EXECUTOR_POOL, + ThreadName.PIPE_CONNECTOR_EXECUTOR_POOL.getName() + "-" + id.get(), + ThreadName.PIPE_SUBTASK_CALLBACK_EXECUTOR_POOL.getName() + "-" + id.getAndIncrement(), true); } - public PipeConnectorSubtaskExecutor(final int corePoolSize, final ThreadName threadName) { + public PipeConnectorSubtaskExecutor(final int corePoolSize, final String threadName) { super(corePoolSize, threadName, true); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java index f25d28b0c2c4..9eddd27f22e6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeProcessorSubtaskExecutor.java @@ -28,7 +28,7 @@ public class PipeProcessorSubtaskExecutor 
extends PipeSubtaskExecutor { public PipeProcessorSubtaskExecutor() { super( PipeConfig.getInstance().getPipeSubtaskExecutorMaxThreadNum(), - ThreadName.PIPE_PROCESSOR_EXECUTOR_POOL, + ThreadName.PIPE_PROCESSOR_EXECUTOR_POOL.getName(), false); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java index f0de4d8d58e5..e0a71454d235 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/execution/PipeSubtaskExecutorManager.java @@ -23,42 +23,44 @@ import org.apache.iotdb.db.pipe.consensus.PipeConsensusSubtaskExecutor; import org.apache.iotdb.db.subscription.task.execution.SubscriptionSubtaskExecutor; +import java.util.function.Supplier; + /** * PipeTaskExecutor is responsible for executing the pipe tasks, and it is scheduled by the * PipeTaskScheduler. It is a singleton class. 
*/ public class PipeSubtaskExecutorManager { private final PipeProcessorSubtaskExecutor processorExecutor; - private final PipeConnectorSubtaskExecutor connectorExecutor; + private final Supplier connectorExecutorSupplier; private final SubscriptionSubtaskExecutor subscriptionExecutor; - private final PipeConsensusSubtaskExecutor consensusExecutor; + private final Supplier consensusExecutorSupplier; public PipeProcessorSubtaskExecutor getProcessorExecutor() { return processorExecutor; } - public PipeConnectorSubtaskExecutor getConnectorExecutor() { - return connectorExecutor; + public Supplier getConnectorExecutorSupplier() { + return connectorExecutorSupplier; } public SubscriptionSubtaskExecutor getSubscriptionExecutor() { return subscriptionExecutor; } - public PipeConsensusSubtaskExecutor getConsensusExecutor() { - return consensusExecutor; + public Supplier getConsensusExecutorSupplier() { + return consensusExecutorSupplier; } ///////////////////////// Singleton Instance Holder ///////////////////////// private PipeSubtaskExecutorManager() { processorExecutor = new PipeProcessorSubtaskExecutor(); - connectorExecutor = new PipeConnectorSubtaskExecutor(); + connectorExecutorSupplier = PipeConnectorSubtaskExecutor::new; subscriptionExecutor = SubscriptionConfig.getInstance().getSubscriptionEnabled() ? 
new SubscriptionSubtaskExecutor() : null; - consensusExecutor = new PipeConsensusSubtaskExecutor(); + consensusExecutorSupplier = PipeConsensusSubtaskExecutor::new; } private static class PipeTaskExecutorHolder { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskConnectorStage.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskConnectorStage.java index 345e6ab040c4..e5ae5dacd278 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskConnectorStage.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/stage/PipeTaskConnectorStage.java @@ -28,13 +28,15 @@ import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.exception.PipeException; +import java.util.function.Supplier; + public class PipeTaskConnectorStage extends PipeTaskStage { protected final String pipeName; protected final long creationTime; protected final PipeParameters pipeConnectorParameters; protected final int regionId; - protected final PipeConnectorSubtaskExecutor executor; + protected final Supplier executor; protected String connectorSubtaskId; @@ -43,7 +45,7 @@ public PipeTaskConnectorStage( long creationTime, PipeParameters pipeConnectorParameters, int regionId, - PipeConnectorSubtaskExecutor executor) { + Supplier executor) { this.pipeName = pipeName; this.creationTime = creationTime; this.pipeConnectorParameters = pipeConnectorParameters; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java index ecbbc641e4b9..6e4a858a3705 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskLifeCycle.java @@ -21,6 +21,7 @@ import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; import org.apache.iotdb.db.pipe.agent.task.execution.PipeConnectorSubtaskExecutor; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.pipe.api.event.Event; import org.slf4j.Logger; @@ -65,6 +66,8 @@ public synchronized void register() { if (registeredTaskCount == 0) { executor.register(subtask); runningTaskCount = 0; + + PipeDataNodeResourceManager.compaction().registerPipeConnectorSubtaskLifeCycle(this); } registeredTaskCount++; @@ -152,5 +155,7 @@ public synchronized void stop() { @Override public synchronized void close() { executor.deregister(subtask.getTaskID()); + + PipeDataNodeResourceManager.compaction().deregisterPipeConnectorSubtaskLifeCycle(this); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskManager.java index e556da428ef4..e25542135c1b 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeConnectorSubtaskManager.java @@ -48,6 +48,7 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; public class PipeConnectorSubtaskManager { @@ -60,7 +61,7 @@ public class PipeConnectorSubtaskManager { attributeSortedString2SubtaskLifeCycleMap = new HashMap<>(); public synchronized String register( - final PipeConnectorSubtaskExecutor executor, + final Supplier executorSupplier, final PipeParameters pipeConnectorParameters, final 
PipeTaskConnectorRuntimeEnvironment environment) { final String connectorKey = @@ -109,6 +110,8 @@ public synchronized String register( environment.setAttributeSortedString(attributeSortedString); if (!attributeSortedString2SubtaskLifeCycleMap.containsKey(attributeSortedString)) { + final PipeConnectorSubtaskExecutor executor = executorSupplier.get(); + final List pipeConnectorSubtaskLifeCycleList = new ArrayList<>(connectorNum); @@ -169,6 +172,11 @@ public synchronized String register( pipeConnectorSubtaskLifeCycleList.add(pipeConnectorSubtaskLifeCycle); } + LOGGER.info( + "Pipe connector subtasks with attributes {} is bounded with connectorExecutor {} and callbackExecutor {}.", + attributeSortedString, + executor.getWorkingThreadName(), + executor.getCallbackThreadName()); attributeSortedString2SubtaskLifeCycleMap.put( attributeSortedString, pipeConnectorSubtaskLifeCycleList); } @@ -192,10 +200,19 @@ public synchronized void deregister( final List lifeCycles = attributeSortedString2SubtaskLifeCycleMap.get(attributeSortedString); + + // Shall not be empty + final PipeConnectorSubtaskExecutor executor = lifeCycles.get(0).executor; + lifeCycles.removeIf(o -> o.deregister(pipeName, regionId)); if (lifeCycles.isEmpty()) { attributeSortedString2SubtaskLifeCycleMap.remove(attributeSortedString); + executor.shutdown(); + LOGGER.info( + "The executor {} and {} has been successfully shutdown.", + executor.getWorkingThreadName(), + executor.getCallbackThreadName()); } PipeEventCommitManager.getInstance().deregister(pipeName, creationTime, regionId); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java index a4f05447eae2..409447b5b2fb 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/agent/task/subtask/connector/PipeRealtimePriorityBlockingQueue.java @@ -21,23 +21,40 @@ import org.apache.iotdb.commons.pipe.agent.task.connection.BlockingPendingQueue; import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.db.pipe.agent.task.connection.PipeEventCollector; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; +import org.apache.iotdb.db.pipe.event.common.tsfile.PipeCompactedTsFileInsertionEvent; +import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.BlockingDeque; import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; +import java.util.stream.Collectors; public class PipeRealtimePriorityBlockingQueue extends UnboundedBlockingPendingQueue { + private static final Logger LOGGER = + LoggerFactory.getLogger(PipeRealtimePriorityBlockingQueue.class); + private static final PipeConfig PIPE_CONFIG = 
PipeConfig.getInstance(); private final BlockingDeque tsfileInsertEventDeque = @@ -184,6 +201,138 @@ public Event peek() { return tsfileInsertEventDeque.peek(); } + public synchronized void replace( + String dataRegionId, Set sourceFiles, List targetFiles) { + + final int regionId = Integer.parseInt(dataRegionId); + final Map> eventsToBeRemovedGroupByCommitterKey = + tsfileInsertEventDeque.stream() + .filter( + event -> + event instanceof PipeTsFileInsertionEvent + && ((PipeTsFileInsertionEvent) event).getRegionId() == regionId) + .map(event -> (PipeTsFileInsertionEvent) event) + .collect( + Collectors.groupingBy( + PipeTsFileInsertionEvent::getCommitterKey, Collectors.toSet())) + .entrySet() + .stream() + // Replace if all source files are present in the queue + .filter(entry -> entry.getValue().size() == sourceFiles.size()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + if (eventsToBeRemovedGroupByCommitterKey.isEmpty()) { + LOGGER.info( + "Region {}: No TsFileInsertionEvents to replace for source files {}", + regionId, + sourceFiles.stream() + .map(TsFileResource::getTsFilePath) + .collect(Collectors.joining(", "))); + return; + } + + final Map> eventsToBeAddedGroupByCommitterKey = + new HashMap<>(); + for (final Map.Entry> entry : + eventsToBeRemovedGroupByCommitterKey.entrySet()) { + final CommitterKey committerKey = entry.getKey(); + final PipeTsFileInsertionEvent anyEvent = entry.getValue().stream().findFirst().orElse(null); + final Set newEvents = new HashSet<>(); + for (int i = 0; i < targetFiles.size(); i++) { + newEvents.add( + new PipeCompactedTsFileInsertionEvent( + committerKey, + entry.getValue(), + anyEvent, + targetFiles.get(i), + i == targetFiles.size() - 1)); + } + eventsToBeAddedGroupByCommitterKey.put(committerKey, newEvents); + } + + // Handling new events + final Set successfullyReferenceIncreasedEvents = new HashSet<>(); + final AtomicBoolean + allSuccess = // To track if all events successfully increased the 
reference count + new AtomicBoolean(true); + outerLoop: + for (final Map.Entry> committerKeySetEntry : + eventsToBeAddedGroupByCommitterKey.entrySet()) { + for (final PipeTsFileInsertionEvent event : committerKeySetEntry.getValue()) { + if (event != null) { + try { + if (!event.increaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName())) { + allSuccess.set(false); + break outerLoop; + } else { + successfullyReferenceIncreasedEvents.add(event); + } + } catch (final Exception e) { + allSuccess.set(false); + break outerLoop; + } + } + } + } + if (!allSuccess.get()) { + // If any event failed to increase the reference count, + // we need to decrease the reference count for all successfully increased events + for (final PipeTsFileInsertionEvent event : successfullyReferenceIncreasedEvents) { + try { + event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false); + } catch (final Exception e) { + LOGGER.warn( + "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue", + event, + e); + } + } + return; // Exit early if any event failed to increase the reference count + } else { + // If all events successfully increased reference count, + // we can proceed to add them to the deque + for (final PipeTsFileInsertionEvent event : successfullyReferenceIncreasedEvents) { + tsfileInsertEventDeque.add(event); + eventCounter.increaseEventCount(event); + } + } + + // Handling old events + for (final Map.Entry> entry : + eventsToBeRemovedGroupByCommitterKey.entrySet()) { + for (final PipeTsFileInsertionEvent event : entry.getValue()) { + if (event != null) { + try { + event.decreaseReferenceCount(PipeRealtimePriorityBlockingQueue.class.getName(), false); + } catch (final Exception e) { + LOGGER.warn( + "Failed to decrease reference count for event {} in PipeRealtimePriorityBlockingQueue", + event, + e); + } + eventCounter.decreaseEventCount(event); + } + } + } + final Set eventsToRemove = new HashSet<>(); + for (Set 
pipeTsFileInsertionEvents : + eventsToBeRemovedGroupByCommitterKey.values()) { + eventsToRemove.addAll(pipeTsFileInsertionEvents); + } + tsfileInsertEventDeque.removeIf(eventsToRemove::contains); + + LOGGER.info( + "Region {}: Replaced TsFileInsertionEvents {} with {}", + regionId, + eventsToBeRemovedGroupByCommitterKey.values().stream() + .flatMap(Set::stream) + .map(PipeTsFileInsertionEvent::coreReportMessage) + .collect(Collectors.joining(", ")), + eventsToBeAddedGroupByCommitterKey.values().stream() + .flatMap(Set::stream) + .map(PipeTsFileInsertionEvent::coreReportMessage) + .collect(Collectors.joining(", "))); + } + @Override public void clear() { super.clear(); @@ -244,7 +393,7 @@ public int getTsFileInsertionEventCount() { return tsfileInsertEventDeque.size(); } - public void setOfferTsFileCounter(AtomicInteger offerTsFileCounter) { + public synchronized void setOfferTsFileCounter(AtomicInteger offerTsFileCounter) { this.offerTsFileCounter = offerTsFileCounter; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java index 9cb1b0a5cd08..2fc41a618667 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/client/IoTDBDataNodeAsyncClientManager.java @@ -23,6 +23,8 @@ import org.apache.iotdb.commons.client.ClientPoolFactory; import org.apache.iotdb.commons.client.IClientManager; import org.apache.iotdb.commons.client.async.AsyncPipeDataTransferServiceClient; +import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; +import org.apache.iotdb.commons.concurrent.ThreadName; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.pipe.config.PipeConfig; import 
org.apache.iotdb.commons.pipe.connector.client.IoTDBClientManager; @@ -48,7 +50,9 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_LOAD_BALANCE_PRIORITY_STRATEGY; @@ -70,7 +74,12 @@ public class IoTDBDataNodeAsyncClientManager extends IoTDBClientManager // receiverAttributes -> IClientManager private static final Map> ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER = new ConcurrentHashMap<>(); + private static final Map TS_FILE_ASYNC_EXECUTOR_HOLDER = + new ConcurrentHashMap<>(); + private static final AtomicInteger id = new AtomicInteger(0); + private final IClientManager endPoint2Client; + private ExecutorService executor; private final LoadBalancer loadBalancer; @@ -123,6 +132,17 @@ public IoTDBDataNodeAsyncClientManager( } endPoint2Client = ASYNC_PIPE_DATA_TRANSFER_CLIENT_MANAGER_HOLDER.get(receiverAttributes); + if (isTSFileUsed) { + if (!TS_FILE_ASYNC_EXECUTOR_HOLDER.containsKey(receiverAttributes)) { + TS_FILE_ASYNC_EXECUTOR_HOLDER.putIfAbsent( + receiverAttributes, + IoTDBThreadPoolFactory.newFixedThreadPool( + PipeConfig.getInstance().getPipeRealTimeQueueMaxWaitingTsFileSize(), + ThreadName.PIPE_TSFILE_ASYNC_SEND_POOL.getName() + "-" + id.getAndIncrement())); + } + executor = TS_FILE_ASYNC_EXECUTOR_HOLDER.get(receiverAttributes); + } + RECEIVER_ATTRIBUTES_REF_COUNT.compute( receiverAttributes, (attributes, refCount) -> refCount == null ? 
1 : refCount + 1); } @@ -336,6 +356,10 @@ public void updateLeaderCache(final String deviceId, final TEndPoint endPoint) { LEADER_CACHE_MANAGER.updateLeaderEndPoint(deviceId, endPoint); } + public ExecutorService getExecutor() { + return executor; + } + public void close() { isClosed = true; synchronized (IoTDBDataNodeAsyncClientManager.class) { @@ -352,6 +376,18 @@ public void close() { LOGGER.warn("Failed to close client manager.", e); } } + + final ExecutorService executor = + TS_FILE_ASYNC_EXECUTOR_HOLDER.remove(receiverAttributes); + if (executor != null) { + try { + executor.shutdown(); + LOGGER.info("Successfully shutdown executor {}.", executor); + } catch (final Exception e) { + LOGGER.warn("Failed to shutdown executor {}.", executor); + } + } + return null; } return refCount - 1; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java index 1cb9f50d92d9..8d52055fcc97 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventBatch.java @@ -22,6 +22,7 @@ import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.db.pipe.connector.protocol.thrift.async.IoTDBDataRegionAsyncConnector; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeDynamicMemoryBlock; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlockType; import org.apache.iotdb.db.pipe.resource.memory.PipeModelFixedMemoryBlock; import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; @@ -47,7 +48,7 @@ public abstract class PipeTabletEventBatch implements AutoCloseable { private long 
firstEventProcessingTime = Long.MIN_VALUE; protected long totalBufferSize = 0; - private final PipeModelFixedMemoryBlock allocatedMemoryBlock; + private final PipeDynamicMemoryBlock allocatedMemoryBlock; protected volatile boolean isClosed = false; @@ -60,11 +61,9 @@ protected PipeTabletEventBatch(final int maxDelayInMs, final long requestMaxBatc // limit in buffer size this.allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateForModelFixedMemoryBlock( - requestMaxBatchSizeInBytes, PipeMemoryBlockType.BATCH); + pipeModelFixedMemoryBlock.registerPipeBatchMemoryBlock(requestMaxBatchSizeInBytes); - if (getMaxBatchSizeInBytes() != requestMaxBatchSizeInBytes) { + if (getMaxBatchSizeInBytes() != allocatedMemoryBlock.getMemoryUsageInBytes()) { LOGGER.info( "PipeTabletEventBatch: the max batch size is adjusted from {} to {} due to the " + "memory restriction", @@ -127,10 +126,17 @@ protected abstract boolean constructBatch(final TabletInsertionEvent event) throws WALPipeException, IOException; public boolean shouldEmit() { - return totalBufferSize >= getMaxBatchSizeInBytes() - || System.currentTimeMillis() - firstEventProcessingTime >= maxDelayInMs; + final long diff = System.currentTimeMillis() - firstEventProcessingTime; + if (totalBufferSize >= getMaxBatchSizeInBytes() || diff >= maxDelayInMs) { + allocatedMemoryBlock.updateCurrentMemoryEfficiencyAdjustMem((double) diff / maxDelayInMs); + recordMetric(diff, totalBufferSize); + return true; + } + return false; } + protected abstract void recordMetric(final long timeInterval, final long bufferSize); + private long getMaxBatchSizeInBytes() { return allocatedMemoryBlock.getMemoryUsageInBytes(); } @@ -194,9 +200,22 @@ public static void init() { } try { - pipeModelFixedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateForModelFixedMemoryBlock(0L, PipeMemoryBlockType.BATCH); + long batchSize = PipeDataNodeResourceManager.memory().getAllocatedMemorySizeInBytesOfBatch(); + for (long i 
= batchSize; i > 0; i = i / 2) { + try { + pipeModelFixedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateForModelFixedMemoryBlock(i, PipeMemoryBlockType.BATCH); + + LOGGER.info("pipe model fixed memory block initialized with size: {} bytes", i); + return; + } catch (Exception ignore) { + // ignore the exception and try to allocate a smaller size + LOGGER.info( + "pipe model fixed memory block initialized with size: {} bytes failed, try smaller size", + i); + } + } } catch (Exception e) { LOGGER.error("init pipe model fixed memory block failed", e); // If the allocation fails, we still need to create a default memory block to avoid NPE. diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java index 292fc018ed79..70ffaddb1800 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventPlainBatch.java @@ -23,6 +23,7 @@ import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletBatchReq; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; +import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionConnectorMetrics; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; @@ -70,6 +71,12 @@ protected boolean constructBatch(final TabletInsertionEvent event) return true; } + @Override + protected void recordMetric(long timeInterval, long 
bufferSize) { + PipeDataRegionConnectorMetrics.tabletBatchTimeIntervalHistogram.update(timeInterval); + PipeDataRegionConnectorMetrics.tabletBatchSizeHistogram.update(bufferSize); + } + @Override public synchronized void onSuccess() { super.onSuccess(); @@ -102,8 +109,7 @@ private int buildTabletInsertionBuffer(final TabletInsertionEvent event) (PipeInsertNodeTabletInsertionEvent) event; // Read the bytebuffer from the wal file and transfer it directly without serializing or // deserializing if possible - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); if (Objects.isNull(insertNode)) { buffer = pipeInsertNodeTabletInsertionEvent.getByteBuffer(); binaryBuffers.add(buffer); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java index fd4ce71b6f92..98e27d4e7c60 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/batch/PipeTabletEventTsFileBatch.java @@ -25,6 +25,7 @@ import org.apache.iotdb.db.pipe.connector.util.PipeTabletEventSorter; import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; +import org.apache.iotdb.db.pipe.metric.sink.PipeDataRegionConnectorMetrics; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; @@ -181,6 +182,12 @@ protected boolean 
constructBatch(final TabletInsertionEvent event) { return true; } + @Override + protected void recordMetric(long timeInterval, long bufferSize) { + PipeDataRegionConnectorMetrics.tsFileBatchTimeIntervalHistogram.update(timeInterval); + PipeDataRegionConnectorMetrics.tsFileBatchSizeHistogram.update(bufferSize); + } + private void bufferTablet( final String pipeName, final long creationTime, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java index 580dbe2ed4ca..553a04cbfcd6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferPlanNodeReq.java @@ -66,7 +66,6 @@ public static PipeTransferPlanNodeReq fromTPipeTransferReq(TPipeTransferReq tran planNodeReq.version = transferReq.version; planNodeReq.type = transferReq.type; - planNodeReq.body = transferReq.body; return planNodeReq; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java index 8090f6504897..27664cdbac71 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBatchReq.java @@ -182,7 +182,6 @@ public static PipeTransferTabletBatchReq fromTPipeTransferReq( batchReq.version = transferReq.version; batchReq.type = transferReq.type; - batchReq.body = transferReq.body; return batchReq; } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java index 5e9e0a39103f..4deac8a5cf69 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletBinaryReq.java @@ -89,7 +89,6 @@ public static PipeTransferTabletBinaryReq fromTPipeTransferReq( binaryReq.version = transferReq.version; binaryReq.type = transferReq.type; - binaryReq.body = transferReq.body; return binaryReq; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java index c45417ba99da..376ca121c80c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletInsertNodeReq.java @@ -96,7 +96,6 @@ public static PipeTransferTabletInsertNodeReq fromTPipeTransferReq( insertNodeReq.version = transferReq.version; insertNodeReq.type = transferReq.type; - insertNodeReq.body = transferReq.body; return insertNodeReq; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java index 61790f883ae5..f1e32785df77 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/payload/evolvable/request/PipeTransferTabletRawReq.java @@ -132,7 +132,6 @@ public static PipeTransferTabletRawReq fromTPipeTransferReq(final TPipeTransferR tabletReq.version = transferReq.version; tabletReq.type = transferReq.type; - tabletReq.body = transferReq.body; return tabletReq; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java index ee35ab46306a..bc4805e8c8ef 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/airgap/IoTDBDataRegionAirGapConnector.java @@ -168,8 +168,7 @@ private void doTransfer( final AirGapSocket socket, final PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletInsertionEvent) throws PipeException, WALPipeException, IOException { - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); final byte[] bytes = Objects.isNull(insertNode) ? 
PipeTransferTabletBinaryReq.toTPipeTransferBytes( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java index a74057c75c7f..1756da36db7d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusAsyncConnector.java @@ -277,8 +277,7 @@ public void transfer(TabletInsertionEvent tabletInsertionEvent) throws Exception return; } - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); final ProgressIndex progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex(); final TPipeConsensusTransferReq pipeConsensusTransferReq = Objects.isNull(insertNode) diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java index f4690783fe13..6186d9f671ac 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/PipeConsensusSyncConnector.java @@ -252,7 +252,7 @@ private void doTransfer(PipeInsertNodeTabletInsertionEvent pipeInsertNodeTabletI try (final SyncPipeConsensusServiceClient syncPipeConsensusServiceClient = syncRetryClientManager.borrowClient(getFollowerUrl())) { - insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + insertNode = 
pipeInsertNodeTabletInsertionEvent.getInsertNode(); progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex(); if (insertNode != null) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java index 870cea0ee87a..a3f8fe557b7f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/pipeconsensus/payload/builder/PipeConsensusTransferBatchReqBuilder.java @@ -196,8 +196,7 @@ protected int buildTabletInsertionBuffer(TabletInsertionEvent event) throws WALP // Read the bytebuffer from the wal file and transfer it directly without serializing or // deserializing if possible - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); // PipeConsensus will transfer binary data to TPipeConsensusTransferReq final ProgressIndex progressIndex = pipeInsertNodeTabletInsertionEvent.getProgressIndex(); if (Objects.isNull(insertNode)) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java index ac639fd22572..0dacf0cb9122 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/IoTDBDataRegionAsyncConnector.java @@ -74,8 
+74,6 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -97,11 +95,10 @@ public class IoTDBDataRegionAsyncConnector extends IoTDBConnector { "Exception occurred while sending to receiver %s:%s."; private static final boolean isSplitTSFileBatchModeEnabled = true; - private static final ExecutorService executor = - Executors.newFixedThreadPool(PipeConfig.getInstance().getPipeAsyncConnectorMaxClientNumber()); private final IoTDBDataRegionSyncConnector syncConnector = new IoTDBDataRegionSyncConnector(); private final BlockingQueue retryEventQueue = new LinkedBlockingQueue<>(); + private final BlockingQueue retryTsFileQueue = new LinkedBlockingQueue<>(); private final PipeDataRegionEventCounter retryEventQueueEventCounter = new PipeDataRegionEventCounter(); @@ -265,8 +262,7 @@ private boolean transferInEventWithoutCheck(final TabletInsertionEvent tabletIns return false; } - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); final TPipeTransferReq pipeTransferReq = compressIfNeeded( Objects.isNull(insertNode) @@ -418,9 +414,9 @@ private void transfer(final PipeTransferTsFileHandler pipeTransferTsFileHandler) } return null; }, - executor); + transferTsFileClientManager.getExecutor()); - if (PipeConfig.getInstance().isTransferTsFileSync()) { + if (PipeConfig.getInstance().isTransferTsFileSync() || !isRealtimeFirst) { try { completableFuture.get(); } catch (InterruptedException e) { @@ -498,7 +494,7 @@ private void logOnClientException( * @see PipeConnector#transfer(TsFileInsertionEvent) for more details. 
*/ private void transferQueuedEventsIfNecessary(final boolean forced) { - if (retryEventQueue.isEmpty() + if ((retryEventQueue.isEmpty() && retryTsFileQueue.isEmpty()) || (!forced && retryEventQueueEventCounter.getTabletInsertionEventCount() < PipeConfig.getInstance() @@ -506,38 +502,49 @@ private void transferQueuedEventsIfNecessary(final boolean forced) { && retryEventQueueEventCounter.getTsFileInsertionEventCount() < PipeConfig.getInstance() .getPipeAsyncConnectorForcedRetryTsFileEventQueueSizeThreshold() - && retryEventQueue.size() + && retryEventQueue.size() + retryTsFileQueue.size() < PipeConfig.getInstance() .getPipeAsyncConnectorForcedRetryTotalEventQueueSizeThreshold())) { return; } final long retryStartTime = System.currentTimeMillis(); - final int remainingEvents = retryEventQueue.size(); - while (!retryEventQueue.isEmpty()) { + final int remainingEvents = retryEventQueue.size() + retryTsFileQueue.size(); + while (!retryEventQueue.isEmpty() || !retryTsFileQueue.isEmpty()) { synchronized (this) { if (isClosed.get()) { return; } - if (retryEventQueue.isEmpty()) { + if (retryEventQueue.isEmpty() && retryTsFileQueue.isEmpty()) { break; } - final Event peekedEvent = retryEventQueue.peek(); + final Event peekedEvent; + final Event polledEvent; + if (!retryEventQueue.isEmpty()) { + peekedEvent = retryEventQueue.peek(); + + if (peekedEvent instanceof PipeInsertNodeTabletInsertionEvent) { + retryTransfer((PipeInsertNodeTabletInsertionEvent) peekedEvent); + } else if (peekedEvent instanceof PipeRawTabletInsertionEvent) { + retryTransfer((PipeRawTabletInsertionEvent) peekedEvent); + } else { + LOGGER.warn( + "IoTDBThriftAsyncConnector does not support transfer generic event: {}.", + peekedEvent); + } - if (peekedEvent instanceof PipeInsertNodeTabletInsertionEvent) { - retryTransfer((PipeInsertNodeTabletInsertionEvent) peekedEvent); - } else if (peekedEvent instanceof PipeRawTabletInsertionEvent) { - retryTransfer((PipeRawTabletInsertionEvent) peekedEvent); - } else 
if (peekedEvent instanceof PipeTsFileInsertionEvent) { - retryTransfer((PipeTsFileInsertionEvent) peekedEvent); + polledEvent = retryEventQueue.poll(); } else { - LOGGER.warn( - "IoTDBThriftAsyncConnector does not support transfer generic event: {}.", - peekedEvent); + if (transferTsFileCounter.get() + >= PipeConfig.getInstance().getPipeRealTimeQueueMaxWaitingTsFileSize()) { + return; + } + peekedEvent = retryTsFileQueue.peek(); + retryTransfer((PipeTsFileInsertionEvent) peekedEvent); + polledEvent = retryTsFileQueue.poll(); } - final Event polledEvent = retryEventQueue.poll(); retryEventQueueEventCounter.decreaseEventCount(polledEvent); if (polledEvent != peekedEvent) { LOGGER.error( @@ -560,16 +567,16 @@ private void transferQueuedEventsIfNecessary(final boolean forced) { && retryEventQueueEventCounter.getTsFileInsertionEventCount() < PipeConfig.getInstance() .getPipeAsyncConnectorForcedRetryTsFileEventQueueSizeThreshold() - && retryEventQueue.size() + && retryEventQueue.size() + retryTsFileQueue.size() < PipeConfig.getInstance() .getPipeAsyncConnectorForcedRetryTotalEventQueueSizeThreshold()) { return; } - if (remainingEvents <= retryEventQueue.size()) { + if (remainingEvents <= retryEventQueue.size() + retryTsFileQueue.size()) { throw new PipeException( "Failed to retry transferring events in the retry queue. 
Remaining events: " - + retryEventQueue.size() + + (retryEventQueue.size() + retryTsFileQueue.size()) + " (tablet events: " + retryEventQueueEventCounter.getTabletInsertionEventCount() + ", tsfile events: " @@ -647,8 +654,14 @@ public void addFailureEventToRetryQueue(final Event event) { return; } - retryEventQueue.offer(event); - retryEventQueueEventCounter.increaseEventCount(event); + if (event instanceof PipeTsFileInsertionEvent) { + retryTsFileQueue.offer((PipeTsFileInsertionEvent) event); + retryEventQueueEventCounter.increaseEventCount(event); + } else { + retryEventQueue.offer(event); + retryEventQueueEventCounter.increaseEventCount(event); + } + if (LOGGER.isDebugEnabled()) { LOGGER.debug("Added event {} to retry queue.", event); } @@ -688,6 +701,19 @@ public synchronized void discardEventsOfPipe(final String pipeNameToDrop, final } return false; }); + + retryTsFileQueue.removeIf( + event -> { + if (event instanceof EnrichedEvent + && pipeNameToDrop.equals(((EnrichedEvent) event).getPipeName()) + && regionId == ((EnrichedEvent) event).getRegionId()) { + ((EnrichedEvent) event) + .clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); + retryEventQueueEventCounter.decreaseEventCount(event); + return true; + } + return false; + }); } @Override @@ -730,8 +756,9 @@ public synchronized void close() { } public synchronized void clearRetryEventsReferenceCount() { - while (!retryEventQueue.isEmpty()) { - final Event event = retryEventQueue.poll(); + while (!retryEventQueue.isEmpty() || !retryTsFileQueue.isEmpty()) { + final Event event = + retryTsFileQueue.isEmpty() ? 
retryEventQueue.poll() : retryTsFileQueue.poll(); retryEventQueueEventCounter.decreaseEventCount(event); if (event instanceof EnrichedEvent) { ((EnrichedEvent) event).clearReferenceCount(IoTDBDataRegionAsyncConnector.class.getName()); @@ -742,7 +769,7 @@ public synchronized void clearRetryEventsReferenceCount() { //////////////////////// APIs provided for metric framework //////////////////////// public int getRetryEventQueueSize() { - return retryEventQueue.size(); + return retryEventQueue.size() + retryTsFileQueue.size(); } public int getBatchSize() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java index f77d41b6662d..b3eb223e58e2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java @@ -104,7 +104,7 @@ protected void onErrorInternal(final Exception exception) { ? ((EnrichedEvent) event).coreReportMessage() : event.toString(), event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitterKey() : null, - event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitId() : null, + event instanceof EnrichedEvent ? 
((EnrichedEvent) event).getCommitIds() : null, exception); } finally { connector.addFailureEventToRetryQueue(event); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java index ccce6ba0aa2a..7353ea91e912 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileHandler.java @@ -47,7 +47,6 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; import java.util.Arrays; @@ -78,8 +77,8 @@ public class PipeTransferTsFileHandler extends PipeTransferTrackableHandler { private final boolean transferMod; private final int readFileBufferSize; - private final PipeTsFileMemoryBlock memoryBlock; - private final byte[] readBuffer; + private PipeTsFileMemoryBlock memoryBlock; + private byte[] readBuffer; private long position; private RandomAccessFile reader; @@ -98,7 +97,7 @@ public PipeTransferTsFileHandler( final File tsFile, final File modFile, final boolean transferMod) - throws FileNotFoundException, InterruptedException { + throws InterruptedException { super(connector); this.pipeName2WeightMap = pipeName2WeightMap; @@ -124,20 +123,8 @@ public PipeTransferTsFileHandler( Math.min( PipeConfig.getInstance().getPipeConnectorReadFileBufferSize(), transferMod ? Math.max(tsFile.length(), modFile.length()) : tsFile.length()); - memoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateForTsFileWithRetry( - PipeConfig.getInstance().isPipeConnectorReadFileBufferMemoryControlEnabled() - ? 
readFileBufferSize - : 0); - readBuffer = new byte[readFileBufferSize]; position = 0; - reader = - Objects.nonNull(modFile) - ? new RandomAccessFile(modFile, "r") - : new RandomAccessFile(tsFile, "r"); - isSealSignalSent = new AtomicBoolean(false); } @@ -145,6 +132,24 @@ public void transfer( final IoTDBDataNodeAsyncClientManager clientManager, final AsyncPipeDataTransferServiceClient client) throws TException, IOException { + // Delay creation of resources to avoid OOM or too many open files + if (readBuffer == null) { + memoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateForTsFileWithRetry( + PipeConfig.getInstance().isPipeConnectorReadFileBufferMemoryControlEnabled() + ? readFileBufferSize + : 0); + readBuffer = new byte[readFileBufferSize]; + } + + if (reader == null) { + reader = + Objects.nonNull(modFile) + ? new RandomAccessFile(modFile, "r") + : new RandomAccessFile(tsFile, "r"); + } + this.clientManager = clientManager; this.client = client; @@ -285,7 +290,7 @@ protected boolean onCompleteInternal(final TPipeTransferResp response) { "Successfully transferred file {} (committer key={}, commit id={}, reference count={}).", tsFile, events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()), - events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()), + events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()), referenceCount); } else { LOGGER.info( @@ -350,7 +355,7 @@ protected void onErrorInternal(final Exception exception) { "Failed to transfer TsFileInsertionEvent {} (committer key {}, commit id {}).", tsFile, events.stream().map(EnrichedEvent::getCommitterKey).collect(Collectors.toList()), - events.stream().map(EnrichedEvent::getCommitId).collect(Collectors.toList()), + events.stream().map(EnrichedEvent::getCommitIds).collect(Collectors.toList()), exception); } else { LOGGER.warn( @@ -427,7 +432,9 @@ public void clearEventsReferenceCount() { @Override public void close() { 
super.close(); - memoryBlock.close(); + if (memoryBlock != null) { + memoryBlock.close(); + } } /** diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java index 784ee14a55a5..d996cac28208 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/sync/IoTDBDataRegionSyncConnector.java @@ -294,8 +294,7 @@ private void doTransfer( // getDeviceId() may return null for InsertRowsNode, will be equal to getClient(null) clientAndStatus = clientManager.getClient(pipeInsertNodeTabletInsertionEvent.getDeviceId()); - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); final TPipeTransferReq req = compressIfNeeded( insertNode != null diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java index 7d28f8609ad4..365950c51091 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/writeback/WriteBackConnector.java @@ -129,8 +129,7 @@ private void doTransfer( throws PipeException, WALPipeException { final TSStatus status; - final InsertNode insertNode = - pipeInsertNodeTabletInsertionEvent.getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = pipeInsertNodeTabletInsertionEvent.getInsertNode(); if (Objects.isNull(insertNode)) { status = 
PipeDataNodeAgent.receiver() diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java index 72782ad228e7..d50e08463e2d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/consensus/PipeConsensusSubtaskExecutor.java @@ -22,11 +22,15 @@ import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.db.pipe.agent.task.execution.PipeConnectorSubtaskExecutor; +import java.util.concurrent.atomic.AtomicInteger; + public class PipeConsensusSubtaskExecutor extends PipeConnectorSubtaskExecutor { + private static final AtomicInteger id = new AtomicInteger(0); + public PipeConsensusSubtaskExecutor() { super( PipeConfig.getInstance().getPipeSubtaskExecutorMaxThreadNum(), - ThreadName.PIPE_CONSENSUS_EXECUTOR_POOL); + ThreadName.PIPE_CONSENSUS_EXECUTOR_POOL + "-" + id.getAndIncrement()); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java index 80ac86310b09..ed7d59f0b35f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tablet/PipeInsertNodeTabletInsertionEvent.java @@ -30,6 +30,7 @@ import org.apache.iotdb.db.pipe.event.ReferenceTrackableEvent; import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.InsertNodeMemoryEstimator; import 
org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; import org.apache.iotdb.db.pipe.resource.memory.PipeTabletMemoryBlock; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; @@ -37,8 +38,6 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertTabletNode; import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryPosition; import org.apache.iotdb.pipe.api.access.Row; import org.apache.iotdb.pipe.api.collector.RowCollector; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; @@ -69,52 +68,26 @@ public class PipeInsertNodeTabletInsertionEvent extends EnrichedEvent LoggerFactory.getLogger(PipeInsertNodeTabletInsertionEvent.class); private static final long INSTANCE_SIZE = RamUsageEstimator.shallowSizeOfInstance(PipeInsertNodeTabletInsertionEvent.class) - + RamUsageEstimator.shallowSizeOfInstance(WALEntryHandler.class) - + RamUsageEstimator.shallowSizeOfInstance(WALEntryPosition.class) + RamUsageEstimator.shallowSizeOfInstance(AtomicInteger.class) + RamUsageEstimator.shallowSizeOfInstance(AtomicBoolean.class); - private final WALEntryHandler walEntryHandler; - private final boolean isAligned; - private final boolean isGeneratedByPipe; - private final AtomicReference allocatedMemoryBlock; private volatile List tablets; private List dataContainers; - private final PartialPath devicePath; + private InsertNode insertNode; private ProgressIndex progressIndex; private long extractTime = 0; - public PipeInsertNodeTabletInsertionEvent( - final WALEntryHandler walEntryHandler, - final PartialPath devicePath, - final ProgressIndex progressIndex, - final boolean isAligned, - final boolean isGeneratedByPipe) { - this( - walEntryHandler, - 
devicePath, - progressIndex, - isAligned, - isGeneratedByPipe, - null, - 0, - null, - null, - Long.MIN_VALUE, - Long.MAX_VALUE); + public PipeInsertNodeTabletInsertionEvent(final InsertNode insertNode) { + this(insertNode, null, 0, null, null, Long.MIN_VALUE, Long.MAX_VALUE); } private PipeInsertNodeTabletInsertionEvent( - final WALEntryHandler walEntryHandler, - final PartialPath devicePath, - final ProgressIndex progressIndex, - final boolean isAligned, - final boolean isGeneratedByPipe, + final InsertNode insertNode, final String pipeName, final long creationTime, final PipeTaskMeta pipeTaskMeta, @@ -122,34 +95,24 @@ private PipeInsertNodeTabletInsertionEvent( final long startTime, final long endTime) { super(pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime); - this.walEntryHandler = walEntryHandler; // Record device path here so there's no need to get it from InsertNode cache later. - this.devicePath = devicePath; - this.progressIndex = progressIndex; - this.isAligned = isAligned; - this.isGeneratedByPipe = isGeneratedByPipe; - + this.progressIndex = insertNode.getProgressIndex(); + this.insertNode = insertNode; this.allocatedMemoryBlock = new AtomicReference<>(); } - public InsertNode getInsertNode() throws WALPipeException { - return walEntryHandler.getInsertNode(); + public InsertNode getInsertNode() { + return insertNode; } public ByteBuffer getByteBuffer() throws WALPipeException { - return walEntryHandler.getByteBuffer(); - } - - // This method is a pre-determination of whether to use binary transfers. 
- // If the insert node is null in cache, it means that we need to read the bytebuffer from the wal, - // and when the pattern is default, we can transfer the bytebuffer directly without serializing or - // deserializing - public InsertNode getInsertNodeViaCacheIfPossible() { - return walEntryHandler.getInsertNodeViaCacheIfPossible(); + return insertNode.serializeToByteBuffer(); } public String getDeviceId() { - return Objects.nonNull(devicePath) ? devicePath.getFullPath() : null; + return Objects.nonNull(insertNode.getDevicePath()) + ? insertNode.getDevicePath().getFullPath() + : null; } /////////////////////////// EnrichedEvent /////////////////////////// @@ -158,19 +121,16 @@ public String getDeviceId() { public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) { extractTime = System.nanoTime(); try { - PipeDataNodeResourceManager.wal().pin(walEntryHandler); if (Objects.nonNull(pipeName)) { PipeDataNodeSinglePipeMetrics.getInstance() .increaseInsertNodeEventCount(pipeName, creationTime); - PipeDataNodeAgent.task().addFloatingMemoryUsageInByte(pipeName, ramBytesUsed()); + PipeDataNodeAgent.task() + .addFloatingMemoryUsageInByte(pipeName, creationTime, ramBytesUsed()); } return true; } catch (final Exception e) { LOGGER.warn( - String.format( - "Increase reference count for memTable %d error. Holder Message: %s", - walEntryHandler.getMemTableId(), holderMessage), - e); + String.format("Increase reference count error. 
Holder Message: %s", holderMessage), e); return false; } } @@ -178,7 +138,6 @@ public boolean internallyIncreaseResourceReferenceCount(final String holderMessa @Override public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) { try { - PipeDataNodeResourceManager.wal().unpin(walEntryHandler); // release the containers' memory and close memory block if (dataContainers != null) { dataContainers.clear(); @@ -188,17 +147,16 @@ public boolean internallyDecreaseResourceReferenceCount(final String holderMessa return true; } catch (final Exception e) { LOGGER.warn( - String.format( - "Decrease reference count for memtable %d error. Holder Message: %s", - walEntryHandler.getMemTableId(), holderMessage), - e); + String.format("Decrease reference count error. Holder Message: %s", holderMessage), e); return false; } finally { if (Objects.nonNull(pipeName)) { - PipeDataNodeAgent.task().decreaseFloatingMemoryUsageInByte(pipeName, ramBytesUsed()); + PipeDataNodeAgent.task() + .decreaseFloatingMemoryUsageInByte(pipeName, creationTime, ramBytesUsed()); PipeDataNodeSinglePipeMetrics.getInstance() .decreaseInsertNodeEventCount(pipeName, creationTime, System.nanoTime() - extractTime); } + insertNode = null; } } @@ -221,28 +179,18 @@ public PipeInsertNodeTabletInsertionEvent shallowCopySelfAndBindPipeTaskMetaForP final long startTime, final long endTime) { return new PipeInsertNodeTabletInsertionEvent( - walEntryHandler, - devicePath, - progressIndex, - isAligned, - isGeneratedByPipe, - pipeName, - creationTime, - pipeTaskMeta, - pattern, - startTime, - endTime); + insertNode, pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime); } @Override public boolean isGeneratedByPipe() { - return isGeneratedByPipe; + return insertNode.isGeneratedByPipe(); } @Override public boolean mayEventTimeOverlappedWithTimeRange() { try { - final InsertNode insertNode = getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = getInsertNode(); if 
(Objects.isNull(insertNode)) { return true; } @@ -286,7 +234,7 @@ public boolean mayEventTimeOverlappedWithTimeRange() { @Override public boolean mayEventPathsOverlappedWithPattern() { try { - final InsertNode insertNode = getInsertNodeViaCacheIfPossible(); + final InsertNode insertNode = getInsertNode(); if (Objects.isNull(insertNode)) { return true; } @@ -439,8 +387,11 @@ public List toRawTabletInsertionEvents() { @Override public String toString() { return String.format( - "PipeInsertNodeTabletInsertionEvent{walEntryHandler=%s, progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s, dataContainers=%s}", - walEntryHandler, progressIndex, isAligned, isGeneratedByPipe, dataContainers) + "PipeInsertNodeTabletInsertionEvent{progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s, dataContainers=%s}", + progressIndex, + Objects.nonNull(insertNode) ? insertNode.isAligned() : null, + Objects.nonNull(insertNode) ? insertNode.isGeneratedByPipe() : null, + dataContainers) + " - " + super.toString(); } @@ -448,8 +399,10 @@ public String toString() { @Override public String coreReportMessage() { return String.format( - "PipeInsertNodeTabletInsertionEvent{walEntryHandler=%s, progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s}", - walEntryHandler, progressIndex, isAligned, isGeneratedByPipe) + "PipeInsertNodeTabletInsertionEvent{progressIndex=%s, isAligned=%s, isGeneratedByPipe=%s}", + progressIndex, + Objects.nonNull(insertNode) ? insertNode.isAligned() : null, + Objects.nonNull(insertNode) ? insertNode.isGeneratedByPipe() : null) + " - " + super.coreReportMessage(); } @@ -462,7 +415,7 @@ public String coreReportMessage() { @Override public long ramBytesUsed() { return INSTANCE_SIZE - + (Objects.nonNull(devicePath) ? PartialPath.estimateSize(devicePath) : 0) + + (Objects.nonNull(insertNode) ? InsertNodeMemoryEstimator.sizeOf(insertNode) : 0) + (Objects.nonNull(progressIndex) ? 
progressIndex.ramBytesUsed() : 0); } @@ -476,28 +429,24 @@ protected void trackResource() { @Override public PipeEventResource eventResourceBuilder() { return new PipeInsertNodeTabletInsertionEventResource( - this.isReleased, this.referenceCount, this.walEntryHandler, this.allocatedMemoryBlock); + this.isReleased, this.referenceCount, this.allocatedMemoryBlock); } private static class PipeInsertNodeTabletInsertionEventResource extends PipeEventResource { - private final WALEntryHandler walEntryHandler; private final AtomicReference allocatedMemoryBlock; private PipeInsertNodeTabletInsertionEventResource( final AtomicBoolean isReleased, final AtomicInteger referenceCount, - final WALEntryHandler walEntryHandler, final AtomicReference allocatedMemoryBlock) { super(isReleased, referenceCount); - this.walEntryHandler = walEntryHandler; this.allocatedMemoryBlock = allocatedMemoryBlock; } @Override protected void finalizeResource() { try { - PipeDataNodeResourceManager.wal().unpin(walEntryHandler); allocatedMemoryBlock.getAndUpdate( memoryBlock -> { if (Objects.nonNull(memoryBlock)) { @@ -506,8 +455,7 @@ protected void finalizeResource() { return null; }); } catch (final Exception e) { - LOGGER.warn( - "Decrease reference count for memTable {} error.", walEntryHandler.getMemTableId(), e); + LOGGER.warn("Decrease reference count error.", e); } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java index ca78dd7d50ad..91d38cf3361d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/terminate/PipeTerminateEvent.java @@ -19,23 +19,16 @@ package org.apache.iotdb.db.pipe.event.common.terminate; -import org.apache.iotdb.common.rpc.thrift.TFlushReq; import 
org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; -import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.agent.task.PipeDataNodeTask; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; -import org.apache.iotdb.db.storageengine.StorageEngine; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicLong; /** * The {@link PipeTerminateEvent} is an {@link EnrichedEvent} that controls the termination of pipe, @@ -45,40 +38,6 @@ */ public class PipeTerminateEvent extends EnrichedEvent { - private static final Logger LOGGER = LoggerFactory.getLogger(PipeTerminateEvent.class); - - private static final AtomicLong PROGRESS_REPORT_COUNT = new AtomicLong(0); - private static final AtomicLong LAST_PROGRESS_REPORT_TIME = new AtomicLong(0); - - public static void flushDataRegionIfNeeded() { - if (PROGRESS_REPORT_COUNT.get() > 0 - && PROGRESS_REPORT_COUNT.get() - > PipeConfig.getInstance().getPipeFlushAfterTerminateCount()) { - flushDataRegion(); - return; - } - - if (LAST_PROGRESS_REPORT_TIME.get() > 0 - && System.currentTimeMillis() - LAST_PROGRESS_REPORT_TIME.get() - > PipeConfig.getInstance().getPipeFlushAfterLastTerminateSeconds() * 1000L) { - flushDataRegion(); - } - } - - private static void flushDataRegion() { - try { - StorageEngine.getInstance().operateFlush(new TFlushReq()); - PROGRESS_REPORT_COUNT.set(0); - LAST_PROGRESS_REPORT_TIME.set(0); - LOGGER.info("Force flush all data regions because of last progress report time."); - } catch (final Exception e) { - LOGGER.warn( - "Failed to flush 
all data regions, please check the error message: {}", - e.getMessage(), - e); - } - } - private final int dataRegionId; public PipeTerminateEvent( @@ -135,9 +94,6 @@ public boolean mayEventPathsOverlappedWithPattern() { @Override public void reportProgress() { - PROGRESS_REPORT_COUNT.incrementAndGet(); - LAST_PROGRESS_REPORT_TIME.set(System.currentTimeMillis()); - // To avoid deadlock CompletableFuture.runAsync( () -> PipeDataNodeAgent.task().markCompleted(pipeName, dataRegionId)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java new file mode 100644 index 000000000000..9160ddf55af4 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeCompactedTsFileInsertionEvent.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.pipe.event.common.tsfile; + +import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; +import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey; +import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; +import org.apache.iotdb.commons.pipe.event.EnrichedEvent; +import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; +import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; + +import java.io.File; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +public class PipeCompactedTsFileInsertionEvent extends PipeTsFileInsertionEvent { + + private final String dataRegionId; + private final Set originFilePaths; + private final List commitIds; + + public PipeCompactedTsFileInsertionEvent( + final CommitterKey committerKey, + final Set originalEvents, + final PipeTsFileInsertionEvent anyOfOriginalEvents, + final TsFileResource tsFileResource, + final boolean shouldReportProgress) { + super( + tsFileResource, + null, + bindIsWithMod(originalEvents), + bindIsLoaded(originalEvents), + bindIsGeneratedByHistoricalExtractor(originalEvents), + committerKey.getPipeName(), + committerKey.getCreationTime(), + anyOfOriginalEvents.getPipeTaskMeta(), + anyOfOriginalEvents.getPipePattern(), + anyOfOriginalEvents.getStartTime(), + anyOfOriginalEvents.getEndTime()); + + this.dataRegionId = String.valueOf(committerKey.getRegionId()); + this.originFilePaths = + originalEvents.stream() + .map(PipeTsFileInsertionEvent::getTsFile) + .map(File::getPath) + .collect(Collectors.toSet()); + this.commitIds = + originalEvents.stream() + .map(PipeTsFileInsertionEvent::getCommitId) + 
.distinct() + .collect(Collectors.toList()); + + // init fields of EnrichedEvent + this.committerKey = committerKey; + isPatternParsed = bindIsPatternParsed(originalEvents); + isTimeParsed = bindIsTimeParsed(originalEvents); + this.shouldReportOnCommit = shouldReportProgress; + + // init fields of PipeTsFileInsertionEvent + flushPointCount = bindFlushPointCount(originalEvents); + overridingProgressIndex = bindOverridingProgressIndex(originalEvents); + } + + private static boolean bindIsWithMod(Set originalEvents) { + return originalEvents.stream().anyMatch(PipeTsFileInsertionEvent::isWithMod); + } + + private static boolean bindIsLoaded(Set originalEvents) { + return originalEvents.stream().anyMatch(PipeTsFileInsertionEvent::isLoaded); + } + + private static boolean bindIsGeneratedByHistoricalExtractor( + Set originalEvents) { + return originalEvents.stream() + .anyMatch(PipeTsFileInsertionEvent::isGeneratedByHistoricalExtractor); + } + + private static boolean bindIsTimeParsed(Set originalEvents) { + return originalEvents.stream().noneMatch(EnrichedEvent::shouldParseTime); + } + + private static boolean bindIsPatternParsed(Set originalEvents) { + return originalEvents.stream().noneMatch(EnrichedEvent::shouldParsePattern); + } + + private static long bindFlushPointCount(Set originalEvents) { + return originalEvents.stream() + .mapToLong( + e -> + e.getFlushPointCount() == TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET + ? 
0 + : e.getFlushPointCount()) + .sum(); + } + + private ProgressIndex bindOverridingProgressIndex(Set originalEvents) { + ProgressIndex overridingProgressIndex = MinimumProgressIndex.INSTANCE; + for (PipeTsFileInsertionEvent originalEvent : originalEvents) { + if (originalEvent.overridingProgressIndex != null) { + overridingProgressIndex = + overridingProgressIndex.updateToMinimumEqualOrIsAfterProgressIndex( + originalEvent.overridingProgressIndex); + } + } + return overridingProgressIndex != null + && !overridingProgressIndex.equals(MinimumProgressIndex.INSTANCE) + ? overridingProgressIndex + : null; + } + + @Override + public int getRebootTimes() { + throw new UnsupportedOperationException( + "PipeCompactedTsFileInsertionEvent does not support getRebootTimes."); + } + + @Override + public boolean hasMultipleCommitIds() { + return true; + } + + @Override + public long getCommitId() { + // max of commitIds is used as the commit id for this event + return commitIds.stream() + .max(Long::compareTo) + .orElseThrow( + () -> + new IllegalStateException( + "No commit IDs found in PipeCompactedTsFileInsertionEvent.")); + } + + // return dummy events for each commit ID (except the max one) + @Override + public List getDummyEventsForCommitIds() { + return commitIds.stream() + .filter(commitId -> commitId != getCommitId()) + .map(PipeCompactedTsFileInsertionDummyEvent::new) + .collect(Collectors.toList()); + } + + @Override + public List getCommitIds() { + return commitIds; + } + + @Override + public boolean equalsInPipeConsensus(final Object o) { + throw new UnsupportedOperationException( + "PipeCompactedTsFileInsertionEvent does not support equalsInPipeConsensus."); + } + + @Override + public void eliminateProgressIndex() { + if (Objects.isNull(overridingProgressIndex)) { + for (final String originFilePath : originFilePaths) { + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex(dataRegionId, pipeName, originFilePath); + } + } + } + + public class 
PipeCompactedTsFileInsertionDummyEvent extends EnrichedEvent { + + private final long commitId; + + public PipeCompactedTsFileInsertionDummyEvent(final long commitId) { + super( + PipeCompactedTsFileInsertionEvent.this.pipeName, + PipeCompactedTsFileInsertionEvent.this.creationTime, + PipeCompactedTsFileInsertionEvent.this.pipeTaskMeta, + null, // PipePattern is not needed for dummy event + Long.MIN_VALUE, + Long.MAX_VALUE); + this.commitId = commitId; // Use the commitId passed in + this.shouldReportOnCommit = false; // Dummy events do not report progress + } + + @Override + public long getCommitId() { + return commitId; + } + + @Override + public boolean internallyIncreaseResourceReferenceCount(String holderMessage) { + return true; + } + + @Override + public boolean internallyDecreaseResourceReferenceCount(String holderMessage) { + return true; + } + + @Override + public ProgressIndex getProgressIndex() { + return MinimumProgressIndex.INSTANCE; + } + + @Override + public EnrichedEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport( + String pipeName, + long creationTime, + PipeTaskMeta pipeTaskMeta, + PipePattern pattern, + long startTime, + long endTime) { + return null; + } + + @Override + public boolean isGeneratedByPipe() { + return false; + } + + @Override + public boolean mayEventTimeOverlappedWithTimeRange() { + return false; + } + + @Override + public boolean mayEventPathsOverlappedWithPattern() { + return false; + } + + @Override + public String coreReportMessage() { + return "PipeCompactedTsFileInsertionDummyEvent"; + } + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java index 41931131eb34..de5281976d3d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/PipeTsFileInsertionEvent.java @@ -65,48 +65,37 @@ public class PipeTsFileInsertionEvent extends EnrichedEvent private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileInsertionEvent.class); - private final TsFileResource resource; - private File tsFile; - private long extractTime = 0; + protected final TsFileResource resource; + protected File tsFile; + protected long extractTime = 0; // This is true iff the modFile exists and should be transferred - private boolean isWithMod; - private File modFile; + protected boolean isWithMod; + protected File modFile; - private final boolean isLoaded; - private final boolean isGeneratedByPipe; - private final boolean isGeneratedByPipeConsensus; - private final boolean isGeneratedByHistoricalExtractor; + protected final boolean isLoaded; + protected final boolean isGeneratedByPipe; + protected final boolean isGeneratedByPipeConsensus; + protected final boolean isGeneratedByHistoricalExtractor; - private final AtomicBoolean isClosed; - private final AtomicReference dataContainer; + protected final AtomicBoolean isClosed; + protected final AtomicReference dataContainer; // The point count of the TsFile. Used for metrics on PipeConsensus' receiver side. // May be updated after it is flushed. Should be negative if not set. 
- private long flushPointCount = TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET; + protected long flushPointCount = TsFileProcessor.FLUSH_POINT_COUNT_NOT_SET; - private volatile ProgressIndex overridingProgressIndex; + protected volatile ProgressIndex overridingProgressIndex; - public PipeTsFileInsertionEvent( - final TsFileResource resource, - final boolean isLoaded, - final boolean isGeneratedByHistoricalExtractor) { + public PipeTsFileInsertionEvent(final TsFileResource resource, final boolean isLoaded) { // The modFile must be copied before the event is assigned to the listening pipes this( - resource, - true, - isLoaded, - isGeneratedByHistoricalExtractor, - null, - 0, - null, - null, - Long.MIN_VALUE, - Long.MAX_VALUE); + resource, null, true, isLoaded, false, null, 0, null, null, Long.MIN_VALUE, Long.MAX_VALUE); } public PipeTsFileInsertionEvent( final TsFileResource resource, + final File tsFile, final boolean isWithMod, final boolean isLoaded, final boolean isGeneratedByHistoricalExtractor, @@ -117,9 +106,13 @@ public PipeTsFileInsertionEvent( final long startTime, final long endTime) { super(pipeName, creationTime, pipeTaskMeta, pattern, startTime, endTime); - this.resource = resource; - tsFile = resource.getTsFile(); + + // For events created at assigner or historical extractor, the tsFile is get from the resource + // For events created for source, the tsFile is inherited from the assigner, because the + // original tsFile may be gone, and we need to get the assigner's hard-linked tsFile to + // hard-link it to each pipe dir + this.tsFile = Objects.isNull(tsFile) ? 
resource.getTsFile() : tsFile; final ModificationFile modFile = resource.getModFile(); this.isWithMod = isWithMod && modFile.exists(); @@ -130,6 +123,8 @@ public PipeTsFileInsertionEvent( this.isGeneratedByPipeConsensus = resource.isGeneratedByPipeConsensus(); this.isGeneratedByHistoricalExtractor = isGeneratedByHistoricalExtractor; + this.dataContainer = new AtomicReference<>(null); + isClosed = new AtomicBoolean(resource.isClosed()); // Register close listener if TsFile is not closed if (!isClosed.get()) { @@ -165,8 +160,6 @@ public PipeTsFileInsertionEvent( // If the status is "closed", then the resource status is "closed", the tsFile won't be altered // and can be sent. isClosed.set(resource.isClosed()); - - this.dataContainer = new AtomicReference<>(null); } /** @@ -174,6 +167,10 @@ public PipeTsFileInsertionEvent( * otherwise. */ public boolean waitForTsFileClose() throws InterruptedException { + if (Objects.isNull(resource)) { + return true; + } + if (!isClosed.get()) { isClosed.set(resource.isClosed()); @@ -204,6 +201,7 @@ public boolean waitForTsFileClose() throws InterruptedException { return !resource.isEmpty(); } + @Override public File getTsFile() { return tsFile; } @@ -226,10 +224,6 @@ public boolean isLoaded() { return isLoaded; } - public long getFileStartTime() { - return resource.getFileStartTime(); - } - /** * Only used for metrics on PipeConsensus' receiver side. If the event is recovered after data * node's restart, the flushPointCount can be not set. 
It's totally fine for the PipeConsensus' @@ -252,9 +246,10 @@ public long getTimePartitionId() { public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) { extractTime = System.nanoTime(); try { - tsFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(tsFile, true, resource); + tsFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(tsFile, true, pipeName); if (isWithMod) { - modFile = PipeDataNodeResourceManager.tsfile().increaseFileReference(modFile, false, null); + modFile = + PipeDataNodeResourceManager.tsfile().increaseFileReference(modFile, false, pipeName); } return true; } catch (final Exception e) { @@ -275,9 +270,9 @@ public boolean internallyIncreaseResourceReferenceCount(final String holderMessa @Override public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) { try { - PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile); + PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile, pipeName); if (isWithMod) { - PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile); + PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile, pipeName); } close(); return true; @@ -303,24 +298,7 @@ public void bindProgressIndex(final ProgressIndex overridingProgressIndex) { @Override public ProgressIndex getProgressIndex() { - try { - if (!waitForTsFileClose()) { - LOGGER.warn( - "Skipping temporary TsFile {}'s progressIndex, will report MinimumProgressIndex", - tsFile); - return MinimumProgressIndex.INSTANCE; - } - if (Objects.nonNull(overridingProgressIndex)) { - return overridingProgressIndex; - } - return resource.getMaxProgressIndexAfterClose(); - } catch (final InterruptedException e) { - LOGGER.warn( - String.format( - "Interrupted when waiting for closing TsFile %s.", resource.getTsFilePath())); - Thread.currentThread().interrupt(); - return MinimumProgressIndex.INSTANCE; - } + return resource.getMaxProgressIndex(); } /** @@ -346,9 +324,9 
@@ protected void reportProgress() { } public void eliminateProgressIndex() { - if (Objects.isNull(overridingProgressIndex)) { + if (Objects.isNull(overridingProgressIndex) && Objects.nonNull(resource)) { PipeTsFileEpochProgressIndexKeeper.getInstance() - .eliminateProgressIndex(resource.getDataRegionId(), resource.getTsFilePath()); + .eliminateProgressIndex(resource.getDataRegionId(), pipeName, resource.getTsFilePath()); } } @@ -362,6 +340,7 @@ public PipeTsFileInsertionEvent shallowCopySelfAndBindPipeTaskMetaForProgressRep final long endTime) { return new PipeTsFileInsertionEvent( resource, + tsFile, isWithMod, isLoaded, isGeneratedByHistoricalExtractor, @@ -380,16 +359,15 @@ public boolean isGeneratedByPipe() { @Override public boolean mayEventTimeOverlappedWithTimeRange() { - // If the tsFile is not closed the resource.getFileEndTime() will be Long.MIN_VALUE - // In that case we only judge the resource.getFileStartTime() to avoid losing data - return isClosed.get() - ? startTime <= resource.getFileEndTime() && resource.getFileStartTime() <= endTime - : resource.getFileStartTime() <= endTime; + // Notice that this is only called at realtime extraction, and the tsFile is always closed + // Thus we can use the end time to judge the overlap + return Objects.isNull(resource) + || startTime <= resource.getFileEndTime() && resource.getFileStartTime() <= endTime; } @Override public boolean mayEventPathsOverlappedWithPattern() { - if (!resource.isClosed()) { + if (Objects.isNull(resource) || !resource.isClosed()) { return true; } @@ -397,7 +375,8 @@ public boolean mayEventPathsOverlappedWithPattern() { final Map deviceIsAlignedMap = PipeDataNodeResourceManager.tsfile() .getDeviceIsAlignedMapFromCache( - PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()), + PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir( + resource.getTsFile(), pipeName), false); final Set deviceSet = Objects.nonNull(deviceIsAlignedMap) ? 
deviceIsAlignedMap.keySet() : resource.getDevices(); @@ -569,7 +548,7 @@ private TsFileInsertionDataContainer initDataContainer() { } catch (final IOException e) { close(); - final String errorMsg = String.format("Read TsFile %s error.", resource.getTsFilePath()); + final String errorMsg = String.format("Read TsFile %s error.", tsFile.getPath()); LOGGER.warn(errorMsg, e); throw new PipeException(errorMsg); } @@ -617,8 +596,8 @@ public void close() { @Override public String toString() { return String.format( - "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, isClosed=%s, dataContainer=%s}", - resource, tsFile, isLoaded, isGeneratedByPipe, isClosed.get(), dataContainer) + "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, dataContainer=%s}", + resource, tsFile, isLoaded, isGeneratedByPipe, dataContainer) + " - " + super.toString(); } @@ -626,8 +605,8 @@ public String toString() { @Override public String coreReportMessage() { return String.format( - "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s, isClosed=%s}", - resource, tsFile, isLoaded, isGeneratedByPipe, isClosed.get()) + "PipeTsFileInsertionEvent{resource=%s, tsFile=%s, isLoaded=%s, isGeneratedByPipe=%s}", + resource, tsFile, isLoaded, isGeneratedByPipe) + " - " + super.coreReportMessage(); } @@ -644,6 +623,7 @@ public PipeEventResource eventResourceBuilder() { return new PipeTsFileInsertionEventResource( this.isReleased, this.referenceCount, + this.pipeName, this.tsFile, this.isWithMod, this.modFile, @@ -656,15 +636,18 @@ private static class PipeTsFileInsertionEventResource extends PipeEventResource private final boolean isWithMod; private final File modFile; private final AtomicReference dataContainer; + private final String pipeName; private PipeTsFileInsertionEventResource( final AtomicBoolean isReleased, final AtomicInteger referenceCount, + final String pipeName, final File tsFile, final boolean 
isWithMod, final File modFile, final AtomicReference dataContainer) { super(isReleased, referenceCount); + this.pipeName = pipeName; this.tsFile = tsFile; this.isWithMod = isWithMod; this.modFile = modFile; @@ -675,9 +658,9 @@ private PipeTsFileInsertionEventResource( protected void finalizeResource() { try { // decrease reference count - PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile); + PipeDataNodeResourceManager.tsfile().decreaseFileReference(tsFile, pipeName); if (isWithMod) { - PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile); + PipeDataNodeResourceManager.tsfile().decreaseFileReference(modFile, pipeName); } // close data container diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java index 21ce698141d8..1050950d1768 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/container/TsFileInsertionDataContainerProvider.java @@ -28,7 +28,8 @@ import org.apache.iotdb.db.pipe.event.common.tsfile.container.scan.TsFileInsertionScanDataContainer; import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResource; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; +import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFilePublicResource; import org.apache.tsfile.file.metadata.IDeviceID; import org.apache.tsfile.file.metadata.PlainDeviceID; @@ -79,8 +80,8 @@ public TsFileInsertionDataContainer provide() throws IOException { // Use scan container to save memory if ((double) 
PipeDataNodeResourceManager.memory().getUsedMemorySizeInBytes() - / PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes() - > PipeTsFileResource.MEMORY_SUFFICIENT_THRESHOLD) { + / PipeMemoryManager.getTotalNonFloatingMemorySizeInBytes() + > PipeTsFilePublicResource.MEMORY_SUFFICIENT_THRESHOLD) { return new TsFileInsertionScanDataContainer( pipeName, creationTime, tsFile, pattern, startTime, endTime, pipeTaskMeta, sourceEvent); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java index ef9db9a4cb02..0c3bce5c399e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/realtime/PipeRealtimeEventFactory.java @@ -28,7 +28,6 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; public class PipeRealtimeEventFactory { @@ -37,22 +36,13 @@ public class PipeRealtimeEventFactory { public static PipeRealtimeEvent createRealtimeEvent( final TsFileResource resource, final boolean isLoaded) { return TS_FILE_EPOCH_MANAGER.bindPipeTsFileInsertionEvent( - new PipeTsFileInsertionEvent(resource, isLoaded, false), resource); + new PipeTsFileInsertionEvent(resource, isLoaded), resource); } public static PipeRealtimeEvent createRealtimeEvent( - final WALEntryHandler walEntryHandler, - final InsertNode insertNode, - final TsFileResource resource) { + final InsertNode insertNode, final TsFileResource resource) { return TS_FILE_EPOCH_MANAGER.bindPipeInsertNodeTabletInsertionEvent( - new 
PipeInsertNodeTabletInsertionEvent( - walEntryHandler, - insertNode.getDevicePath(), - insertNode.getProgressIndex(), - insertNode.isAligned(), - insertNode.isGeneratedByPipe()), - insertNode, - resource); + new PipeInsertNodeTabletInsertionEvent(insertNode), insertNode, resource); } public static PipeRealtimeEvent createRealtimeEvent( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java index a8d95d448103..612b1a9701f2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/DataRegionWatermarkInjector.java @@ -48,10 +48,6 @@ public long getInjectionIntervalInMs() { return injectionIntervalInMs; } - public long getNextInjectionTime() { - return nextInjectionTime; - } - public PipeWatermarkEvent inject() { if (System.currentTimeMillis() < nextInjectionTime) { return null; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java index aa2f849f56f6..8158c22d1a68 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/IoTDBDataRegionExtractor.java @@ -20,7 +20,7 @@ package org.apache.iotdb.db.pipe.extractor.dataregion; import org.apache.iotdb.commons.consensus.DataRegionId; -import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.pipe.config.constant.SystemConstant; import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import 
org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import org.apache.iotdb.commons.pipe.extractor.IoTDBExtractor; @@ -38,7 +38,6 @@ import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics; import org.apache.iotdb.db.storageengine.StorageEngine; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode; import org.apache.iotdb.pipe.api.customizer.configuration.PipeExtractorRuntimeConfiguration; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; @@ -47,11 +46,12 @@ import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; import org.apache.iotdb.pipe.api.exception.PipeException; -import org.apache.tsfile.file.metadata.enums.CompressionType; import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; + import java.util.Arrays; import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; @@ -95,13 +95,12 @@ public class IoTDBDataRegionExtractor extends IoTDBExtractor { private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBDataRegionExtractor.class); - private PipeHistoricalDataRegionExtractor historicalExtractor; + private @Nullable PipeHistoricalDataRegionExtractor historicalExtractor; private PipeRealtimeDataRegionExtractor realtimeExtractor; private DataRegionWatermarkInjector watermarkInjector; private boolean hasNoExtractionNeed = true; - private boolean shouldExtractInsertion = false; private boolean shouldExtractDeletion = false; @Override @@ -116,7 +115,6 @@ public void validate(final PipeParameterValidator validator) throws Exception { return; } hasNoExtractionNeed = false; - shouldExtractInsertion = insertionDeletionListeningOptionPair.getLeft(); shouldExtractDeletion = insertionDeletionListeningOptionPair.getRight(); if 
(insertionDeletionListeningOptionPair.getLeft().equals(true) @@ -213,10 +211,22 @@ public void validate(final PipeParameterValidator validator) throws Exception { EXTRACTOR_HISTORY_END_TIME_KEY); } - constructHistoricalExtractor(); + if (validator + .getParameters() + .getBooleanOrDefault(SystemConstant.RESTART_KEY, SystemConstant.RESTART_DEFAULT_VALUE) + || validator + .getParameters() + .getBooleanOrDefault( + Arrays.asList(EXTRACTOR_HISTORY_ENABLE_KEY, SOURCE_HISTORY_ENABLE_KEY), + EXTRACTOR_HISTORY_ENABLE_DEFAULT_VALUE)) { + // Do not flush or open historical extractor when historical tsFile is disabled + constructHistoricalExtractor(); + } constructRealtimeExtractor(validator.getParameters()); - historicalExtractor.validate(validator); + if (Objects.nonNull(historicalExtractor)) { + historicalExtractor.validate(validator); + } realtimeExtractor.validate(validator); } @@ -237,12 +247,10 @@ private void validatePattern(final PipePattern pattern) { } private void constructHistoricalExtractor() { - // Enable historical extractor by default historicalExtractor = new PipeHistoricalDataRegionTsFileExtractor(); } - private void constructRealtimeExtractor(final PipeParameters parameters) - throws IllegalPathException { + private void constructRealtimeExtractor(final PipeParameters parameters) { // Use heartbeat only extractor if disable realtime extractor if (!parameters.getBooleanOrDefault( Arrays.asList(EXTRACTOR_REALTIME_ENABLE_KEY, SOURCE_REALTIME_ENABLE_KEY), @@ -270,7 +278,6 @@ private void constructRealtimeExtractor(final PipeParameters parameters) // Use hybrid mode by default if (!parameters.hasAnyAttributes(EXTRACTOR_REALTIME_MODE_KEY, SOURCE_REALTIME_MODE_KEY)) { - checkWalEnableAndSetUncompressed(parameters); realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor(); LOGGER.info( "Pipe: '{}' is not set, use hybrid mode by default.", EXTRACTOR_REALTIME_MODE_KEY); @@ -285,15 +292,12 @@ private void constructRealtimeExtractor(final PipeParameters 
parameters) case EXTRACTOR_REALTIME_MODE_HYBRID_VALUE: case EXTRACTOR_REALTIME_MODE_LOG_VALUE: case EXTRACTOR_REALTIME_MODE_STREAM_MODE_VALUE: - checkWalEnableAndSetUncompressed(parameters); realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor(); break; case EXTRACTOR_REALTIME_MODE_FORCED_LOG_VALUE: - checkWalEnableAndSetUncompressed(parameters); realtimeExtractor = new PipeRealtimeDataRegionLogExtractor(); break; default: - checkWalEnableAndSetUncompressed(parameters); realtimeExtractor = new PipeRealtimeDataRegionHybridExtractor(); if (LOGGER.isWarnEnabled()) { LOGGER.warn( @@ -303,28 +307,6 @@ private void constructRealtimeExtractor(final PipeParameters parameters) } } - private void checkWalEnableAndSetUncompressed(final PipeParameters parameters) - throws IllegalPathException { - if (Boolean.TRUE.equals( - DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters) - .getLeft()) - && IoTDBDescriptor.getInstance().getConfig().getWalMode().equals(WALMode.DISABLE)) { - throw new PipeException( - "The pipe cannot transfer realtime insertion if data region disables wal. Please set 'realtime.mode'='batch' in source parameters when enabling realtime transmission."); - } - - if (!IoTDBDescriptor.getInstance() - .getConfig() - .getWALCompressionAlgorithm() - .equals(CompressionType.UNCOMPRESSED)) { - LOGGER.info( - "The pipe prefers uncompressed wal, and may introduce certain delay in realtime insert syncing without it. 
Hence, we change it to uncompressed."); - IoTDBDescriptor.getInstance() - .getConfig() - .setWALCompressionAlgorithm(CompressionType.UNCOMPRESSED); - } - } - @Override public void customize( final PipeParameters parameters, final PipeExtractorRuntimeConfiguration configuration) @@ -335,7 +317,9 @@ public void customize( super.customize(parameters, configuration); - historicalExtractor.customize(parameters, configuration); + if (Objects.nonNull(historicalExtractor)) { + historicalExtractor.customize(parameters, configuration); + } realtimeExtractor.customize(parameters, configuration); // Set watermark injector @@ -372,7 +356,9 @@ public void start() throws Exception { "Pipe {}@{}: Starting historical extractor {} and realtime extractor {}.", pipeName, regionId, - historicalExtractor.getClass().getSimpleName(), + Objects.nonNull(historicalExtractor) + ? historicalExtractor.getClass().getSimpleName() + : null, realtimeExtractor.getClass().getSimpleName()); super.start(); @@ -407,7 +393,9 @@ public void start() throws Exception { "Pipe {}@{}: Started historical extractor {} and realtime extractor {} successfully within {} ms.", pipeName, regionId, - historicalExtractor.getClass().getSimpleName(), + Objects.nonNull(historicalExtractor) + ? historicalExtractor.getClass().getSimpleName() + : null, realtimeExtractor.getClass().getSimpleName(), System.currentTimeMillis() - startTime); return; @@ -425,14 +413,18 @@ private void startHistoricalExtractorAndRealtimeExtractor( // There can still be writing when tsFile events are added. If we start // realtimeExtractor after the process, then this part of data will be lost. 
realtimeExtractor.start(); - historicalExtractor.start(); + if (Objects.nonNull(historicalExtractor)) { + historicalExtractor.start(); + } } catch (final Exception e) { exceptionHolder.set(e); LOGGER.warn( "Pipe {}@{}: Start historical extractor {} and realtime extractor {} error.", pipeName, regionId, - historicalExtractor.getClass().getSimpleName(), + Objects.nonNull(historicalExtractor) + ? historicalExtractor.getClass().getSimpleName() + : null, realtimeExtractor.getClass().getSimpleName(), e); } @@ -451,7 +443,7 @@ public Event supply() throws Exception { } Event event = null; - if (!historicalExtractor.hasConsumedAll()) { + if (Objects.nonNull(historicalExtractor) && !historicalExtractor.hasConsumedAll()) { event = historicalExtractor.supply(); } else { if (Objects.nonNull(watermarkInjector)) { @@ -481,32 +473,21 @@ public void close() throws Exception { return; } - historicalExtractor.close(); + if (Objects.nonNull(historicalExtractor)) { + historicalExtractor.close(); + } realtimeExtractor.close(); if (Objects.nonNull(taskID)) { PipeDataRegionExtractorMetrics.getInstance().deregister(taskID); } } - //////////////////////////// APIs provided for detecting stuck //////////////////////////// - - public boolean shouldExtractInsertion() { - return shouldExtractInsertion; - } - - public boolean isStreamMode() { - return realtimeExtractor instanceof PipeRealtimeDataRegionHybridExtractor - || realtimeExtractor instanceof PipeRealtimeDataRegionLogExtractor; - } - - public boolean hasConsumedAllHistoricalTsFiles() { - return historicalExtractor.hasConsumedAll(); - } - //////////////////////////// APIs provided for metric framework //////////////////////////// public int getHistoricalTsFileInsertionEventCount() { - return hasBeenStarted.get() ? historicalExtractor.getPendingQueueSize() : 0; + return hasBeenStarted.get() && Objects.nonNull(historicalExtractor) + ? 
historicalExtractor.getPendingQueueSize() + : 0; } public int getTabletInsertionEventCount() { @@ -520,10 +501,4 @@ public int getRealtimeTsFileInsertionEventCount() { public int getPipeHeartbeatEventCount() { return hasBeenStarted.get() ? realtimeExtractor.getPipeHeartbeatEventCount() : 0; } - - public int getEventCount() { - return hasBeenStarted.get() - ? (historicalExtractor.getPendingQueueSize() + realtimeExtractor.getEventCount()) - : 0; - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java index 5ad3c6039f60..c1c4893ae8a9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java @@ -33,9 +33,9 @@ import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; import org.apache.iotdb.db.storageengine.StorageEngine; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; +import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import org.apache.iotdb.db.storageengine.dataregion.tsfile.generator.TsFileNameGenerator; @@ -57,10 +57,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; 
import java.util.Objects; +import java.util.Optional; import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -94,9 +94,6 @@ public class PipeHistoricalDataRegionTsFileExtractor implements PipeHistoricalDa private static final Logger LOGGER = LoggerFactory.getLogger(PipeHistoricalDataRegionTsFileExtractor.class); - private static final Map DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP = new HashMap<>(); - private static final long PIPE_MIN_FLUSH_INTERVAL_IN_MS = 2000; - private String pipeName; private long creationTime; @@ -219,17 +216,14 @@ public void validate(final PipeParameterValidator validator) { try { historicalDataExtractionStartTime = - isHistoricalExtractorEnabled - && parameters.hasAnyAttributes( - EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY) + parameters.hasAnyAttributes( + EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY) ? DateTimeUtils.convertTimestampOrDatetimeStrToLongWithDefaultZone( parameters.getStringByKeys( EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY)) : Long.MIN_VALUE; historicalDataExtractionEndTime = - isHistoricalExtractorEnabled - && parameters.hasAnyAttributes( - EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY) + parameters.hasAnyAttributes(EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY) ? 
DateTimeUtils.convertTimestampOrDatetimeStrToLongWithDefaultZone( parameters.getStringByKeys( EXTRACTOR_HISTORY_END_TIME_KEY, SOURCE_HISTORY_END_TIME_KEY)) @@ -270,10 +264,6 @@ public void customize( startIndex = environment.getPipeTaskMeta().getProgressIndex(); dataRegionId = environment.getRegionId(); - synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) { - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.putIfAbsent(dataRegionId, 0L); - } - pipePattern = PipePattern.parsePipePatternFromSourceParameters(parameters); final DataRegion dataRegion = @@ -297,34 +287,6 @@ public void customize( // PipeHistoricalDataRegionExtractor from implementation perspective. : environment.getCreationTime(); - // Only invoke flushDataRegionAllTsFiles() when the pipe runs in the realtime only mode. - // realtime only mode -> (historicalDataExtractionTimeLowerBound != Long.MIN_VALUE) - // - // Ensure that all data in the data region is flushed to disk before extracting data. - // This ensures the generation time of all newly generated TsFiles (realtime data) after the - // invocation of flushDataRegionAllTsFiles() is later than the creationTime of the pipe - // (historicalDataExtractionTimeLowerBound). - // - // Note that: the generation time of the TsFile is the time when the TsFile is created, not - // the time when the data is flushed to the TsFile. - // - // Then we can use the generation time of the TsFile to determine whether the data in the - // TsFile should be extracted by comparing the generation time of the TsFile with the - // historicalDataExtractionTimeLowerBound when starting the pipe in realtime only mode. - // - // If we don't invoke flushDataRegionAllTsFiles() in the realtime only mode, the data generated - // between the creation time of the pipe the time when the pipe starts will be lost. 
- if (historicalDataExtractionTimeLowerBound != Long.MIN_VALUE) { - synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) { - final long lastFlushedByPipeTime = - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.get(dataRegionId); - if (System.currentTimeMillis() - lastFlushedByPipeTime >= PIPE_MIN_FLUSH_INTERVAL_IN_MS) { - flushDataRegionAllTsFiles(); - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.replace(dataRegionId, System.currentTimeMillis()); - } - } - } - shouldTransferModFile = parameters.getBooleanOrDefault( Arrays.asList(SOURCE_MODS_ENABLE_KEY, EXTRACTOR_MODS_ENABLE_KEY), @@ -354,24 +316,9 @@ public void customize( } } - private void flushDataRegionAllTsFiles() { - final DataRegion dataRegion = - StorageEngine.getInstance().getDataRegion(new DataRegionId(dataRegionId)); - if (Objects.isNull(dataRegion)) { - return; - } - - dataRegion.writeLock("Pipe: create historical TsFile extractor"); - try { - dataRegion.syncCloseAllWorkingTsFileProcessors(); - } finally { - dataRegion.writeUnlock(); - } - } - @Override public synchronized void start() { - if (!shouldExtractInsertion) { + if (!shouldExtractInsertion || !isHistoricalExtractorEnabled) { hasBeenStarted = true; return; } @@ -403,48 +350,29 @@ public synchronized void start() { // consensus pipe, and the lastFlushed timestamp is not updated here. 
if (pipeName.startsWith(PipeStaticMeta.CONSENSUS_PIPE_PREFIX)) { dataRegion.syncCloseAllWorkingTsFileProcessors(); - LOGGER.info( - "Pipe {}@{}: finish to flush data region, took {} ms", - pipeName, - dataRegionId, - System.currentTimeMillis() - startHistoricalExtractionTime); } else { - synchronized (DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP) { - final long lastFlushedByPipeTime = - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.get(dataRegionId); - if (System.currentTimeMillis() - lastFlushedByPipeTime >= PIPE_MIN_FLUSH_INTERVAL_IN_MS) { - dataRegion.syncCloseAllWorkingTsFileProcessors(); - DATA_REGION_ID_TO_PIPE_FLUSHED_TIME_MAP.replace( - dataRegionId, System.currentTimeMillis()); - LOGGER.info( - "Pipe {}@{}: finish to flush data region, took {} ms", - pipeName, - dataRegionId, - System.currentTimeMillis() - startHistoricalExtractionTime); - } else { - LOGGER.info( - "Pipe {}@{}: skip to flush data region, last flushed time {} ms ago", - pipeName, - dataRegionId, - System.currentTimeMillis() - lastFlushedByPipeTime); - } - } + dataRegion.asyncCloseAllWorkingTsFileProcessors(); } + LOGGER.info( + "Pipe {}@{}: finish to flush data region, took {} ms", + pipeName, + dataRegionId, + System.currentTimeMillis() - startHistoricalExtractionTime); final TsFileManager tsFileManager = dataRegion.getTsFileManager(); tsFileManager.readLock(); try { final int originalSequenceTsFileCount = tsFileManager.size(true); - final int originalUnsequenceTsFileCount = tsFileManager.size(false); + final int originalUnSequenceTsFileCount = tsFileManager.size(false); final List resourceList = - new ArrayList<>(originalSequenceTsFileCount + originalUnsequenceTsFileCount); + new ArrayList<>(originalSequenceTsFileCount + originalUnSequenceTsFileCount); LOGGER.info( "Pipe {}@{}: start to extract historical TsFile, original sequence file count {}, " - + "original unsequence file count {}, start progress index {}", + + "original unSequence file count {}, start progress index {}", pipeName, 
dataRegionId, originalSequenceTsFileCount, - originalUnsequenceTsFileCount, + originalUnSequenceTsFileCount, startIndex); final Collection sequenceTsFileResources = @@ -457,9 +385,13 @@ public synchronized void start() { // not transfer pipe requests. && (!resource.isGeneratedByPipe() || isForwardingPipeRequests) && ( - // Some resource may not be closed due to the control of - // PIPE_MIN_FLUSH_INTERVAL_IN_MS. We simply ignore them. + // If the tsFile is not already marked closing, it is not captured by + // the pipe realtime module. Thus, we can wait for the realtime sync + // module to handle this, to avoid blocking the pipe sync process. !resource.isClosed() + && Optional.ofNullable(resource.getProcessor()) + .map(TsFileProcessor::alreadyMarkedClosing) + .orElse(true) || mayTsFileContainUnprocessedData(resource) && isTsFileResourceOverlappedWithTimeRange(resource) && isTsFileGeneratedAfterExtractionTimeLowerBound(resource) @@ -467,7 +399,7 @@ && mayTsFileResourceOverlappedWithPattern(resource))) .collect(Collectors.toList()); resourceList.addAll(sequenceTsFileResources); - final Collection unsequenceTsFileResources = + final Collection unSequenceTsFileResources = tsFileManager.getTsFileList(false).stream() .filter( resource -> @@ -477,15 +409,19 @@ && mayTsFileResourceOverlappedWithPattern(resource))) // not transfer pipe requests. && (!resource.isGeneratedByPipe() || isForwardingPipeRequests) && ( - // Some resource may not be closed due to the control of - // PIPE_MIN_FLUSH_INTERVAL_IN_MS. We simply ignore them. + // If the tsFile is not already marked closing, it is not captured by + // the pipe realtime module. Thus, we can wait for the realtime sync + // module to handle this, to avoid blocking the pipe sync process. 
!resource.isClosed() + && Optional.ofNullable(resource.getProcessor()) + .map(TsFileProcessor::alreadyMarkedClosing) + .orElse(true) || mayTsFileContainUnprocessedData(resource) && isTsFileResourceOverlappedWithTimeRange(resource) && isTsFileGeneratedAfterExtractionTimeLowerBound(resource) && mayTsFileResourceOverlappedWithPattern(resource))) .collect(Collectors.toList()); - resourceList.addAll(unsequenceTsFileResources); + resourceList.addAll(unSequenceTsFileResources); resourceList.removeIf( resource -> { @@ -493,7 +429,7 @@ && mayTsFileResourceOverlappedWithPattern(resource))) // Will unpin it after the PipeTsFileInsertionEvent is created and pinned. try { PipeDataNodeResourceManager.tsfile() - .pinTsFileResource(resource, shouldTransferModFile); + .pinTsFileResource(resource, shouldTransferModFile, pipeName); return false; } catch (final IOException e) { LOGGER.warn("Pipe: failed to pin TsFileResource {}", resource.getTsFilePath(), e); @@ -515,10 +451,10 @@ && mayTsFileResourceOverlappedWithPattern(resource))) dataRegionId, sequenceTsFileResources.size(), originalSequenceTsFileCount, - unsequenceTsFileResources.size(), - originalUnsequenceTsFileCount, + unSequenceTsFileResources.size(), + originalUnSequenceTsFileCount, resourceList.size(), - originalSequenceTsFileCount + originalUnsequenceTsFileCount, + originalSequenceTsFileCount + originalUnSequenceTsFileCount, System.currentTimeMillis() - startHistoricalExtractionTime); } finally { tsFileManager.readUnlock(); @@ -537,8 +473,8 @@ private boolean mayTsFileContainUnprocessedData(final TsFileResource resource) { if (startIndex instanceof StateProgressIndex) { startIndex = ((StateProgressIndex) startIndex).getInnerProgressIndex(); } - return !startIndex.isAfter(resource.getMaxProgressIndexAfterClose()) - && !startIndex.equals(resource.getMaxProgressIndexAfterClose()); + return !startIndex.isAfter(resource.getMaxProgressIndex()) + && !startIndex.equals(resource.getMaxProgressIndex()); } private boolean 
mayTsFileResourceOverlappedWithPattern(final TsFileResource resource) { @@ -546,9 +482,7 @@ private boolean mayTsFileResourceOverlappedWithPattern(final TsFileResource reso try { final Map deviceIsAlignedMap = PipeDataNodeResourceManager.tsfile() - .getDeviceIsAlignedMapFromCache( - PipeTsFileResourceManager.getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()), - false); + .getDeviceIsAlignedMapFromCache(resource.getTsFile(), false); deviceSet = Objects.nonNull(deviceIsAlignedMap) ? deviceIsAlignedMap.keySet() : resource.getDevices(); } catch (final IOException e) { @@ -606,6 +540,7 @@ public synchronized Event supply() { } final TsFileResource resource = pendingQueue.poll(); + if (resource == null) { final PipeTerminateEvent terminateEvent = new PipeTerminateEvent(pipeName, creationTime, pipeTaskMeta, dataRegionId); @@ -624,6 +559,7 @@ public synchronized Event supply() { final PipeTsFileInsertionEvent event = new PipeTsFileInsertionEvent( resource, + null, shouldTransferModFile, false, true, @@ -654,7 +590,7 @@ public synchronized Event supply() { return isReferenceCountIncreased ? 
event : null; } finally { try { - PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource); + PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource, pipeName); } catch (final IOException e) { LOGGER.warn( "Pipe {}@{}: failed to unpin TsFileResource after creating event, original path: {}", @@ -684,7 +620,7 @@ public synchronized void close() { pendingQueue.forEach( resource -> { try { - PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource); + PipeDataNodeResourceManager.tsfile().unpinTsFileResource(resource, pipeName); } catch (final IOException e) { LOGGER.warn( "Pipe {}@{}: failed to unpin TsFileResource after dropping pipe, original path: {}", diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java index c7a778357a44..0eaa94e0262c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.pipe.extractor.dataregion.realtime; import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.commons.consensus.index.ProgressIndex; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; @@ -31,8 +32,11 @@ import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; +import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; 
+import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; import org.apache.iotdb.db.pipe.extractor.dataregion.DataRegionListeningFilter; +import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeInsertionDataNodeListener; import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.listener.PipeTimePartitionListener; import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter; @@ -395,6 +399,13 @@ protected void extractDirectly(final PipeRealtimeEvent event) { } } + protected void maySkipIndex4Event(final PipeRealtimeEvent event) { + if (event.getEvent() instanceof PipeTsFileInsertionEvent + || event.getEvent() instanceof PipeInsertNodeTabletInsertionEvent) { + maySkipProgressIndexForRealtimeEvent(event); + } + } + protected Event supplyHeartbeat(final PipeRealtimeEvent event) { if (event.increaseReferenceCount(PipeRealtimeDataRegionExtractor.class.getName())) { return event.getEvent(); @@ -496,6 +507,30 @@ public final boolean isShouldTransferModFile() { return shouldTransferModFile; } + private void maySkipProgressIndexForRealtimeEvent(final PipeRealtimeEvent event) { + if (PipeTsFileEpochProgressIndexKeeper.getInstance() + .isProgressIndexAfterOrEquals( + dataRegionId, + pipeName, + event.getTsFileEpoch().getFilePath(), + getProgressIndex4RealtimeEvent(event))) { + event.skipReportOnCommit(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Pipe {} on data region {} skip commit of event {} because it was flushed prematurely.", + pipeName, + dataRegionId, + event.coreReportMessage()); + } + } + } + + private ProgressIndex getProgressIndex4RealtimeEvent(final PipeRealtimeEvent event) { + return event.getEvent() instanceof PipeTsFileInsertionEvent + ? 
((PipeTsFileInsertionEvent) event.getEvent()).forceGetProgressIndex() + : event.getProgressIndex(); + } + @Override public String toString() { return "PipeRealtimeDataRegionExtractor{" diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java index 500a195e6002..51bd3819f546 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java @@ -22,7 +22,6 @@ import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; -import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; @@ -31,12 +30,9 @@ import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; -import org.apache.iotdb.db.pipe.metric.overview.PipeDataNodeSinglePipeMetrics; import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; -import org.apache.iotdb.db.storageengine.StorageEngine; -import org.apache.iotdb.db.storageengine.dataregion.wal.WALManager; import 
org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; @@ -45,7 +41,6 @@ import org.slf4j.LoggerFactory; import java.util.Objects; -import java.util.function.Consumer; public class PipeRealtimeDataRegionHybridExtractor extends PipeRealtimeDataRegionExtractor { @@ -87,6 +82,8 @@ private void extractTabletInsertion(final PipeRealtimeEvent event) { if (canNotUseTabletAnyMore(event)) { event.getTsFileEpoch().migrateState(this, curState -> TsFileEpoch.State.USING_TSFILE); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .registerProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getResource()); } else { event .getTsFileEpoch() @@ -145,6 +142,8 @@ private void extractTabletInsertion(final PipeRealtimeEvent event) { } private void extractTsFileInsertion(final PipeRealtimeEvent event) { + // Notice that, if the tsFile is partially extracted because the pipe is not opened before, the + // former data won't be extracted event .getTsFileEpoch() .migrateState( @@ -152,34 +151,13 @@ private void extractTsFileInsertion(final PipeRealtimeEvent event) { state -> { switch (state) { case EMPTY: + return ((PipeTsFileInsertionEvent) event.getEvent()).isLoaded() + ? TsFileEpoch.State.USING_TSFILE + : TsFileEpoch.State.USING_TABLET; + case USING_TABLET: + return TsFileEpoch.State.USING_TABLET; case USING_TSFILE: return TsFileEpoch.State.USING_TSFILE; - case USING_TABLET: - if (((PipeTsFileInsertionEvent) event.getEvent()).getFileStartTime() - < event.getTsFileEpoch().getInsertNodeMinTime()) { - // Some insert nodes in the tsfile epoch are not captured by pipe, so we should - // capture the tsfile event to make sure all data in the tsfile epoch can be - // extracted. - // - // The situation can be caused by the following operations: - // 1. PipeA: start historical data extraction with flush - // 2. Data insertion - // 3. 
PipeB: start realtime data extraction - // 4. PipeB: start historical data extraction without flush - // 5. Data inserted in the step2 is not captured by PipeB, and if its tsfile - // epoch's state is USING_TABLET, the tsfile event will be ignored, which - // will cause the data loss in the tsfile epoch. - LOGGER.info( - "The tsFile {}'s epoch's start time {} is smaller than the captured insertNodes' min time {}, will regard it as data loss or un-sequential, will extract the tsFile", - ((PipeTsFileInsertionEvent) event.getEvent()).getTsFile(), - ((PipeTsFileInsertionEvent) event.getEvent()).getFileStartTime(), - event.getTsFileEpoch().getInsertNodeMinTime()); - return TsFileEpoch.State.USING_BOTH; - } else { - // All data in the tsfile epoch has been extracted in tablet mode, so we should - // simply keep the state of the tsfile epoch and discard the tsfile event. - return TsFileEpoch.State.USING_TABLET; - } case USING_BOTH: default: return canNotUseTabletAnyMore(event) @@ -225,50 +203,13 @@ private void extractTsFileInsertion(final PipeRealtimeEvent event) { private boolean canNotUseTabletAnyMore(final PipeRealtimeEvent event) { // In the following 4 cases, we should not extract this tablet event. all the data // represented by the tablet event should be carried by the following tsfile event: - // 0. If the remaining insert event count is too large, we need to reduce the accumulated - // tablets. - // 1. If Wal size > maximum size of wal buffer, // the write operation will be throttled, so we should not extract any more tablet events. - // 2. The shallow memory usage of the insert node has reached the dangerous threshold. - // 3. Deprecated logics (unused by default) - return mayRemainingInsertNodeEventExceedLimit(event) - || mayWalSizeReachThrottleThreshold(event) - || mayInsertNodeMemoryReachDangerousThreshold(event) + // 1. The shallow memory usage of the insert node has reached the dangerous threshold. + // 2. 
Deprecated logics (unused by default) + return mayInsertNodeMemoryReachDangerousThreshold(event) || canNotUseTabletAnymoreDeprecated(event); } - private boolean mayRemainingInsertNodeEventExceedLimit(final PipeRealtimeEvent event) { - final boolean mayRemainingInsertEventExceedLimit = - PipeDataNodeSinglePipeMetrics.getInstance().mayRemainingInsertEventExceedLimit(pipeID); - if (mayRemainingInsertEventExceedLimit && event.mayExtractorUseTablets(this)) { - logByLogManager( - l -> - l.info( - "Pipe task {}@{} canNotUseTabletAnyMore(0): remaining insert event has reached max allowed insert event count {}", - pipeName, - dataRegionId, - PipeConfig.getInstance().getPipeMaxAllowedRemainingInsertEventCountPerPipe())); - } - return mayRemainingInsertEventExceedLimit; - } - - private boolean mayWalSizeReachThrottleThreshold(final PipeRealtimeEvent event) { - final boolean mayWalSizeReachThrottleThreshold = - 3 * WALManager.getInstance().getTotalDiskUsage() - > IoTDBDescriptor.getInstance().getConfig().getThrottleThreshold(); - if (mayWalSizeReachThrottleThreshold && event.mayExtractorUseTablets(this)) { - logByLogManager( - l -> - l.info( - "Pipe task {}@{} canNotUseTabletAnyMore(1): Wal size {} has reached throttle threshold {}", - pipeName, - dataRegionId, - WALManager.getInstance().getTotalDiskUsage(), - IoTDBDescriptor.getInstance().getConfig().getThrottleThreshold() / 3.0d)); - } - return mayWalSizeReachThrottleThreshold; - } - private boolean mayInsertNodeMemoryReachDangerousThreshold(final PipeRealtimeEvent event) { final long floatingMemoryUsageInByte = PipeDataNodeAgent.task().getFloatingMemoryUsageInByte(pipeName); @@ -276,27 +217,19 @@ private boolean mayInsertNodeMemoryReachDangerousThreshold(final PipeRealtimeEve final long totalFloatingMemorySizeInBytes = PipeMemoryManager.getTotalFloatingMemorySizeInBytes(); final boolean mayInsertNodeMemoryReachDangerousThreshold = - 3 * floatingMemoryUsageInByte * pipeCount >= 2 * totalFloatingMemorySizeInBytes; + 
floatingMemoryUsageInByte * pipeCount >= totalFloatingMemorySizeInBytes; if (mayInsertNodeMemoryReachDangerousThreshold && event.mayExtractorUseTablets(this)) { - logByLogManager( - l -> - l.info( - "Pipe task {}@{} canNotUseTabletAnyMore(2): The shallow memory usage of the insert node {} has reached the dangerous threshold {}", - pipeName, - dataRegionId, - floatingMemoryUsageInByte * pipeCount, - 2 * totalFloatingMemorySizeInBytes / 3.0d)); + LOGGER.info( + "Pipe task {}@{} canNotUseTabletAnyMore(1) for tsFile {}: The memory usage of the insert node {} has reached the dangerous threshold {}", + pipeName, + dataRegionId, + event.getTsFileEpoch().getFilePath(), + floatingMemoryUsageInByte * pipeCount, + totalFloatingMemorySizeInBytes); } return mayInsertNodeMemoryReachDangerousThreshold; } - private void logByLogManager(final Consumer infoFunction) { - PipeDataNodeResourceManager.log() - .schedule( - PipeRealtimeDataRegionHybridExtractor.class, getTaskID(), Integer.MAX_VALUE, 100, 1) - .ifPresent(infoFunction); - } - /** * These judgements are deprecated, and are only reserved for manual operation and compatibility. */ @@ -304,54 +237,14 @@ PipeRealtimeDataRegionHybridExtractor.class, getTaskID(), Integer.MAX_VALUE, 100 private boolean canNotUseTabletAnymoreDeprecated(final PipeRealtimeEvent event) { // In the following 5 cases, we should not extract any more tablet events. all the data // represented by the tablet events should be carried by the following tsfile event: - // 0. If the pipe task is currently restarted. - // 1. The number of pinned memTables has reached the dangerous threshold. - // 2. The number of historical tsFile events to transfer has exceeded the limit. - // 3. The number of realtime tsfile events to transfer has exceeded the limit. - // 4. The number of linked tsFiles has reached the dangerous threshold. 
- return isPipeTaskCurrentlyRestarted(event) - || mayMemTablePinnedCountReachDangerousThreshold(event) - || isHistoricalTsFileEventCountExceededLimit(event) + // 1. The number of historical tsFile events to transfer has exceeded the limit. + // 2. The number of realtime tsfile events to transfer has exceeded the limit. + // 3. The number of linked tsFiles has reached the dangerous threshold. + return isHistoricalTsFileEventCountExceededLimit(event) || isRealtimeTsFileEventCountExceededLimit(event) || mayTsFileLinkedCountReachDangerousThreshold(event); } - private boolean isPipeTaskCurrentlyRestarted(final PipeRealtimeEvent event) { - if (!PipeConfig.getInstance().isPipeEpochKeepTsFileAfterStuckRestartEnabled()) { - return false; - } - - final boolean isPipeTaskCurrentlyRestarted = - PipeDataNodeAgent.task().isPipeTaskCurrentlyRestarted(pipeName); - if (isPipeTaskCurrentlyRestarted && event.mayExtractorUseTablets(this)) { - LOGGER.info( - "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(0): Pipe task is currently restarted", - pipeName, - dataRegionId); - } - return isPipeTaskCurrentlyRestarted; - } - - private boolean mayMemTablePinnedCountReachDangerousThreshold(final PipeRealtimeEvent event) { - if (PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() == Integer.MAX_VALUE) { - return false; - } - final boolean mayMemTablePinnedCountReachDangerousThreshold = - PipeDataNodeResourceManager.wal().getPinnedWalCount() - >= PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() - * StorageEngine.getInstance().getDataRegionNumber(); - if (mayMemTablePinnedCountReachDangerousThreshold && event.mayExtractorUseTablets(this)) { - LOGGER.info( - "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(1): The number of pinned memTables {} has reached the dangerous threshold {}", - pipeName, - dataRegionId, - PipeDataNodeResourceManager.wal().getPinnedWalCount(), - PipeConfig.getInstance().getPipeMaxAllowedPinnedMemTableCount() - * 
StorageEngine.getInstance().getDataRegionNumber()); - } - return mayMemTablePinnedCountReachDangerousThreshold; - } - private boolean isHistoricalTsFileEventCountExceededLimit(final PipeRealtimeEvent event) { if (PipeConfig.getInstance().getPipeMaxAllowedHistoricalTsFilePerDataRegion() == Integer.MAX_VALUE) { @@ -365,9 +258,10 @@ private boolean isHistoricalTsFileEventCountExceededLimit(final PipeRealtimeEven >= PipeConfig.getInstance().getPipeMaxAllowedHistoricalTsFilePerDataRegion(); if (isHistoricalTsFileEventCountExceededLimit && event.mayExtractorUseTablets(this)) { LOGGER.info( - "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(2): The number of historical tsFile events {} has exceeded the limit {}", + "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(1) for tsFile {}: The number of historical tsFile events {} has exceeded the limit {}", pipeName, dataRegionId, + event.getTsFileEpoch().getFilePath(), extractor.getHistoricalTsFileInsertionEventCount(), PipeConfig.getInstance().getPipeMaxAllowedHistoricalTsFilePerDataRegion()); } @@ -384,9 +278,10 @@ private boolean isRealtimeTsFileEventCountExceededLimit(final PipeRealtimeEvent >= PipeConfig.getInstance().getPipeMaxAllowedPendingTsFileEpochPerDataRegion(); if (isRealtimeTsFileEventCountExceededLimit && event.mayExtractorUseTablets(this)) { LOGGER.info( - "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(3): The number of realtime tsFile events {} has exceeded the limit {}", + "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(2) for tsFile {}: The number of realtime tsFile events {} has exceeded the limit {}", pipeName, dataRegionId, + event.getTsFileEpoch().getFilePath(), pendingQueue.getTsFileInsertionEventCount(), PipeConfig.getInstance().getPipeMaxAllowedPendingTsFileEpochPerDataRegion()); } @@ -398,14 +293,15 @@ private boolean mayTsFileLinkedCountReachDangerousThreshold(final PipeRealtimeEv return false; } final boolean mayTsFileLinkedCountReachDangerousThreshold = - 
PipeDataNodeResourceManager.tsfile().getLinkedTsfileCount() + PipeDataNodeResourceManager.tsfile().getLinkedTsFileCount(pipeName) >= PipeConfig.getInstance().getPipeMaxAllowedLinkedTsFileCount(); if (mayTsFileLinkedCountReachDangerousThreshold && event.mayExtractorUseTablets(this)) { LOGGER.info( - "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(4): The number of linked tsFiles {} has reached the dangerous threshold {}", + "Pipe task {}@{} canNotUseTabletAnymoreDeprecated(3) for tsFile {}: The number of linked tsFiles {} has reached the dangerous threshold {}", pipeName, dataRegionId, - PipeDataNodeResourceManager.tsfile().getLinkedTsfileCount(), + event.getTsFileEpoch().getFilePath(), + PipeDataNodeResourceManager.tsfile().getLinkedTsFileCount(pipeName), PipeConfig.getInstance().getPipeMaxAllowedLinkedTsFileCount()); } return mayTsFileLinkedCountReachDangerousThreshold; @@ -440,6 +336,7 @@ public Event supply() { PipeRealtimeDataRegionHybridExtractor.class.getName(), false); if (suppliedEvent != null) { + maySkipIndex4Event(realtimeEvent); return suppliedEvent; } @@ -473,11 +370,14 @@ private Event supplyTabletInsertion(final PipeRealtimeEvent event) { }); final TsFileEpoch.State state = event.getTsFileEpoch().getState(this); + if (state == TsFileEpoch.State.USING_TSFILE) { + PipeTsFileEpochProgressIndexKeeper.getInstance() + .registerProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getResource()); + } + switch (state) { case USING_TSFILE: // If the state is USING_TSFILE, discard the event and poll the next one. - PipeTsFileEpochProgressIndexKeeper.getInstance() - .eliminateProgressIndex(dataRegionId, event.getTsFileEpoch().getFilePath()); return null; case EMPTY: case USING_TABLET: @@ -518,6 +418,8 @@ private Event supplyTsFileInsertion(final PipeRealtimeEvent event) { switch (state) { case USING_TABLET: // If the state is USING_TABLET, discard the event and poll the next one. 
+ PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getFilePath()); return null; case EMPTY: case USING_TSFILE: @@ -539,6 +441,8 @@ private Event supplyTsFileInsertion(final PipeRealtimeEvent event) { LOGGER.error(errorMessage); PipeDataNodeAgent.runtime() .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getFilePath()); return null; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java index 4b300355c80e..7549042e676c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionLogExtractor.java @@ -82,10 +82,7 @@ private void extractTabletInsertion(PipeRealtimeEvent event) { private void extractTsFileInsertion(PipeRealtimeEvent event) { final PipeTsFileInsertionEvent tsFileInsertionEvent = (PipeTsFileInsertionEvent) event.getEvent(); - if (!(tsFileInsertionEvent.isLoaded() - // some insert nodes in the tsfile epoch are not captured by pipe - || tsFileInsertionEvent.getFileStartTime() - < event.getTsFileEpoch().getInsertNodeMinTime())) { + if (!(tsFileInsertionEvent.isLoaded())) { // All data in the tsfile epoch has been extracted in tablet mode, so we should // simply ignore this event. 
event.decreaseReferenceCount(PipeRealtimeDataRegionLogExtractor.class.getName(), false); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java index 8072499b3daf..f189af5726de 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionTsFileExtractor.java @@ -25,6 +25,7 @@ import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; import org.apache.iotdb.db.pipe.event.common.schema.PipeSchemaRegionWritePlanEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; +import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.epoch.TsFileEpoch; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; @@ -50,6 +51,8 @@ protected void doExtract(PipeRealtimeEvent event) { } event.getTsFileEpoch().migrateState(this, state -> TsFileEpoch.State.USING_TSFILE); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .registerProgressIndex(dataRegionId, pipeName, event.getTsFileEpoch().getResource()); if (!(event.getEvent() instanceof TsFileInsertionEvent)) { event.decreaseReferenceCount(PipeRealtimeDataRegionTsFileExtractor.class.getName(), false); @@ -111,12 +114,16 @@ public Event supply() { LOGGER.error(errorMessage); PipeDataNodeAgent.runtime() .report(pipeTaskMeta, new PipeRuntimeNonCriticalException(errorMessage)); + PipeTsFileEpochProgressIndexKeeper.getInstance() + .eliminateProgressIndex( + dataRegionId, pipeName, 
realtimeEvent.getTsFileEpoch().getFilePath()); } realtimeEvent.decreaseReferenceCount( PipeRealtimeDataRegionTsFileExtractor.class.getName(), false); if (suppliedEvent != null) { + maySkipIndex4Event(realtimeEvent); return suppliedEvent; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java index ed91f636ac1a..8e415772438c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeDataRegionAssigner.java @@ -26,7 +26,6 @@ import org.apache.iotdb.commons.pipe.event.ProgressReportEvent; import org.apache.iotdb.commons.pipe.metric.PipeEventCounter; import org.apache.iotdb.db.pipe.event.common.heartbeat.PipeHeartbeatEvent; -import org.apache.iotdb.db.pipe.event.common.tablet.PipeInsertNodeTabletInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEvent; import org.apache.iotdb.db.pipe.event.realtime.PipeRealtimeEventFactory; @@ -35,7 +34,6 @@ import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.matcher.PipeDataRegionMatcher; import org.apache.iotdb.db.pipe.metric.source.PipeAssignerMetrics; import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionEventCounter; -import org.apache.iotdb.pipe.api.event.dml.insertion.TsFileInsertionEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,8 +58,6 @@ public class PipeDataRegionAssigner implements Closeable { private final String dataRegionId; - private int counter = 0; - private final AtomicReference maxProgressIndexForRealtimeEvent = new AtomicReference<>(MinimumProgressIndex.INSTANCE); @@ -107,10 +103,11 
@@ private void onAssignedHook(final PipeRealtimeEvent realtimeEvent) { realtimeEvent.decreaseReferenceCount(PipeDataRegionAssigner.class.getName(), false); final EnrichedEvent innerEvent = realtimeEvent.getEvent(); - eventCounter.decreaseEventCount(innerEvent); if (innerEvent instanceof PipeHeartbeatEvent) { ((PipeHeartbeatEvent) innerEvent).onAssigned(); } + + eventCounter.decreaseEventCount(innerEvent); } private void assignToExtractor( @@ -128,16 +125,6 @@ private void assignToExtractor( } if (event.getEvent().isGeneratedByPipe() && !extractor.isForwardingPipeRequests()) { - // The frequency of progress reports is limited by the counter, while progress - // reports to TsFileInsertionEvent are not limited. - if (!(event.getEvent() instanceof TsFileInsertionEvent)) { - if (counter < PIPE_CONFIG.getPipeNonForwardingEventsProgressReportInterval()) { - counter++; - return; - } - counter = 0; - } - final ProgressReportEvent reportEvent = new ProgressReportEvent( extractor.getPipeName(), @@ -173,11 +160,6 @@ private void assignToExtractor( extractor.isShouldTransferModFile()); } - if (innerEvent instanceof PipeTsFileInsertionEvent - || innerEvent instanceof PipeInsertNodeTabletInsertionEvent) { - bindOrUpdateProgressIndexForRealtimeEvent(copiedEvent); - } - if (!copiedEvent.increaseReferenceCount(PipeDataRegionAssigner.class.getName())) { LOGGER.warn( "The reference count of the event {} cannot be increased, skipping it.", @@ -188,34 +170,6 @@ private void assignToExtractor( }); } - private void bindOrUpdateProgressIndexForRealtimeEvent(final PipeRealtimeEvent event) { - if (PipeTsFileEpochProgressIndexKeeper.getInstance() - .isProgressIndexAfterOrEquals( - dataRegionId, - event.getTsFileEpoch().getFilePath(), - getProgressIndex4RealtimeEvent(event))) { - event.bindProgressIndex(maxProgressIndexForRealtimeEvent.get()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug( - "Data region {} bind {} to event {} because it was flushed prematurely.", - dataRegionId, - 
maxProgressIndexForRealtimeEvent, - event.coreReportMessage()); - } - } else { - maxProgressIndexForRealtimeEvent.updateAndGet( - index -> - index.updateToMinimumEqualOrIsAfterProgressIndex( - getProgressIndex4RealtimeEvent(event))); - } - } - - private ProgressIndex getProgressIndex4RealtimeEvent(final PipeRealtimeEvent event) { - return event.getEvent() instanceof PipeTsFileInsertionEvent - ? ((PipeTsFileInsertionEvent) event.getEvent()).forceGetProgressIndex() - : event.getProgressIndex(); - } - public void startAssignTo(final PipeRealtimeDataRegionExtractor extractor) { matcher.register(extractor); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java index da2cde90667e..50b7d778fc21 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/assigner/PipeTsFileEpochProgressIndexKeeper.java @@ -20,6 +20,9 @@ package org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner; import org.apache.iotdb.commons.consensus.index.ProgressIndex; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; + +import javax.annotation.Nonnull; import java.util.Map; import java.util.Map.Entry; @@ -28,39 +31,40 @@ public class PipeTsFileEpochProgressIndexKeeper { - // data region id -> (tsFile path, max progress index) - private final Map> progressIndexKeeper = + // data region id -> pipeName -> tsFile path -> max progress index + private final Map>> progressIndexKeeper = new ConcurrentHashMap<>(); - public synchronized void updateProgressIndex( - final String dataRegionId, final String tsFileName, final ProgressIndex progressIndex) { + 
public synchronized void registerProgressIndex( + final String dataRegionId, final String pipeName, final TsFileResource resource) { progressIndexKeeper .computeIfAbsent(dataRegionId, k -> new ConcurrentHashMap<>()) - .compute( - tsFileName, - (k, v) -> - v == null - ? progressIndex - : v.updateToMinimumEqualOrIsAfterProgressIndex(progressIndex)); + .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>()) + .putIfAbsent(resource.getTsFilePath(), resource); } public synchronized void eliminateProgressIndex( - final String dataRegionId, final String filePath) { + final String dataRegionId, final @Nonnull String pipeName, final String filePath) { progressIndexKeeper .computeIfAbsent(dataRegionId, k -> new ConcurrentHashMap<>()) + .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>()) .remove(filePath); } public synchronized boolean isProgressIndexAfterOrEquals( - final String dataRegionId, final String tsFilePath, final ProgressIndex progressIndex) { + final String dataRegionId, + final String pipeName, + final String tsFilePath, + final ProgressIndex progressIndex) { return progressIndexKeeper .computeIfAbsent(dataRegionId, k -> new ConcurrentHashMap<>()) + .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>()) .entrySet() .stream() .filter(entry -> !Objects.equals(entry.getKey(), tsFilePath)) .map(Entry::getValue) .filter(Objects::nonNull) - .anyMatch(index -> !index.isAfter(progressIndex)); + .anyMatch(resource -> !resource.getMaxProgressIndex().isAfter(progressIndex)); } //////////////////////////// singleton //////////////////////////// diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java index c2db4c77c86d..83b69ea60ec5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpoch.java @@ -21,6 +21,7 @@ import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor; import org.apache.iotdb.db.pipe.metric.source.PipeDataRegionExtractorMetrics; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -29,13 +30,13 @@ public class TsFileEpoch { - private final String filePath; + private final TsFileResource resource; private final ConcurrentMap> dataRegionExtractor2State; private final AtomicLong insertNodeMinTime; - public TsFileEpoch(final String filePath) { - this.filePath = filePath; + public TsFileEpoch(final TsFileResource resource) { + this.resource = resource; this.dataRegionExtractor2State = new ConcurrentHashMap<>(); this.insertNodeMinTime = new AtomicLong(Long.MAX_VALUE); } @@ -64,19 +65,19 @@ public void updateInsertNodeMinTime(final long newComingMinTime) { insertNodeMinTime.updateAndGet(recordedMinTime -> Math.min(recordedMinTime, newComingMinTime)); } - public long getInsertNodeMinTime() { - return insertNodeMinTime.get(); + public TsFileResource getResource() { + return resource; } public String getFilePath() { - return filePath; + return resource.getTsFilePath(); } @Override public String toString() { return "TsFileEpoch{" - + "filePath='" - + filePath + + "resource='" + + resource + '\'' + ", dataRegionExtractor2State=" + dataRegionExtractor2State diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java index 6ee0a7384526..9e18258dd17d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/epoch/TsFileEpochManager.java @@ -56,7 +56,7 @@ public PipeRealtimeEvent bindPipeTsFileInsertionEvent( filePath, path -> { LOGGER.info("TsFileEpoch not found for TsFile {}, creating a new one", path); - return new TsFileEpoch(path); + return new TsFileEpoch(resource); }); final TsFileEpoch epoch = filePath2Epoch.remove(filePath); @@ -79,7 +79,7 @@ public PipeRealtimeEvent bindPipeTsFileInsertionEvent( public PipeRealtimeEvent bindPipeInsertNodeTabletInsertionEvent( PipeInsertNodeTabletInsertionEvent event, InsertNode node, TsFileResource resource) { final TsFileEpoch epoch = - filePath2Epoch.computeIfAbsent(resource.getTsFilePath(), TsFileEpoch::new); + filePath2Epoch.computeIfAbsent(resource.getTsFilePath(), k -> new TsFileEpoch(resource)); epoch.updateInsertNodeMinTime(node.getMinTime()); return new PipeRealtimeEvent( event, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java index 452c7188dec3..86ac909b0deb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/listener/PipeInsertionDataNodeListener.java @@ -27,7 +27,6 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.DeleteDataNode; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -95,11 +94,7 @@ public synchronized void 
stopListenAndAssign( //////////////////////////// listen to events //////////////////////////// public void listenToTsFile( - final String dataRegionId, - final TsFileResource tsFileResource, - final boolean isLoaded, - final boolean isGeneratedByPipe) { - tsFileResource.setGeneratedByPipe(isGeneratedByPipe); + final String dataRegionId, final TsFileResource tsFileResource, final boolean isLoaded) { // We don't judge whether listenToTsFileExtractorCount.get() == 0 here on purpose // because extractors may use tsfile events when some exceptions occur in the // insert nodes listening process. @@ -116,10 +111,7 @@ public void listenToTsFile( } public void listenToInsertNode( - String dataRegionId, - WALEntryHandler walEntryHandler, - InsertNode insertNode, - TsFileResource tsFileResource) { + String dataRegionId, InsertNode insertNode, TsFileResource tsFileResource) { if (listenToInsertNodeExtractorCount.get() == 0) { return; } @@ -132,7 +124,7 @@ public void listenToInsertNode( } assigner.publishToAssign( - PipeRealtimeEventFactory.createRealtimeEvent(walEntryHandler, insertNode, tsFileResource)); + PipeRealtimeEventFactory.createRealtimeEvent(insertNode, tsFileResource)); } public void listenToHeartbeat(boolean shouldPrintMessage) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java index dd3873feaf46..d7f1577a85c6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeMetrics.java @@ -24,7 +24,6 @@ import org.apache.iotdb.db.pipe.metric.overview.PipeHeartbeatEventMetrics; import org.apache.iotdb.db.pipe.metric.overview.PipeResourceMetrics; import org.apache.iotdb.db.pipe.metric.overview.PipeTsFileToTabletsMetrics; -import 
org.apache.iotdb.db.pipe.metric.overview.PipeWALInsertNodeCacheMetrics; import org.apache.iotdb.db.pipe.metric.processor.PipeProcessorMetrics; import org.apache.iotdb.db.pipe.metric.receiver.PipeDataNodeReceiverMetrics; import org.apache.iotdb.db.pipe.metric.schema.PipeSchemaRegionConnectorMetrics; @@ -47,7 +46,6 @@ public void bindTo(final AbstractMetricService metricService) { PipeProcessorMetrics.getInstance().bindTo(metricService); PipeDataRegionConnectorMetrics.getInstance().bindTo(metricService); PipeHeartbeatEventMetrics.getInstance().bindTo(metricService); - PipeWALInsertNodeCacheMetrics.getInstance().bindTo(metricService); PipeResourceMetrics.getInstance().bindTo(metricService); PipeEventCommitMetrics.getInstance().bindTo(metricService); PipeSchemaRegionListenerMetrics.getInstance().bindTo(metricService); @@ -65,7 +63,6 @@ public void unbindFrom(final AbstractMetricService metricService) { PipeProcessorMetrics.getInstance().unbindFrom(metricService); PipeDataRegionConnectorMetrics.getInstance().unbindFrom(metricService); PipeHeartbeatEventMetrics.getInstance().unbindFrom(metricService); - PipeWALInsertNodeCacheMetrics.getInstance().unbindFrom(metricService); PipeResourceMetrics.getInstance().unbindFrom(metricService); PipeEventCommitMetrics.getInstance().unbindFrom(metricService); PipeSchemaRegionListenerMetrics.getInstance().unbindFrom(metricService); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java index 73d582853458..0308e9b5b63a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeRemainingEventAndTimeOperator.java @@ -59,8 +59,6 @@ public class 
PipeDataNodeRemainingEventAndTimeOperator extends PipeRemainingOper private Timer insertNodeTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer tsfileTransferTimer = DoNothingMetricManager.DO_NOTHING_TIMER; - private final InsertNodeEMA insertNodeEventCountEMA = new InsertNodeEMA(); - private double lastDataRegionCommitSmoothingValue = Long.MAX_VALUE; private double lastSchemaRegionCommitSmoothingValue = Long.MAX_VALUE; @@ -102,11 +100,6 @@ void decreaseHeartbeatEventCount() { heartbeatEventCount.decrementAndGet(); } - double getRemainingInsertEventSmoothingCount() { - insertNodeEventCountEMA.update(insertNodeEventCount.get()); - return insertNodeEventCountEMA.insertNodeEMAValue; - } - public long getRemainingNonHeartbeatEvents() { final long remainingEvents = tsfileEventCount.get() @@ -147,7 +140,7 @@ long getRemainingEvents() { * * @return The estimated remaining time */ - double getRemainingTime() { + public double getRemainingTime() { final PipeRateAverage pipeRemainingTimeCommitRateAverageTime = PipeConfig.getInstance().getPipeRemainingTimeCommitRateAverageTime(); @@ -282,17 +275,4 @@ public synchronized void freezeRate(final boolean isStopPipe) { dataRegionCommitMeter.set(null); schemaRegionCommitMeter.set(null); } - - private static class InsertNodeEMA { - private double insertNodeEMAValue; - - public void update(final double newValue) { - final double alpha = PipeConfig.getInstance().getPipeRemainingInsertNodeCountEMAAlpha(); - if (insertNodeEMAValue == 0) { - insertNodeEMAValue = newValue; - } else { - insertNodeEMAValue = alpha * newValue + (1 - alpha) * insertNodeEMAValue; - } - } - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java index 677d758a162a..1840093a3475 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeDataNodeSinglePipeMetrics.java @@ -20,11 +20,12 @@ package org.apache.iotdb.db.pipe.metric.overview; import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; -import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.impl.DoNothingMetricManager; import org.apache.iotdb.metrics.metricsets.IMetricSet; @@ -54,7 +55,7 @@ public class PipeDataNodeSinglePipeMetrics implements IMetricSet { private static Histogram PIPE_DATANODE_INSERTNODE_TRANSFER_TIME_HISTOGRAM = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; - private static Histogram PIPE_DATANODE_EVENT_TRANSFER_TIME_HISTOGRAM = + private static Histogram PIPE_DATANODE_TSFILE_TRANSFER_TIME_HISTOGRAM = DoNothingMetricManager.DO_NOTHING_HISTOGRAM; //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @@ -68,7 +69,7 @@ public void bindTo(final AbstractMetricService metricService) { MetricLevel.IMPORTANT, Tag.NAME.toString(), "insert_node"); - PIPE_DATANODE_EVENT_TRANSFER_TIME_HISTOGRAM = + PIPE_DATANODE_TSFILE_TRANSFER_TIME_HISTOGRAM = metricService.getOrCreateHistogram( Metric.PIPE_DATANODE_EVENT_TRANSFER.toString(), MetricLevel.IMPORTANT, @@ -103,6 +104,35 @@ private void createAutoGauge(final String pipeID) { Tag.CREATION_TIME.toString(), 
String.valueOf(operator.getCreationTime())); + // Resources + metricService.createAutoGauge( + Metric.PIPE_FLOATING_MEMORY_USAGE.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeAgent.task(), + a -> a.getFloatingMemoryUsageInByte(operator.getPipeName()), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_LINKED_TSFILE_COUNT.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeResourceManager.tsfile(), + a -> a.getLinkedTsFileCount(operator.getPipeName()), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.createAutoGauge( + Metric.PIPE_LINKED_TSFILE_SIZE.toString(), + MetricLevel.IMPORTANT, + PipeDataNodeResourceManager.tsfile(), + a -> a.getTotalLinkedTsFileSize(operator.getPipeName()), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + operator.setInsertNodeTransferTimer( metricService.getOrCreateTimer( Metric.PIPE_INSERT_NODE_EVENT_TRANSFER_TIME.toString(), @@ -118,31 +148,6 @@ private void createAutoGauge(final String pipeID) { operator.getPipeName())); } - public boolean mayRemainingInsertEventExceedLimit(final String pipeID) { - if (Objects.isNull(metricService)) { - return true; - } - - if (remainingEventAndTimeOperatorMap.values().stream() - .map(PipeDataNodeRemainingEventAndTimeOperator::getRemainingInsertEventSmoothingCount) - .reduce(0d, Double::sum) - > PipeConfig.getInstance().getPipeMaxAllowedTotalRemainingInsertEventCount()) { - return true; - } - - final PipeDataNodeRemainingEventAndTimeOperator operator = - remainingEventAndTimeOperatorMap.get(pipeID); - if (Objects.isNull(operator)) { - LOGGER.warn( - "Failed to get remaining insert event, RemainingEventAndTimeOperator({}) does not exist, will degrade anyway", - pipeID); - return true; - } - - return 
operator.getRemainingInsertEventSmoothingCount() - > PipeConfig.getInstance().getPipeMaxAllowedRemainingInsertEventCountPerPipe(); - } - @Override public void unbindFrom(final AbstractMetricService metricService) { ImmutableSet.copyOf(remainingEventAndTimeOperatorMap.keySet()).forEach(this::deregister); @@ -184,6 +189,27 @@ private void removeAutoGauge(final String pipeID) { operator.getPipeName(), Tag.CREATION_TIME.toString(), String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_FLOATING_MEMORY_USAGE.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_LINKED_TSFILE_COUNT.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_LINKED_TSFILE_SIZE.toString(), + Tag.NAME.toString(), + operator.getPipeName(), + Tag.CREATION_TIME.toString(), + String.valueOf(operator.getCreationTime())); metricService.remove( MetricType.TIMER, Metric.PIPE_INSERT_NODE_EVENT_TRANSFER_TIME.toString(), @@ -280,7 +306,7 @@ public void decreaseTsFileEventCount( operator.decreaseTsFileEventCount(); operator.getTsFileTransferTimer().update(transferTime, TimeUnit.NANOSECONDS); - PIPE_DATANODE_EVENT_TRANSFER_TIME_HISTOGRAM.update(transferTime); + PIPE_DATANODE_TSFILE_TRANSFER_TIME_HISTOGRAM.update(transferTime); } public void increaseHeartbeatEventCount(final String pipeName, final long creationTime) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java index c9115575000c..37f8eb5e26aa 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeResourceMetrics.java @@ -22,10 +22,9 @@ import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; import org.apache.iotdb.commons.service.metric.enums.Metric; import org.apache.iotdb.commons.service.metric.enums.Tag; +import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; -import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; import org.apache.iotdb.metrics.AbstractMetricService; import org.apache.iotdb.metrics.metricsets.IMetricSet; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -34,6 +33,7 @@ public class PipeResourceMetrics implements IMetricSet { private static final String PIPE_USED_MEMORY = "PipeUsedMemory"; + private static final String PIPE_USED_FLOATING_MEMORY = "PipeUsedFloatingMemory"; private static final String PIPE_TABLET_USED_MEMORY = "PipeTabletUsedMemory"; @@ -41,6 +41,8 @@ public class PipeResourceMetrics implements IMetricSet { private static final String PIPE_TOTAL_MEMORY = "PipeTotalMemory"; + private static final String PIPE_FLOATING_MEMORY = "PipeFloatingMemory"; + //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// @Override @@ -74,22 +76,20 @@ public void bindTo(final AbstractMetricService metricService) { o -> PipeMemoryManager.getTotalNonFloatingMemorySizeInBytes(), Tag.NAME.toString(), PIPE_TOTAL_MEMORY); - // resource reference count metricService.createAutoGauge( - Metric.PIPE_PINNED_MEMTABLE_COUNT.toString(), - MetricLevel.IMPORTANT, - PipeDataNodeResourceManager.wal(), - PipeWALResourceManager::getPinnedWalCount); - metricService.createAutoGauge( - 
Metric.PIPE_LINKED_TSFILE_COUNT.toString(), + Metric.PIPE_MEM.toString(), MetricLevel.IMPORTANT, - PipeDataNodeResourceManager.tsfile(), - PipeTsFileResourceManager::getLinkedTsfileCount); + PipeDataNodeResourceManager.memory(), + o -> PipeMemoryManager.getTotalFloatingMemorySizeInBytes(), + Tag.NAME.toString(), + PIPE_FLOATING_MEMORY); metricService.createAutoGauge( - Metric.PIPE_LINKED_TSFILE_SIZE.toString(), + Metric.PIPE_MEM.toString(), MetricLevel.IMPORTANT, - PipeDataNodeResourceManager.tsfile(), - PipeTsFileResourceManager::getTotalLinkedTsfileSize); + PipeDataNodeResourceManager.memory(), + o -> PipeDataNodeAgent.task().getAllFloatingMemoryUsageInByte(), + Tag.NAME.toString(), + PIPE_USED_FLOATING_MEMORY); // phantom reference count metricService.createAutoGauge( Metric.PIPE_PHANTOM_REFERENCE_COUNT.toString(), @@ -115,8 +115,17 @@ public void unbindFrom(final AbstractMetricService metricService) { PIPE_TS_FILE_USED_MEMORY); metricService.remove( MetricType.AUTO_GAUGE, Metric.PIPE_MEM.toString(), Tag.NAME.toString(), PIPE_TOTAL_MEMORY); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_MEM.toString(), + Tag.NAME.toString(), + PIPE_FLOATING_MEMORY); + metricService.remove( + MetricType.AUTO_GAUGE, + Metric.PIPE_MEM.toString(), + Tag.NAME.toString(), + PIPE_USED_FLOATING_MEMORY); // resource reference count - metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_PINNED_MEMTABLE_COUNT.toString()); metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_LINKED_TSFILE_COUNT.toString()); metricService.remove(MetricType.AUTO_GAUGE, Metric.PIPE_LINKED_TSFILE_SIZE.toString()); // phantom reference count diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeWALInsertNodeCacheMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeWALInsertNodeCacheMetrics.java deleted file mode 100644 index b2e605bf84c1..000000000000 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/overview/PipeWALInsertNodeCacheMetrics.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.metric.overview; - -import org.apache.iotdb.commons.service.metric.enums.Metric; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache; -import org.apache.iotdb.metrics.AbstractMetricService; -import org.apache.iotdb.metrics.metricsets.IMetricSet; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.metrics.utils.MetricType; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PipeWALInsertNodeCacheMetrics implements IMetricSet { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALInsertNodeCacheMetrics.class); - - //////////////////////////// bindTo & unbindFrom (metric framework) //////////////////////////// - - @Override - public void bindTo(AbstractMetricService metricService) { - metricService.createAutoGauge( - Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE.toString(), - MetricLevel.IMPORTANT, - WALInsertNodeCache.getInstance(), - WALInsertNodeCache::getCacheHitRate); - 
metricService.createAutoGauge( - Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT.toString(), - MetricLevel.IMPORTANT, - WALInsertNodeCache.getInstance(), - WALInsertNodeCache::getCacheHitCount); - metricService.createAutoGauge( - Metric.PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT.toString(), - MetricLevel.IMPORTANT, - WALInsertNodeCache.getInstance(), - WALInsertNodeCache::getCacheRequestCount); - } - - @Override - public void unbindFrom(AbstractMetricService metricService) { - metricService.remove( - MetricType.AUTO_GAUGE, Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE.toString()); - metricService.remove( - MetricType.AUTO_GAUGE, Metric.PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT.toString()); - metricService.remove( - MetricType.AUTO_GAUGE, Metric.PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT.toString()); - } - - //////////////////////////// singleton //////////////////////////// - - private static class PipeWALInsertNodeCacheMetricsHolder { - - private static final PipeWALInsertNodeCacheMetrics INSTANCE = - new PipeWALInsertNodeCacheMetrics(); - - private PipeWALInsertNodeCacheMetricsHolder() { - // empty constructor - } - } - - public static PipeWALInsertNodeCacheMetrics getInstance() { - return PipeWALInsertNodeCacheMetricsHolder.INSTANCE; - } - - private PipeWALInsertNodeCacheMetrics() { - // empty constructor - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionConnectorMetrics.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionConnectorMetrics.java index 80ebb272c96e..49ac24f3e4cd 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionConnectorMetrics.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/sink/PipeDataRegionConnectorMetrics.java @@ -23,7 +23,9 @@ import org.apache.iotdb.commons.service.metric.enums.Tag; import org.apache.iotdb.db.pipe.agent.task.subtask.connector.PipeConnectorSubtask; import 
org.apache.iotdb.metrics.AbstractMetricService; +import org.apache.iotdb.metrics.impl.DoNothingHistogram; import org.apache.iotdb.metrics.metricsets.IMetricSet; +import org.apache.iotdb.metrics.type.Histogram; import org.apache.iotdb.metrics.type.Rate; import org.apache.iotdb.metrics.type.Timer; import org.apache.iotdb.metrics.utils.MetricLevel; @@ -44,6 +46,14 @@ public class PipeDataRegionConnectorMetrics implements IMetricSet { private static final Logger LOGGER = LoggerFactory.getLogger(PipeDataRegionConnectorMetrics.class); + public static Histogram tabletBatchSizeHistogram = new DoNothingHistogram(); + + public static Histogram tsFileBatchSizeHistogram = new DoNothingHistogram(); + + public static Histogram tabletBatchTimeIntervalHistogram = new DoNothingHistogram(); + + public static Histogram tsFileBatchTimeIntervalHistogram = new DoNothingHistogram(); + @SuppressWarnings("java:S3077") private volatile AbstractMetricService metricService; @@ -66,6 +76,22 @@ public void bindTo(final AbstractMetricService metricService) { for (String taskID : taskIDs) { createMetrics(taskID); } + + tabletBatchSizeHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_INSERT_NODE_BATCH_SIZE.toString(), MetricLevel.IMPORTANT); + + tsFileBatchSizeHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_TSFILE_BATCH_SIZE.toString(), MetricLevel.IMPORTANT); + + tabletBatchTimeIntervalHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_INSERT_NODE_BATCH_TIME_COST.toString(), MetricLevel.IMPORTANT); + + tsFileBatchTimeIntervalHistogram = + metricService.getOrCreateHistogram( + Metric.PIPE_TSFILE_BATCH_TIME_COST.toString(), MetricLevel.IMPORTANT); } private void createMetrics(final String taskID) { @@ -230,6 +256,14 @@ public void unbindFrom(final AbstractMetricService metricService) { LOGGER.warn( "Failed to unbind from pipe data region connector metrics, connector map not empty"); } + + metricService.remove(MetricType.HISTOGRAM, 
Metric.PIPE_INSERT_NODE_BATCH_SIZE.toString()); + + metricService.remove(MetricType.HISTOGRAM, Metric.PIPE_TSFILE_BATCH_SIZE.toString()); + + metricService.remove(MetricType.HISTOGRAM, Metric.PIPE_INSERT_NODE_BATCH_TIME_COST.toString()); + + metricService.remove(MetricType.HISTOGRAM, Metric.PIPE_TSFILE_BATCH_TIME_COST.toString()); } private void removeMetrics(final String taskID) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java index cb1ba0b9ad9c..0e1f0f2b0334 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/CombineRequest.java @@ -114,7 +114,6 @@ private CombineRequest translateFromTPipeTransferReq(TPipeTransferReq transferRe version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java index b20904a0e2b2..752be403008c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/processor/twostage/exchange/payload/FetchCombineResultRequest.java @@ -99,7 +99,6 @@ private FetchCombineResultRequest translateFromTPipeTransferReq(TPipeTransferReq version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java index 00a7e4c46a09..5009cae8a516 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java @@ -174,6 +174,14 @@ public synchronized TPipeTransferResp receive(final TPipeTransferReq req) { case HANDSHAKE_DATANODE_V1: { try { + if (PipeConfig.getInstance().isPipeEnableMemoryCheck() + && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes() + < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) { + return new TPipeTransferResp( + RpcUtils.getStatus( + TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(), + "The receiver memory is not enough to handle the handshake request from datanode.")); + } return handleTransferHandshakeV1( PipeTransferDataNodeHandshakeV1Req.fromTPipeTransferReq(req)); } finally { @@ -184,6 +192,14 @@ public synchronized TPipeTransferResp receive(final TPipeTransferReq req) { case HANDSHAKE_DATANODE_V2: { try { + if (PipeConfig.getInstance().isPipeEnableMemoryCheck() + && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes() + < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) { + return new TPipeTransferResp( + RpcUtils.getStatus( + TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(), + "The receiver memory is not enough to handle the handshake request from datanode.")); + } return handleTransferHandshakeV2( PipeTransferDataNodeHandshakeV2Req.fromTPipeTransferReq(req)); } finally { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java index 
573106e45c2e..aaf9eff454b8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/PipeDataNodeResourceManager.java @@ -19,24 +19,19 @@ package org.apache.iotdb.db.pipe.resource; -import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.resource.log.PipeLogManager; import org.apache.iotdb.commons.pipe.resource.ref.PipePhantomReferenceManager; import org.apache.iotdb.commons.pipe.resource.snapshot.PipeSnapshotResourceManager; import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryManager; import org.apache.iotdb.db.pipe.resource.ref.PipeDataNodePhantomReferenceManager; import org.apache.iotdb.db.pipe.resource.snapshot.PipeDataNodeSnapshotResourceManager; +import org.apache.iotdb.db.pipe.resource.tsfile.PipeCompactionManager; import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; -import org.apache.iotdb.db.pipe.resource.wal.hardlink.PipeWALHardlinkResourceManager; -import org.apache.iotdb.db.pipe.resource.wal.selfhost.PipeWALSelfHostResourceManager; - -import java.util.concurrent.atomic.AtomicReference; public class PipeDataNodeResourceManager { private final PipeTsFileResourceManager pipeTsFileResourceManager; - private final AtomicReference pipeWALResourceManager; + private final PipeCompactionManager pipeCompactionManager; private final PipeSnapshotResourceManager pipeSnapshotResourceManager; private final PipeMemoryManager pipeMemoryManager; private final PipeLogManager pipeLogManager; @@ -46,18 +41,8 @@ public static PipeTsFileResourceManager tsfile() { return PipeResourceManagerHolder.INSTANCE.pipeTsFileResourceManager; } - public static PipeWALResourceManager wal() { - if (PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get() == null) { - synchronized (PipeResourceManagerHolder.INSTANCE) { - 
if (PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get() == null) { - PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.set( - PipeConfig.getInstance().getPipeHardLinkWALEnabled() - ? new PipeWALHardlinkResourceManager() - : new PipeWALSelfHostResourceManager()); - } - } - } - return PipeResourceManagerHolder.INSTANCE.pipeWALResourceManager.get(); + public static PipeCompactionManager compaction() { + return PipeResourceManagerHolder.INSTANCE.pipeCompactionManager; } public static PipeSnapshotResourceManager snapshot() { @@ -80,7 +65,7 @@ public static PipePhantomReferenceManager ref() { private PipeDataNodeResourceManager() { pipeTsFileResourceManager = new PipeTsFileResourceManager(); - pipeWALResourceManager = new AtomicReference<>(); + pipeCompactionManager = new PipeCompactionManager(); pipeSnapshotResourceManager = new PipeDataNodeSnapshotResourceManager(); pipeMemoryManager = new PipeMemoryManager(); pipeLogManager = new PipeLogManager(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java index 690a7c540838..8a87cf8bad0a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryManager.java @@ -54,9 +54,6 @@ public class PipeMemoryManager { private volatile long usedMemorySizeInBytesOfTsFiles; - private static final double FLOATING_MEMORY_RATIO = - PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion(); - // Only non-zero memory blocks will be added to this set. 
private final Set allocatedBlocks = new HashSet<>(); @@ -104,18 +101,6 @@ private static double allowedMaxMemorySizeInBytesOfTsTiles() { * getTotalNonFloatingMemorySizeInBytes(); } - public long getAllocatedMemorySizeInBytesOfWAL() { - return (long) - (PipeConfig.getInstance().getPipeDataStructureWalMemoryProportion() - * getTotalNonFloatingMemorySizeInBytes()); - } - - public long getAllocatedMemorySizeInBytesOfBatch() { - return (long) - (PipeConfig.getInstance().getPipeDataStructureBatchMemoryProportion() - * getTotalNonFloatingMemorySizeInBytes()); - } - public boolean isEnough4TabletParsing() { return (double) usedMemorySizeInBytesOfTablets + (double) usedMemorySizeInBytesOfTsFiles < EXCEED_PROTECT_THRESHOLD * allowedMaxMemorySizeInBytesOfTabletsAndTsFiles() @@ -648,15 +633,29 @@ public long getUsedMemorySizeInBytesOfTsFiles() { return usedMemorySizeInBytesOfTsFiles; } + public long getAllocatedMemorySizeInBytesOfBatch() { + return (long) + (PipeConfig.getInstance().getPipeDataStructureBatchMemoryProportion() + * getTotalNonFloatingMemorySizeInBytes()); + } + public long getFreeMemorySizeInBytes() { return TOTAL_MEMORY_SIZE_IN_BYTES - usedMemorySizeInBytes; } public static long getTotalNonFloatingMemorySizeInBytes() { - return (long) (TOTAL_MEMORY_SIZE_IN_BYTES * (1 - FLOATING_MEMORY_RATIO)); + return (long) + (TOTAL_MEMORY_SIZE_IN_BYTES + * (1 - PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion())); } public static long getTotalFloatingMemorySizeInBytes() { - return (long) (TOTAL_MEMORY_SIZE_IN_BYTES * FLOATING_MEMORY_RATIO); + return (long) + (TOTAL_MEMORY_SIZE_IN_BYTES + * PipeConfig.getInstance().getPipeTotalFloatingMemoryProportion()); + } + + public static long getTotalMemorySizeInBytes() { + return TOTAL_MEMORY_SIZE_IN_BYTES; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java index cd9a0a79cae0..d82c26144d6c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/memory/PipeMemoryWeightUtil.java @@ -247,8 +247,8 @@ public static long calculateTabletSizeInBytes(Tablet tablet) { return totalSizeInBytes; } - public static long calculateBatchDataRamBytesUsed(BatchData batchData) { - long totalSizeInBytes = 0; + public static int calculateBatchDataRamBytesUsed(BatchData batchData) { + int totalSizeInBytes = 0; // timestamp totalSizeInBytes += 8; @@ -263,16 +263,16 @@ public static long calculateBatchDataRamBytesUsed(BatchData batchData) { continue; } // consider variable references (plus 8) and memory alignment (round up to 8) - totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8L, 8); + totalSizeInBytes += roundUpToMultiple(primitiveType.getSize() + 8, 8); } } else { if (type.isBinary()) { final Binary binary = batchData.getBinary(); // refer to org.apache.tsfile.utils.TsPrimitiveType.TsBinary.getSize totalSizeInBytes += - roundUpToMultiple((binary == null ? 8 : binary.ramBytesUsed()) + 8L, 8); + roundUpToMultiple((binary == null ? 8 : binary.getLength() + 8) + 8, 8); } else { - totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8L, 8); + totalSizeInBytes += roundUpToMultiple(TsPrimitiveType.getByType(type).getSize() + 8, 8); } } } @@ -287,7 +287,7 @@ public static long calculateBatchDataRamBytesUsed(BatchData batchData) { * @param n The specified multiple. * @return The nearest multiple of n greater than or equal to num. 
*/ - private static long roundUpToMultiple(long num, int n) { + private static int roundUpToMultiple(int num, int n) { if (n == 0) { throw new IllegalArgumentException("The multiple n must be greater than 0"); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java new file mode 100644 index 000000000000..03860fda8f85 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeCompactionManager.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.pipe.resource.tsfile; + +import org.apache.iotdb.commons.pipe.agent.task.connection.UnboundedBlockingPendingQueue; +import org.apache.iotdb.db.pipe.agent.task.subtask.connector.PipeConnectorSubtaskLifeCycle; +import org.apache.iotdb.db.pipe.agent.task.subtask.connector.PipeRealtimePriorityBlockingQueue; +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; +import org.apache.iotdb.pipe.api.event.Event; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; + +public class PipeCompactionManager { + + private final Set pipeConnectorSubtaskLifeCycles = + new CopyOnWriteArraySet<>(); + + public void registerPipeConnectorSubtaskLifeCycle( + final PipeConnectorSubtaskLifeCycle pipeConnectorSubtaskLifeCycle) { + pipeConnectorSubtaskLifeCycles.add(pipeConnectorSubtaskLifeCycle); + } + + public void deregisterPipeConnectorSubtaskLifeCycle( + final PipeConnectorSubtaskLifeCycle pipeConnectorSubtaskLifeCycle) { + pipeConnectorSubtaskLifeCycles.remove(pipeConnectorSubtaskLifeCycle); + } + + public void emitResult( + final String storageGroupName, + final String dataRegionId, + final long timePartition, + final List seqFileResources, + final List unseqFileResources, + final List targetFileResources) { + final Set sourceFileResources = new HashSet<>(seqFileResources); + sourceFileResources.addAll(unseqFileResources); + + for (final PipeConnectorSubtaskLifeCycle lifeCycle : pipeConnectorSubtaskLifeCycles) { + final UnboundedBlockingPendingQueue pendingQueue = lifeCycle.getPendingQueue(); + if (pendingQueue instanceof PipeRealtimePriorityBlockingQueue) { + final PipeRealtimePriorityBlockingQueue realtimePriorityBlockingQueue = + (PipeRealtimePriorityBlockingQueue) pendingQueue; + realtimePriorityBlockingQueue.replace( + dataRegionId, sourceFileResources, targetFileResources); + } + } + } +} diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java new file mode 100644 index 000000000000..47134fe117c9 --- /dev/null +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFilePublicResource.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.pipe.resource.tsfile; + +import org.apache.iotdb.commons.pipe.config.PipeConfig; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; +import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; + +import org.apache.tsfile.enums.TSDataType; +import org.apache.tsfile.file.metadata.IDeviceID; +import org.apache.tsfile.read.TsFileDeviceIterator; +import org.apache.tsfile.read.TsFileSequenceReader; +import org.apache.tsfile.utils.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class PipeTsFilePublicResource extends PipeTsFileResource { + private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFilePublicResource.class); + public static final float MEMORY_SUFFICIENT_THRESHOLD = 0.7f; + private PipeMemoryBlock allocatedMemoryBlock; + private Map> deviceMeasurementsMap = null; + private Map deviceIsAlignedMap = null; + private Map measurementDataTypeMap = null; + + public PipeTsFilePublicResource(File hardlinkOrCopiedFile) { + super(hardlinkOrCopiedFile); + } + + @Override + public void close() { + super.close(); + + if (deviceMeasurementsMap != null) { + deviceMeasurementsMap = null; + } + + if (deviceIsAlignedMap != null) { + deviceIsAlignedMap = null; + } + + if (measurementDataTypeMap != null) { + measurementDataTypeMap = null; + } + + if (allocatedMemoryBlock != null) { + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + } + } + + //////////////////////////// Cache Getter //////////////////////////// + + public synchronized Map> tryGetDeviceMeasurementsMap(final File tsFile) + throws IOException { + if (deviceMeasurementsMap == null) { + cacheObjectsIfAbsent(tsFile); + } + return deviceMeasurementsMap; + } + + public synchronized Map 
tryGetDeviceIsAlignedMap( + final boolean cacheOtherMetadata, final File tsFile) throws IOException { + if (deviceIsAlignedMap == null) { + if (cacheOtherMetadata) { + cacheObjectsIfAbsent(tsFile); + } else { + cacheDeviceIsAlignedMapIfAbsent(tsFile); + } + } + return deviceIsAlignedMap; + } + + public synchronized Map tryGetMeasurementDataTypeMap(final File tsFile) + throws IOException { + if (measurementDataTypeMap == null) { + cacheObjectsIfAbsent(tsFile); + } + return measurementDataTypeMap; + } + + synchronized boolean cacheDeviceIsAlignedMapIfAbsent(final File tsFile) throws IOException { + + if (allocatedMemoryBlock != null) { + // This means objects are already cached. + return true; + } + + // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. + // Only allocate when pipe memory used is less than 50%, because memory here + // is hard to shrink and may consume too much memory. + allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient( + PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), + MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high", + tsFile.getPath()); + return false; + } + + long memoryRequiredInBytes = 0L; + try (TsFileSequenceReader sequenceReader = + new TsFileSequenceReader(tsFile.getPath(), true, false)) { + deviceIsAlignedMap = new HashMap<>(); + final TsFileDeviceIterator deviceIsAlignedIterator = + sequenceReader.getAllDevicesIteratorWithIsAligned(); + while (deviceIsAlignedIterator.hasNext()) { + final Pair deviceIsAlignedPair = deviceIsAlignedIterator.next(); + deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); + } + memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); + } + // Release memory of TsFileSequenceReader. 
+ allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + + // Allocate again for the cached objects. + allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", + tsFile.getPath()); + deviceIsAlignedMap = null; + return false; + } + + LOGGER.info("PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.", tsFile.getPath()); + return true; + } + + synchronized boolean cacheObjectsIfAbsent(final File tsFile) throws IOException { + if (allocatedMemoryBlock != null) { + if (deviceMeasurementsMap != null) { + return true; + } else { + // Recalculate it again because only deviceIsAligned map is cached + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + } + } + + // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. + // Only allocate when pipe memory used is less than 50%, because memory here + // is hard to shrink and may consume too much memory. 
+ allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient( + PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), + MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high", + tsFile.getPath()); + return false; + } + + long memoryRequiredInBytes = 0L; + try (TsFileSequenceReader sequenceReader = + new TsFileSequenceReader(tsFile.getPath(), true, true)) { + deviceMeasurementsMap = sequenceReader.getDeviceMeasurementsMap(); + memoryRequiredInBytes += + PipeMemoryWeightUtil.memoryOfIDeviceID2StrList(deviceMeasurementsMap); + + if (Objects.isNull(deviceIsAlignedMap)) { + deviceIsAlignedMap = new HashMap<>(); + final TsFileDeviceIterator deviceIsAlignedIterator = + sequenceReader.getAllDevicesIteratorWithIsAligned(); + while (deviceIsAlignedIterator.hasNext()) { + final Pair deviceIsAlignedPair = deviceIsAlignedIterator.next(); + deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); + } + } + memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); + + measurementDataTypeMap = sequenceReader.getFullPathDataTypeMap(); + memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfStr2TSDataType(measurementDataTypeMap); + } + // Release memory of TsFileSequenceReader. + allocatedMemoryBlock.close(); + allocatedMemoryBlock = null; + + // Allocate again for the cached objects. 
+ allocatedMemoryBlock = + PipeDataNodeResourceManager.memory() + .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); + if (allocatedMemoryBlock == null) { + LOGGER.info( + "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", + tsFile.getPath()); + deviceIsAlignedMap = null; + deviceMeasurementsMap = null; + measurementDataTypeMap = null; + return false; + } + + LOGGER.info("PipeTsFileResource: Cached objects for tsfile {}.", tsFile.getPath()); + return true; + } +} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java index bf789b9732d6..8b37f8770944 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResource.java @@ -19,73 +19,33 @@ package org.apache.iotdb.db.pipe.resource.tsfile; -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlock; -import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil; -import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResourceStatus; - -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.file.metadata.IDeviceID; -import org.apache.tsfile.read.TsFileDeviceIterator; -import org.apache.tsfile.read.TsFileSequenceReader; -import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.io.IOException; import java.nio.file.Files; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; import 
java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; public class PipeTsFileResource implements AutoCloseable { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResource.class); - public static final long TSFILE_MIN_TIME_TO_LIVE_IN_MS = 1000L * 20; - public static final float MEMORY_SUFFICIENT_THRESHOLD = 0.7f; - private final File hardlinkOrCopiedFile; - private final boolean isTsFile; - - /** this TsFileResource is used to track the {@link TsFileResourceStatus} of original TsFile. * */ - private final TsFileResource tsFileResource; private volatile long fileSize = -1L; private final AtomicInteger referenceCount; - private final AtomicLong lastUnpinToZeroTime; - private PipeMemoryBlock allocatedMemoryBlock; - private Map> deviceMeasurementsMap = null; - private Map deviceIsAlignedMap = null; - private Map measurementDataTypeMap = null; - public PipeTsFileResource( - final File hardlinkOrCopiedFile, - final boolean isTsFile, - final TsFileResource tsFileResource) { + public PipeTsFileResource(final File hardlinkOrCopiedFile) { this.hardlinkOrCopiedFile = hardlinkOrCopiedFile; - this.isTsFile = isTsFile; - this.tsFileResource = tsFileResource; referenceCount = new AtomicInteger(1); - lastUnpinToZeroTime = new AtomicLong(Long.MAX_VALUE); } public File getFile() { return hardlinkOrCopiedFile; } - public boolean isOriginalTsFileDeleted() { - return isTsFile && Objects.nonNull(tsFileResource) && tsFileResource.isDeleted(); - } - public long getFileSize() { if (fileSize == -1L) { synchronized (this) { @@ -97,64 +57,33 @@ public long getFileSize() { return fileSize; } - public long getTsFileResourceSize() { - return Objects.nonNull(tsFileResource) ? 
tsFileResource.calculateRamSize() : 0; - } - ///////////////////// Reference Count ///////////////////// public int getReferenceCount() { return referenceCount.get(); } - public int increaseAndGetReference() { - return referenceCount.addAndGet(1); + public void increaseReferenceCount() { + referenceCount.addAndGet(1); } - public int decreaseAndGetReference() { + public boolean decreaseReferenceCount() { final int finalReferenceCount = referenceCount.addAndGet(-1); if (finalReferenceCount == 0) { - lastUnpinToZeroTime.set(System.currentTimeMillis()); + close(); + return true; } if (finalReferenceCount < 0) { LOGGER.warn("PipeTsFileResource's reference count is decreased to below 0."); } - return finalReferenceCount; - } - - public synchronized boolean closeIfOutOfTimeToLive() { - if (referenceCount.get() <= 0 - && (deviceMeasurementsMap == null // Not cached yet. - || System.currentTimeMillis() - lastUnpinToZeroTime.get() - > TSFILE_MIN_TIME_TO_LIVE_IN_MS)) { - close(); - return true; - } else { - return false; - } + return false; } @Override public synchronized void close() { - if (deviceMeasurementsMap != null) { - deviceMeasurementsMap = null; - } - - if (deviceIsAlignedMap != null) { - deviceIsAlignedMap = null; - } - - if (measurementDataTypeMap != null) { - measurementDataTypeMap = null; - } - - if (allocatedMemoryBlock != null) { - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - } - + boolean successful = false; try { - Files.deleteIfExists(hardlinkOrCopiedFile.toPath()); + successful = Files.deleteIfExists(hardlinkOrCopiedFile.toPath()); } catch (final Exception e) { LOGGER.error( "PipeTsFileResource: Failed to delete tsfile {} when closing, because {}. 
Please MANUALLY delete it.", @@ -163,168 +92,8 @@ public synchronized void close() { e); } - LOGGER.info("PipeTsFileResource: Closed tsfile {} and cleaned up.", hardlinkOrCopiedFile); - } - - //////////////////////////// Cache Getter //////////////////////////// - - public synchronized Map> tryGetDeviceMeasurementsMap() - throws IOException { - if (deviceMeasurementsMap == null && isTsFile) { - cacheObjectsIfAbsent(); - } - return deviceMeasurementsMap; - } - - public synchronized Map tryGetDeviceIsAlignedMap( - final boolean cacheOtherMetadata) throws IOException { - if (deviceIsAlignedMap == null && isTsFile) { - if (cacheOtherMetadata) { - cacheObjectsIfAbsent(); - } else { - cacheDeviceIsAlignedMapIfAbsent(); - } - } - return deviceIsAlignedMap; - } - - public synchronized Map tryGetMeasurementDataTypeMap() throws IOException { - if (measurementDataTypeMap == null && isTsFile) { - cacheObjectsIfAbsent(); + if (successful) { + LOGGER.info("PipeTsFileResource: Closed tsfile {} and cleaned up.", hardlinkOrCopiedFile); } - return measurementDataTypeMap; - } - - synchronized boolean cacheDeviceIsAlignedMapIfAbsent() throws IOException { - if (!isTsFile) { - return false; - } - - if (allocatedMemoryBlock != null) { - // This means objects are already cached. - return true; - } - - // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. - // Only allocate when pipe memory used is less than 50%, because memory here - // is hard to shrink and may consume too much memory. 
- allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient( - PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), - MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "Failed to cacheDeviceIsAlignedMapIfAbsent for tsfile {}, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - return false; - } - - long memoryRequiredInBytes = 0L; - try (TsFileSequenceReader sequenceReader = - new TsFileSequenceReader(hardlinkOrCopiedFile.getPath(), true, false)) { - deviceIsAlignedMap = new HashMap<>(); - final TsFileDeviceIterator deviceIsAlignedIterator = - sequenceReader.getAllDevicesIteratorWithIsAligned(); - while (deviceIsAlignedIterator.hasNext()) { - final Pair deviceIsAlignedPair = deviceIsAlignedIterator.next(); - deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); - } - memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); - } - // Release memory of TsFileSequenceReader. - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - - // Allocate again for the cached objects. 
- allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - deviceIsAlignedMap = null; - return false; - } - - LOGGER.info( - "PipeTsFileResource: Cached deviceIsAlignedMap for tsfile {}.", - hardlinkOrCopiedFile.getPath()); - return true; - } - - synchronized boolean cacheObjectsIfAbsent() throws IOException { - if (!isTsFile) { - return false; - } - - if (allocatedMemoryBlock != null) { - if (deviceMeasurementsMap != null) { - return true; - } else { - // Recalculate it again because only deviceIsAligned map is cached - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - } - } - - // See if pipe memory is sufficient to be allocated for TsFileSequenceReader. - // Only allocate when pipe memory used is less than 50%, because memory here - // is hard to shrink and may consume too much memory. 
- allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient( - PipeConfig.getInstance().getPipeMemoryAllocateForTsFileSequenceReaderInBytes(), - MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "Failed to cacheObjectsIfAbsent for tsfile {}, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - return false; - } - - long memoryRequiredInBytes = 0L; - try (TsFileSequenceReader sequenceReader = - new TsFileSequenceReader(hardlinkOrCopiedFile.getPath(), true, true)) { - deviceMeasurementsMap = sequenceReader.getDeviceMeasurementsMap(); - memoryRequiredInBytes += - PipeMemoryWeightUtil.memoryOfIDeviceID2StrList(deviceMeasurementsMap); - - if (Objects.isNull(deviceIsAlignedMap)) { - deviceIsAlignedMap = new HashMap<>(); - final TsFileDeviceIterator deviceIsAlignedIterator = - sequenceReader.getAllDevicesIteratorWithIsAligned(); - while (deviceIsAlignedIterator.hasNext()) { - final Pair deviceIsAlignedPair = deviceIsAlignedIterator.next(); - deviceIsAlignedMap.put(deviceIsAlignedPair.getLeft(), deviceIsAlignedPair.getRight()); - } - } - memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfIDeviceId2Bool(deviceIsAlignedMap); - - measurementDataTypeMap = sequenceReader.getFullPathDataTypeMap(); - memoryRequiredInBytes += PipeMemoryWeightUtil.memoryOfStr2TSDataType(measurementDataTypeMap); - } - // Release memory of TsFileSequenceReader. - allocatedMemoryBlock.close(); - allocatedMemoryBlock = null; - - // Allocate again for the cached objects. 
- allocatedMemoryBlock = - PipeDataNodeResourceManager.memory() - .forceAllocateIfSufficient(memoryRequiredInBytes, MEMORY_SUFFICIENT_THRESHOLD); - if (allocatedMemoryBlock == null) { - LOGGER.info( - "PipeTsFileResource: Failed to cache objects for tsfile {} in cache, because memory usage is high", - hardlinkOrCopiedFile.getPath()); - deviceIsAlignedMap = null; - deviceMeasurementsMap = null; - measurementDataTypeMap = null; - return false; - } - - LOGGER.info( - "PipeTsFileResource: Cached objects for tsfile {}.", hardlinkOrCopiedFile.getPath()); - return true; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java index d00318ae5616..bf8b6b86b439 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/tsfile/PipeTsFileResourceManager.java @@ -22,8 +22,7 @@ import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.utils.FileUtils; -import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; +import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.db.storageengine.dataregion.modification.ModificationFile; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; @@ -32,88 +31,30 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + import java.io.File; import java.io.IOException; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Optional; +import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; public class 
PipeTsFileResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(PipeTsFileResourceManager.class); - private final Map hardlinkOrCopiedFileToPipeTsFileResourceMap = - new ConcurrentHashMap<>(); - private final PipeTsFileResourceSegmentLock segmentLock = new PipeTsFileResourceSegmentLock(); - - public PipeTsFileResourceManager() { - PipeDataNodeAgent.runtime() - .registerPeriodicalJob( - "PipeTsFileResourceManager#ttlCheck()", - this::tryTtlCheck, - Math.max(PipeTsFileResource.TSFILE_MIN_TIME_TO_LIVE_IN_MS / 1000, 1)); - } - - private void tryTtlCheck() { - try { - ttlCheck(); - } catch (final InterruptedException e) { - Thread.currentThread().interrupt(); - LOGGER.warn("failed to try lock when checking TTL because of interruption", e); - } catch (final Exception e) { - LOGGER.warn("failed to check TTL of PipeTsFileResource: ", e); - } - } - - private void ttlCheck() throws InterruptedException { - final Iterator> iterator = - hardlinkOrCopiedFileToPipeTsFileResourceMap.entrySet().iterator(); - final long timeout = - PipeConfig.getInstance().getPipeSubtaskExecutorCronHeartbeatEventIntervalSeconds() >> 1; - final Optional logger = - PipeDataNodeResourceManager.log() - .schedule( - PipeTsFileResourceManager.class, - PipeConfig.getInstance().getPipeTsFilePinMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeTsFilePinMaxLogIntervalRounds(), - hardlinkOrCopiedFileToPipeTsFileResourceMap.size()); - final StringBuilder logBuilder = new StringBuilder(); - while (iterator.hasNext()) { - final Map.Entry entry = iterator.next(); - - final String hardlinkOrCopiedFile = entry.getKey(); - if (!segmentLock.tryLock(new File(hardlinkOrCopiedFile), timeout, TimeUnit.SECONDS)) { - LOGGER.warn( - "failed to try lock when checking TTL for file {} because of timeout ({}s)", - hardlinkOrCopiedFile, - timeout); - continue; - } + // This is used to hold the assigner pinned tsFiles. 
+ // Also, it is used to provide metadata cache of the tsFile, and is shared by all the pipe's + // tsFiles. + private final Map + hardlinkOrCopiedFileToTsFilePublicResourceMap = new ConcurrentHashMap<>(); - try { - if (entry.getValue().closeIfOutOfTimeToLive()) { - iterator.remove(); - } else { - logBuilder.append( - String.format( - "<%s , %d times, %d bytes> ", - entry.getKey(), - entry.getValue().getReferenceCount(), - entry.getValue().getFileSize())); - } - } catch (final Exception e) { - LOGGER.warn("failed to close PipeTsFileResource when checking TTL: ", e); - } finally { - segmentLock.unlock(new File(hardlinkOrCopiedFile)); - } - } - if (logBuilder.length() > 0) { - logger.ifPresent(l -> l.info("Pipe file {}are still referenced", logBuilder)); - } - } + // PipeName -> TsFilePath -> PipeTsFileResource + private final Map> + hardlinkOrCopiedFileToPipeTsFileResourceMap = new ConcurrentHashMap<>(); + private final PipeTsFileResourceSegmentLock segmentLock = new PipeTsFileResourceSegmentLock(); /** * Given a file, create a hardlink or copy it to pipe dir, maintain a reference count for the @@ -131,19 +72,16 @@ private void ttlCheck() throws InterruptedException { * @param file tsfile, resource file or mod file. can be original file or hardlink/copy of * original file * @param isTsFile {@code true} to create hardlink, {@code false} to copy file - * @param tsFileResource the TsFileResource of original TsFile. Ignored if {@param isTsFile} is - * {@code false}. 
* @return the hardlink or copied file * @throws IOException when create hardlink or copy file failed */ public File increaseFileReference( - final File file, final boolean isTsFile, final TsFileResource tsFileResource) - throws IOException { + final File file, final boolean isTsFile, final @Nullable String pipeName) throws IOException { // If the file is already a hardlink or copied file, // just increase reference count and return it segmentLock.lock(file); try { - if (increaseReferenceIfExists(file)) { + if (increaseReferenceIfExists(file, pipeName)) { return file; } } finally { @@ -152,13 +90,11 @@ public File increaseFileReference( // If the file is not a hardlink or copied file, check if there is a related hardlink or // copied file in pipe dir. if so, increase reference count and return it - final File hardlinkOrCopiedFile = getHardlinkOrCopiedFileInPipeDir(file); + final File hardlinkOrCopiedFile = getHardlinkOrCopiedFileInPipeDir(file, pipeName); segmentLock.lock(hardlinkOrCopiedFile); try { - if (increaseReferenceIfExists(hardlinkOrCopiedFile)) { - return hardlinkOrCopiedFileToPipeTsFileResourceMap - .get(hardlinkOrCopiedFile.getPath()) - .getFile(); + if (increaseReferenceIfExists(hardlinkOrCopiedFile, pipeName)) { + return getResourceMap(pipeName).get(hardlinkOrCopiedFile.getPath()).getFile(); } // If the file is a tsfile, create a hardlink in pipe dir and will return it. @@ -171,27 +107,57 @@ public File increaseFileReference( // If the file is not a hardlink or copied file, and there is no related hardlink or copied // file in pipe dir, create a hardlink or copy it to pipe dir, maintain a reference count for // the hardlink or copied file, and return the hardlink or copied file. 
- hardlinkOrCopiedFileToPipeTsFileResourceMap.put( - resultFile.getPath(), new PipeTsFileResource(resultFile, isTsFile, tsFileResource)); + if (Objects.nonNull(pipeName)) { + hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, k -> new ConcurrentHashMap<>()) + .put(resultFile.getPath(), new PipeTsFileResource(resultFile)); + } else { + hardlinkOrCopiedFileToTsFilePublicResourceMap.put( + resultFile.getPath(), new PipeTsFilePublicResource(resultFile)); + } + + increasePublicReference(resultFile, pipeName); + return resultFile; } finally { segmentLock.unlock(hardlinkOrCopiedFile); } } - private boolean increaseReferenceIfExists(final File file) { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(file.getPath()); + private boolean increaseReferenceIfExists(final File file, final @Nullable String pipeName) { + final String path = file.getPath(); + final PipeTsFileResource resource = getResourceMap(pipeName).get(path); if (resource != null) { - resource.increaseAndGetReference(); + resource.increaseReferenceCount(); + increasePublicReference(file, pipeName); return true; } return false; } - public static File getHardlinkOrCopiedFileInPipeDir(final File file) throws IOException { + private void increasePublicReference(final File file, final String pipeName) { + if (Objects.isNull(pipeName)) { + return; + } + // Increase the assigner's file to avoid hard-link or memory cache cleaning + // Note that it does not exist for historical files + final String path = getCommonFilePath(file); + hardlinkOrCopiedFileToTsFilePublicResourceMap.compute( + path, + (k, v) -> { + if (Objects.isNull(v)) { + return new PipeTsFilePublicResource(new File(path)); + } else { + v.increaseReferenceCount(); + return v; + } + }); + } + + public static File getHardlinkOrCopiedFileInPipeDir( + final File file, final @Nullable String pipeName) throws IOException { try { - return new File(getPipeTsFileDirPath(file), getRelativeFilePath(file)); + 
return new File(getPipeTsFileDirPath(file, pipeName), getRelativeFilePath(file)); } catch (final Exception e) { throw new IOException( String.format( @@ -202,22 +168,28 @@ public static File getHardlinkOrCopiedFileInPipeDir(final File file) throws IOEx } } - private static String getPipeTsFileDirPath(File file) throws IOException { + private static String getPipeTsFileDirPath(File file, final @Nullable String pipeName) + throws IOException { while (!file.getName().equals(IoTDBConstant.SEQUENCE_FOLDER_NAME) - && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)) { + && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME) + && !file.getName().equals(PipeConfig.getInstance().getPipeHardlinkBaseDirName())) { file = file.getParentFile(); } return file.getParentFile().getCanonicalPath() + File.separator + PipeConfig.getInstance().getPipeHardlinkBaseDirName() + File.separator - + PipeConfig.getInstance().getPipeHardlinkTsFileDirName(); + + PipeConfig.getInstance().getPipeHardlinkTsFileDirName() + + (Objects.nonNull(pipeName) ? File.separator + pipeName : ""); } private static String getRelativeFilePath(File file) { StringBuilder builder = new StringBuilder(file.getName()); while (!file.getName().equals(IoTDBConstant.SEQUENCE_FOLDER_NAME) - && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME)) { + && !file.getName().equals(IoTDBConstant.UNSEQUENCE_FOLDER_NAME) + && !file.getParentFile() + .getName() + .equals(PipeConfig.getInstance().getPipeHardlinkTsFileDirName())) { file = file.getParentFile(); builder = new StringBuilder(file.getName()) @@ -231,32 +203,68 @@ private static String getRelativeFilePath(File file) { * Given a hardlink or copied file, decrease its reference count, if the reference count is 0, * delete the file. if the given file is not a hardlink or copied file, do nothing. 
* - * @param hardlinkOrCopiedFile the copied or hardlinked file + * @param hardlinkOrCopiedFile the copied or hard-linked file */ - public void decreaseFileReference(final File hardlinkOrCopiedFile) { + public void decreaseFileReference( + final File hardlinkOrCopiedFile, final @Nullable String pipeName) { segmentLock.lock(hardlinkOrCopiedFile); try { final String filePath = hardlinkOrCopiedFile.getPath(); - final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(filePath); - if (resource != null) { - resource.decreaseAndGetReference(); + final PipeTsFileResource resource = getResourceMap(pipeName).get(filePath); + if (resource != null && resource.decreaseReferenceCount()) { + getResourceMap(pipeName).remove(filePath); } + // Decrease the assigner's file to clear hard-link and memory cache + // Note that it does not exist for historical files + decreasePublicReferenceIfExists(hardlinkOrCopiedFile, pipeName); } finally { segmentLock.unlock(hardlinkOrCopiedFile); } } + private void decreasePublicReferenceIfExists(final File file, final @Nullable String pipeName) { + if (Objects.isNull(pipeName)) { + return; + } + // Increase the assigner's file to avoid hard-link or memory cache cleaning + // Note that it does not exist for historical files + final String commonFilePath = getCommonFilePath(file); + if (hardlinkOrCopiedFileToTsFilePublicResourceMap.containsKey(commonFilePath) + && hardlinkOrCopiedFileToTsFilePublicResourceMap + .get(commonFilePath) + .decreaseReferenceCount()) { + hardlinkOrCopiedFileToPipeTsFileResourceMap.remove(commonFilePath); + } + } + + // Warning: Shall not be called by the assigner + private String getCommonFilePath(final @Nonnull File file) { + // If the parent or grandparent is null then this is testing scenario + // Skip the "pipeName" of this file + return Objects.isNull(file.getParentFile()) + || Objects.isNull(file.getParentFile().getParentFile()) + ? 
file.getPath() + : file.getParentFile().getParent() + File.separator + file.getName(); + } + /** * Get the reference count of the file. * * @param hardlinkOrCopiedFile the copied or hardlinked file * @return the reference count of the file */ - public int getFileReferenceCount(final File hardlinkOrCopiedFile) { + @TestOnly + public int getFileReferenceCount( + final File hardlinkOrCopiedFile, final @Nullable String pipeName) { segmentLock.lock(hardlinkOrCopiedFile); try { - final String filePath = hardlinkOrCopiedFile.getPath(); - final PipeTsFileResource resource = hardlinkOrCopiedFileToPipeTsFileResourceMap.get(filePath); + final PipeTsFileResource resource = + Objects.nonNull(pipeName) + ? hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>()) + .get(hardlinkOrCopiedFile.getPath()) + : hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedFile)); return resource != null ? resource.getReferenceCount() : 0; } finally { segmentLock.unlock(hardlinkOrCopiedFile); @@ -272,9 +280,14 @@ public int getFileReferenceCount(final File hardlinkOrCopiedFile) { public boolean cacheObjectsIfAbsent(final File hardlinkOrCopiedTsFile) throws IOException { segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource != null && resource.cacheObjectsIfAbsent(); + if (hardlinkOrCopiedTsFile.getParentFile() == null + || hardlinkOrCopiedTsFile.getParentFile().getParentFile() == null) { + return false; + } + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource != null && resource.cacheObjectsIfAbsent(hardlinkOrCopiedTsFile); } finally { segmentLock.unlock(hardlinkOrCopiedTsFile); } @@ -284,9 +297,10 @@ public Map> getDeviceMeasurementsMapFromCache( final File 
hardlinkOrCopiedTsFile) throws IOException { segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource == null ? null : resource.tryGetDeviceMeasurementsMap(); + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource == null ? null : resource.tryGetDeviceMeasurementsMap(hardlinkOrCopiedTsFile); } finally { segmentLock.unlock(hardlinkOrCopiedTsFile); } @@ -296,9 +310,12 @@ public Map getDeviceIsAlignedMapFromCache( final File hardlinkOrCopiedTsFile, final boolean cacheOtherMetadata) throws IOException { segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource == null ? null : resource.tryGetDeviceIsAlignedMap(cacheOtherMetadata); + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource == null + ? null + : resource.tryGetDeviceIsAlignedMap(cacheOtherMetadata, hardlinkOrCopiedTsFile); } finally { segmentLock.unlock(hardlinkOrCopiedTsFile); } @@ -308,38 +325,55 @@ public Map getMeasurementDataTypeMapFromCache( final File hardlinkOrCopiedTsFile) throws IOException { segmentLock.lock(hardlinkOrCopiedTsFile); try { - final PipeTsFileResource resource = - hardlinkOrCopiedFileToPipeTsFileResourceMap.get(hardlinkOrCopiedTsFile.getPath()); - return resource == null ? null : resource.tryGetMeasurementDataTypeMap(); + final PipeTsFilePublicResource resource = + hardlinkOrCopiedFileToTsFilePublicResourceMap.get( + getCommonFilePath(hardlinkOrCopiedTsFile)); + return resource == null + ? 
null + : resource.tryGetMeasurementDataTypeMap(hardlinkOrCopiedTsFile); } finally { segmentLock.unlock(hardlinkOrCopiedTsFile); } } - public void pinTsFileResource(final TsFileResource resource, final boolean withMods) + public Map getResourceMap(final @Nullable String pipeName) { + return Objects.nonNull(pipeName) + ? hardlinkOrCopiedFileToPipeTsFileResourceMap.computeIfAbsent( + pipeName, k -> new ConcurrentHashMap<>()) + : hardlinkOrCopiedFileToTsFilePublicResourceMap; + } + + public void pinTsFileResource( + final TsFileResource resource, final boolean withMods, final @Nullable String pipeName) throws IOException { - increaseFileReference(resource.getTsFile(), true, resource); + increaseFileReference(resource.getTsFile(), true, pipeName); if (withMods && resource.getModFile().exists()) { - increaseFileReference(new File(resource.getModFile().getFilePath()), false, null); + increaseFileReference(new File(resource.getModFile().getFilePath()), false, pipeName); } } - public void unpinTsFileResource(final TsFileResource resource) throws IOException { - final File pinnedFile = getHardlinkOrCopiedFileInPipeDir(resource.getTsFile()); - decreaseFileReference(pinnedFile); + public void unpinTsFileResource(final TsFileResource resource, final @Nullable String pipeName) + throws IOException { + final File pinnedFile = getHardlinkOrCopiedFileInPipeDir(resource.getTsFile(), pipeName); + decreaseFileReference(pinnedFile, pipeName); final File modFile = new File(pinnedFile + ModificationFile.FILE_SUFFIX); if (modFile.exists()) { - decreaseFileReference(modFile); + decreaseFileReference(modFile, pipeName); } } - public int getLinkedTsfileCount() { - return hardlinkOrCopiedFileToPipeTsFileResourceMap.size(); + public int getLinkedTsFileCount(final @Nonnull String pipeName) { + return hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>()) + .size(); } - public long getTotalLinkedTsfileSize() { - return 
hardlinkOrCopiedFileToPipeTsFileResourceMap.values().stream() + public long getTotalLinkedTsFileSize(final @Nonnull String pipeName) { + return hardlinkOrCopiedFileToPipeTsFileResourceMap + .computeIfAbsent(pipeName, pipe -> new ConcurrentHashMap<>()) + .values() + .stream() .mapToLong( resource -> { try { @@ -351,47 +385,4 @@ public long getTotalLinkedTsfileSize() { }) .sum(); } - - /** - * Get the total size of linked TsFiles whose original TsFile is deleted (by compaction or else) - */ - public long getTotalLinkedButDeletedTsfileSize() { - try { - return hardlinkOrCopiedFileToPipeTsFileResourceMap.values().parallelStream() - .filter(PipeTsFileResource::isOriginalTsFileDeleted) - .mapToLong( - resource -> { - try { - return resource.getFileSize(); - } catch (Exception e) { - LOGGER.warn( - "failed to get file size of linked but deleted TsFile {}: ", resource, e); - return 0; - } - }) - .sum(); - } catch (final Exception e) { - LOGGER.warn("failed to get total size of linked but deleted TsFiles: ", e); - return 0; - } - } - - public long getTotalLinkedButDeletedTsFileResourceRamSize() { - long totalLinkedButDeletedTsfileResourceRamSize = 0; - try { - for (final Map.Entry resourceEntry : - hardlinkOrCopiedFileToPipeTsFileResourceMap.entrySet()) { - final PipeTsFileResource pipeTsFileResource = resourceEntry.getValue(); - // If the original TsFile is not deleted, the memory of the resource is not counted - // because the memory of the resource is controlled by TsFileResourceManager. 
- if (pipeTsFileResource.isOriginalTsFileDeleted()) { - totalLinkedButDeletedTsfileResourceRamSize += pipeTsFileResource.getTsFileResourceSize(); - } - } - return totalLinkedButDeletedTsfileResourceRamSize; - } catch (final Exception e) { - LOGGER.warn("failed to get total size of linked but deleted TsFiles resource ram size: ", e); - return totalLinkedButDeletedTsfileResourceRamSize; - } - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java deleted file mode 100644 index 9d1e530a19d1..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResource.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.resource.wal; - -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; -import org.apache.iotdb.commons.exception.pipe.PipeRuntimeNonCriticalException; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -public abstract class PipeWALResource implements Closeable { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALResource.class); - - protected final WALEntryHandler walEntryHandler; - - private final AtomicInteger referenceCount; - - public static final long WAL_MIN_TIME_TO_LIVE_IN_MS = 1000L * 20; - private final AtomicLong lastLogicalPinTime; - private final AtomicBoolean isPhysicallyPinned; - - protected PipeWALResource(WALEntryHandler walEntryHandler) { - this.walEntryHandler = walEntryHandler; - - referenceCount = new AtomicInteger(0); - - lastLogicalPinTime = new AtomicLong(0); - isPhysicallyPinned = new AtomicBoolean(false); - } - - public final void pin() throws PipeRuntimeNonCriticalException { - if (referenceCount.get() == 0) { - if (!isPhysicallyPinned.get()) { - try { - pinInternal(); - } catch (MemTablePinException e) { - throw new PipeRuntimeNonCriticalException( - String.format( - "failed to pin wal %d, because %s", - walEntryHandler.getMemTableId(), e.getMessage())); - } - isPhysicallyPinned.set(true); - LOGGER.info("wal {} is pinned by pipe engine", walEntryHandler.getMemTableId()); - } // else means the wal is already pinned, do nothing - - // no matter the wal is pinned or not, update the last pin time - lastLogicalPinTime.set(System.currentTimeMillis()); - } - - referenceCount.incrementAndGet(); - } - - 
protected abstract void pinInternal() - throws MemTablePinException, PipeRuntimeNonCriticalException; - - public final void unpin() throws PipeRuntimeNonCriticalException { - final int finalReferenceCount = referenceCount.get(); - - if (finalReferenceCount == 1) { - unpinPhysicallyIfOutOfTimeToLive(); - } else if (finalReferenceCount < 1) { - throw new PipeRuntimeCriticalException( - String.format( - "wal %d is unpinned more than pinned, this should not happen", - walEntryHandler.getMemTableId())); - } - - referenceCount.decrementAndGet(); - } - - protected abstract void unpinInternal() - throws MemTablePinException, PipeRuntimeNonCriticalException; - - /** - * Invalidate the wal if it is unpinned and out of time to live. - * - * @return true if the wal is invalidated, false otherwise - */ - public final boolean invalidateIfPossible() { - if (referenceCount.get() > 0) { - return false; - } - - // referenceCount.get() == 0 - return unpinPhysicallyIfOutOfTimeToLive(); - } - - /** - * Unpin the wal if it is out of time to live. - * - * @return true if the wal is unpinned physically (then it can be invalidated), false otherwise - * @throws PipeRuntimeNonCriticalException if failed to unpin WAL of memtable. 
- */ - private boolean unpinPhysicallyIfOutOfTimeToLive() { - if (isPhysicallyPinned.get()) { - if (System.currentTimeMillis() - lastLogicalPinTime.get() > WAL_MIN_TIME_TO_LIVE_IN_MS) { - try { - unpinInternal(); - } catch (MemTablePinException e) { - throw new PipeRuntimeNonCriticalException( - String.format( - "failed to unpin wal %d, because %s", - walEntryHandler.getMemTableId(), e.getMessage())); - } - isPhysicallyPinned.set(false); - LOGGER.info( - "wal {} is unpinned by pipe engine when checking time to live", - walEntryHandler.getMemTableId()); - return true; - } else { - return false; - } - } else { - LOGGER.info( - "wal {} is not pinned physically when checking time to live", - walEntryHandler.getMemTableId()); - return true; - } - } - - @Override - public final void close() { - if (isPhysicallyPinned.get()) { - try { - unpinInternal(); - } catch (MemTablePinException e) { - LOGGER.error( - "failed to unpin wal {} when closing pipe wal resource, because {}", - walEntryHandler.getMemTableId(), - e.getMessage()); - } - isPhysicallyPinned.set(false); - LOGGER.info( - "wal {} is unpinned by pipe engine when closing pipe wal resource", - walEntryHandler.getMemTableId()); - } - - referenceCount.set(0); - } - - public int getReferenceCount() { - return referenceCount.get(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java deleted file mode 100644 index 9c51d79daad4..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/PipeWALResourceManager.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.resource.wal; - -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ConcurrentModificationException; -import java.util.Iterator; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReentrantLock; - -public abstract class PipeWALResourceManager { - - private static final Logger LOGGER = LoggerFactory.getLogger(PipeWALResourceManager.class); - - protected final Map memtableIdToPipeWALResourceMap; - - private static final int SEGMENT_LOCK_COUNT = 32; - private final ReentrantLock[] memTableIdSegmentLocks; - - protected PipeWALResourceManager() { - // memTableIdToPipeWALResourceMap can be concurrently accessed by multiple threads - memtableIdToPipeWALResourceMap = new ConcurrentHashMap<>(); - - memTableIdSegmentLocks = new ReentrantLock[SEGMENT_LOCK_COUNT]; - for (int i = 0; i < SEGMENT_LOCK_COUNT; i++) { - memTableIdSegmentLocks[i] = new ReentrantLock(); - } - - PipeDataNodeAgent.runtime() - .registerPeriodicalJob( - 
"PipeWALResourceManager#ttlCheck()", - this::ttlCheck, - Math.max(PipeWALResource.WAL_MIN_TIME_TO_LIVE_IN_MS / 1000, 1)); - } - - @SuppressWarnings("java:S2222") - private void ttlCheck() { - final Iterator> iterator = - memtableIdToPipeWALResourceMap.entrySet().iterator(); - final Optional logger = - PipeDataNodeResourceManager.log() - .schedule( - PipeWALResourceManager.class, - PipeConfig.getInstance().getPipeWalPinMaxLogNumPerRound(), - PipeConfig.getInstance().getPipeWalPinMaxLogIntervalRounds(), - memtableIdToPipeWALResourceMap.size()); - - final StringBuilder logBuilder = new StringBuilder(); - try { - while (iterator.hasNext()) { - final Map.Entry entry = iterator.next(); - final ReentrantLock lock = - memTableIdSegmentLocks[(int) (entry.getKey() % SEGMENT_LOCK_COUNT)]; - - lock.lock(); - try { - if (entry.getValue().invalidateIfPossible()) { - iterator.remove(); - } else { - logBuilder.append( - String.format( - "<%d , %d times> ", entry.getKey(), entry.getValue().getReferenceCount())); - } - } finally { - lock.unlock(); - } - } - } catch (final ConcurrentModificationException e) { - LOGGER.error( - "Concurrent modification issues happened, skipping the WAL in this round of ttl check", - e); - } finally { - if (logBuilder.length() > 0) { - logger.ifPresent(l -> l.info("WAL {}are still referenced", logBuilder)); - } - } - } - - public final void pin(final WALEntryHandler walEntryHandler) throws IOException { - final long memTableId = walEntryHandler.getMemTableId(); - final ReentrantLock lock = memTableIdSegmentLocks[(int) (memTableId % SEGMENT_LOCK_COUNT)]; - - lock.lock(); - try { - pinInternal(memTableId, walEntryHandler); - } finally { - lock.unlock(); - } - } - - protected abstract void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) - throws IOException; - - public final void unpin(final WALEntryHandler walEntryHandler) throws IOException { - final long memTableId = walEntryHandler.getMemTableId(); - final ReentrantLock lock 
= memTableIdSegmentLocks[(int) (memTableId % SEGMENT_LOCK_COUNT)]; - - lock.lock(); - try { - unpinInternal(memTableId, walEntryHandler); - } finally { - lock.unlock(); - } - } - - protected abstract void unpinInternal( - final long memTableId, final WALEntryHandler walEntryHandler) throws IOException; - - public int getPinnedWalCount() { - return Objects.nonNull(memtableIdToPipeWALResourceMap) - ? memtableIdToPipeWALResourceMap.size() - : 0; - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java deleted file mode 100644 index f1ad513ccc4c..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResource.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.resource.wal.hardlink; - -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -public class PipeWALHardlinkResource extends PipeWALResource { - - private final PipeWALHardlinkResourceManager resourceManager; - - protected PipeWALHardlinkResource( - WALEntryHandler walEntryHandler, PipeWALHardlinkResourceManager resourceManager) { - super(walEntryHandler); - this.resourceManager = resourceManager; - } - - @Override - protected void pinInternal() throws MemTablePinException { - // TODO: hardlink - walEntryHandler.pinMemTable(); - } - - @Override - protected void unpinInternal() throws MemTablePinException { - // TODO: hardlink - walEntryHandler.unpinMemTable(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java deleted file mode 100644 index eebf766dc367..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/hardlink/PipeWALHardlinkResourceManager.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.pipe.resource.wal.hardlink; - -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.utils.FileUtils; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.util.HashMap; -import java.util.Map; - -public class PipeWALHardlinkResourceManager extends PipeWALResourceManager { - - @Override - protected void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap - .computeIfAbsent(memTableId, id -> new PipeWALHardlinkResource(walEntryHandler, this)) - .pin(); - } - - @Override - protected void unpinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap.get(memTableId).unpin(); - } - - //////////////////////////// hardlink related //////////////////////////// - - private final Map hardlinkToReferenceMap = new HashMap<>(); - - /** - * given a file, create a hardlink, maintain a reference count for the hardlink, and return the - * hardlink. - * - *

if the given file is already a hardlink, increase its reference count and return it. - * - *

if the given file is a wal, create a hardlink in pipe dir, increase the reference count of - * the hardlink and return it. - * - * @param file wal file. can be original file or the hardlink of original file - * @return the hardlink - * @throws IOException when create hardlink failed - */ - public synchronized File increaseFileReference(final File file) throws IOException { - // if the file is already a hardlink, just increase reference count and return it - if (increaseReferenceIfExists(file.getPath())) { - return file; - } - - // if the file is not a hardlink, check if there is a related hardlink in pipe dir. if so, - // increase reference count and return it. - final File hardlink = getHardlinkInPipeWALDir(file); - if (increaseReferenceIfExists(hardlink.getPath())) { - return hardlink; - } - - // if the file is a wal, and there is no related hardlink in pipe dir, create a hardlink to pipe - // dir, maintain a reference count for the hardlink, and return the hardlink. - hardlinkToReferenceMap.put(hardlink.getPath(), 1); - return FileUtils.createHardLink(file, hardlink); - } - - private boolean increaseReferenceIfExists(final String path) { - hardlinkToReferenceMap.computeIfPresent(path, (k, v) -> v + 1); - return hardlinkToReferenceMap.containsKey(path); - } - - // TODO: Check me! Make sure the file is not a hardlink. - // TODO: IF user specify a wal by config, will the method work? 
- private static File getHardlinkInPipeWALDir(final File file) throws IOException { - try { - return new File(getPipeWALDirPath(file), getRelativeFilePath(file)); - } catch (final Exception e) { - throw new IOException( - String.format( - "failed to get hardlink in pipe dir " + "for file %s, it is not a wal", - file.getPath()), - e); - } - } - - private static String getPipeWALDirPath(File file) throws IOException { - while (!file.getName().equals(IoTDBConstant.WAL_FOLDER_NAME)) { - file = file.getParentFile(); - } - - return file.getParentFile().getCanonicalPath() - + File.separator - + IoTDBConstant.DATA_FOLDER_NAME - + File.separator - + PipeConfig.getInstance().getPipeHardlinkBaseDirName() - + File.separator - + PipeConfig.getInstance().getPipeHardlinkWALDirName(); - } - - private static String getRelativeFilePath(File file) { - StringBuilder builder = new StringBuilder(file.getName()); - while (!file.getParentFile().getName().equals(IoTDBConstant.WAL_FOLDER_NAME)) { - file = file.getParentFile(); - builder = - new StringBuilder(file.getName()) - .append(IoTDBConstant.FILE_NAME_SEPARATOR) - .append(builder); - } - return builder.toString(); - } - - /** - * given a hardlink, decrease its reference count, if the reference count is 0, delete the file. - * if the given file is not a hardlink, do nothing. 
- * - * @param hardlink the hardlinked file - * @throws IOException when delete file failed - */ - public synchronized void decreaseFileReference(final File hardlink) throws IOException { - final Integer updatedReference = - hardlinkToReferenceMap.computeIfPresent( - hardlink.getPath(), (file, reference) -> reference - 1); - - if (updatedReference != null && updatedReference == 0) { - Files.deleteIfExists(hardlink.toPath()); - hardlinkToReferenceMap.remove(hardlink.getPath()); - } - } - - @TestOnly - public synchronized int getFileReferenceCount(final File hardlink) { - return hardlinkToReferenceMap.getOrDefault(hardlink.getPath(), 0); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java deleted file mode 100644 index e8e03e64a162..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResource.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.resource.wal.selfhost; - -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -public class PipeWALSelfHostResource extends PipeWALResource { - - public PipeWALSelfHostResource(WALEntryHandler walEntryHandler) { - super(walEntryHandler); - } - - @Override - protected void pinInternal() throws MemTablePinException { - walEntryHandler.pinMemTable(); - } - - @Override - protected void unpinInternal() throws MemTablePinException { - walEntryHandler.unpinMemTable(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java deleted file mode 100644 index c7fe0accda2f..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/resource/wal/selfhost/PipeWALSelfHostResourceManager.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.resource.wal.selfhost; - -import org.apache.iotdb.db.pipe.resource.wal.PipeWALResourceManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - -public class PipeWALSelfHostResourceManager extends PipeWALResourceManager { - - @Override - protected void pinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap - .computeIfAbsent(memTableId, id -> new PipeWALSelfHostResource(walEntryHandler)) - .pin(); - } - - @Override - protected void unpinInternal(final long memTableId, final WALEntryHandler walEntryHandler) { - memtableIdToPipeWALResourceMap.get(memTableId).unpin(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java index ec1c3686f240..5d7fa891530c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java @@ -41,6 +41,9 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.client.request.AsyncRequestContext; import org.apache.iotdb.commons.cluster.NodeStatus; +import org.apache.iotdb.commons.concurrent.IoTThreadFactory; +import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor; import org.apache.iotdb.commons.conf.CommonConfig; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.conf.ConfigurationFileUtils; @@ -272,9 +275,17 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import 
java.util.Optional; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -316,6 +327,18 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface private final CommonConfig commonConfig = CommonDescriptor.getInstance().getConfig(); + private final ExecutorService schemaExecutor = + new WrappedThreadPoolExecutor( + 0, + IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount(), + 0L, + TimeUnit.SECONDS, + new ArrayBlockingQueue<>( + IoTDBDescriptor.getInstance().getConfig().getSchemaThreadCount()), + new IoTThreadFactory(ThreadName.SCHEMA_PARALLEL_POOL.getName()), + ThreadName.SCHEMA_PARALLEL_POOL.getName(), + new ThreadPoolExecutor.CallerRunsPolicy()); + private static final String SYSTEM = "system"; public DataNodeInternalRPCServiceImpl() { @@ -1071,6 +1094,10 @@ public TPushPipeMetaResp pushPipeMeta(TPushPipeMetaReq req) { .map(PipeMeta::deserialize4TaskAgent) .collect(Collectors.toList())); + if (Objects.isNull(exceptionMessages)) { + return new TPushPipeMetaResp() + .setStatus(new TSStatus(TSStatusCode.PIPE_PUSH_META_TIMEOUT.getStatusCode())); + } return exceptionMessages.isEmpty() ? 
new TPushPipeMetaResp() .setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())) @@ -1347,16 +1374,31 @@ private TSStatus executeSchemaBlackListTask( final List statusList = Collections.synchronizedList(new ArrayList<>()); final AtomicBoolean hasFailure = new AtomicBoolean(false); - consensusGroupIdList.parallelStream() - .forEach( - consensusGroupId -> { - final TSStatus status = executeOnOneRegion.apply(consensusGroupId); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && status.getCode() != TSStatusCode.ONLY_LOGICAL_VIEW.getStatusCode()) { - hasFailure.set(true); - } - statusList.add(status); - }); + final Set> schemaFuture = new HashSet<>(); + + consensusGroupIdList.forEach( + consensusGroupId -> + schemaFuture.add( + schemaExecutor.submit( + () -> { + final TSStatus status = executeOnOneRegion.apply(consensusGroupId); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() + && status.getCode() != TSStatusCode.ONLY_LOGICAL_VIEW.getStatusCode()) { + hasFailure.set(true); + } + statusList.add(status); + }))); + + for (final Future future : schemaFuture) { + try { + future.get(); + } catch (final ExecutionException | InterruptedException e) { + LOGGER.warn("Exception occurs when executing internal schema task: ", e); + statusList.add( + new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) + .setMessage(e.toString())); + } + } if (hasFailure.get()) { return RpcUtils.getStatus(statusList); @@ -1375,15 +1417,30 @@ private TSStatus executeInternalSchemaTask( final List statusList = Collections.synchronizedList(new ArrayList<>()); final AtomicBoolean hasFailure = new AtomicBoolean(false); - consensusGroupIdList.parallelStream() - .forEach( - consensusGroupId -> { - final TSStatus status = executeOnOneRegion.apply(consensusGroupId); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - hasFailure.set(true); - } - statusList.add(status); - }); + final Set> schemaFuture = new HashSet<>(); 
+ + consensusGroupIdList.forEach( + consensusGroupId -> + schemaFuture.add( + schemaExecutor.submit( + () -> { + final TSStatus status = executeOnOneRegion.apply(consensusGroupId); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + hasFailure.set(true); + } + statusList.add(status); + }))); + + for (final Future future : schemaFuture) { + try { + future.get(); + } catch (final ExecutionException | InterruptedException e) { + LOGGER.warn("Exception occurs when executing internal schema task: ", e); + statusList.add( + new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) + .setMessage(e.toString())); + } + } if (hasFailure.get()) { return RpcUtils.getStatus(statusList); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java index 2d45a237e73c..171f2b2f87f9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileAnalyzer.java @@ -404,7 +404,7 @@ private boolean handleSingleMiniFile(final int i) throws FileNotFoundException { final long startTime = System.nanoTime(); try { final LoadTsFileDataTypeConverter loadTsFileDataTypeConverter = - new LoadTsFileDataTypeConverter(isGeneratedByPipe); + new LoadTsFileDataTypeConverter(context, isGeneratedByPipe); final TSStatus status = loadTsFileDataTypeConverter @@ -529,7 +529,7 @@ private Analysis executeTabletConversionOnException( } final LoadTsFileDataTypeConverter loadTsFileDataTypeConverter = - new LoadTsFileDataTypeConverter(isGeneratedByPipe); + new LoadTsFileDataTypeConverter(context, isGeneratedByPipe); final TSStatus status = loadTsFileStatement.isConvertOnTypeMismatch() ? 
loadTsFileDataTypeConverter.convertForTreeModel(loadTsFileStatement).orElse(null) @@ -896,13 +896,12 @@ private void verifySchema(ISchemaTree schemaTree) // check device schema: is aligned or not final boolean isAlignedInTsFile = schemaCache.getDeviceIsAligned(device); final boolean isAlignedInIoTDB = iotdbDeviceSchemaInfo.isAligned(); - if (isAlignedInTsFile != isAlignedInIoTDB) { - throw new LoadAnalyzeException( - String.format( - "Device %s in TsFile is %s, but in IoTDB is %s.", - device, - isAlignedInTsFile ? "aligned" : "not aligned", - isAlignedInIoTDB ? "aligned" : "not aligned")); + if (LOGGER.isInfoEnabled() && isAlignedInTsFile != isAlignedInIoTDB) { + LOGGER.info( + "Device {} in TsFile is {}, but in IoTDB is {}.", + device, + isAlignedInTsFile ? "aligned" : "not aligned", + isAlignedInIoTDB ? "aligned" : "not aligned"); } // check timeseries schema @@ -920,15 +919,14 @@ private void verifySchema(ISchemaTree schemaTree) } // check datatype - if (!tsFileSchema.getType().equals(iotdbSchema.getType())) { - throw new LoadAnalyzeTypeMismatchException( - String.format( - "Measurement %s%s%s datatype not match, TsFile: %s, IoTDB: %s", - device, - TsFileConstant.PATH_SEPARATOR, - iotdbSchema.getMeasurementId(), - tsFileSchema.getType(), - iotdbSchema.getType())); + if (LOGGER.isInfoEnabled() && !tsFileSchema.getType().equals(iotdbSchema.getType())) { + LOGGER.info( + "Measurement {}{}{} datatype not match, TsFile: {}, IoTDB: {}", + device, + TsFileConstant.PATH_SEPARATOR, + iotdbSchema.getMeasurementId(), + tsFileSchema.getType(), + iotdbSchema.getType()); } // check encoding diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java index 11478cf1f77f..c7bda59fd85f 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/lock/DataNodeSchemaLockManager.java @@ -54,6 +54,7 @@ public void releaseReadLock(final MPPQueryContext queryContext) { queryContext .getAcquiredLocks() .forEach(lockType -> locks[lockType.ordinal()].readLock().unlock()); + queryContext.getAcquiredLocks().clear(); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java index 59aa57ffd4e0..65eb21beee24 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/load/LoadTsFileScheduler.java @@ -536,7 +536,7 @@ private boolean loadLocally(LoadSingleTsFileNode node) throws IoTDBException { private void convertFailedTsFilesToTabletsAndRetry() { final LoadTsFileDataTypeConverter loadTsFileDataTypeConverter = - new LoadTsFileDataTypeConverter(isGeneratedByPipe); + new LoadTsFileDataTypeConverter(queryContext, isGeneratedByPipe); final Iterator iterator = failedTsFileNodeIndexes.listIterator(); while (iterator.hasNext()) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java index f923fb2f3413..6fbf870617a7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/StorageEngine.java @@ -38,7 +38,6 @@ import org.apache.iotdb.commons.exception.ShutdownException; import org.apache.iotdb.commons.exception.StartupException; import 
org.apache.iotdb.commons.file.SystemFileFactory; -import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.schema.ttl.TTLCache; import org.apache.iotdb.commons.service.IService; import org.apache.iotdb.commons.service.ServiceType; @@ -54,7 +53,6 @@ import org.apache.iotdb.db.exception.WriteProcessRejectException; import org.apache.iotdb.db.exception.load.LoadReadOnlyException; import org.apache.iotdb.db.exception.runtime.StorageEngineFailureException; -import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; import org.apache.iotdb.db.queryengine.plan.analyze.cache.schema.DataNodeTTLCache; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.load.LoadTsFilePieceNode; import org.apache.iotdb.db.queryengine.plan.scheduler.load.LoadTsFileScheduler; @@ -219,12 +217,6 @@ private void asyncRecoverDataRegion() throws StartupException { LOGGER.info( "Storage Engine recover cost: {}s.", (System.currentTimeMillis() - startRecoverTime) / 1000); - - PipeDataNodeAgent.runtime() - .registerPeriodicalJob( - "StorageEngine#operateFlush", - () -> operateFlush(new TFlushReq()), - PipeConfig.getInstance().getPipeStorageEngineFlushTimeIntervalMs() / 1000); }, ThreadName.STORAGE_ENGINE_RECOVER_TRIGGER.getName()); recoverEndTrigger.start(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java index 5e9b25bdda11..555d90e6c7c7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/DataRegion.java @@ -1898,7 +1898,7 @@ private void waitClosingTsFileProcessorFinished() throws InterruptedException { } /** close all working tsfile processors */ - private List> asyncCloseAllWorkingTsFileProcessors() { + public List> asyncCloseAllWorkingTsFileProcessors() { 
writeLock("asyncCloseAllWorkingTsFileProcessors"); List> futures = new ArrayList<>(); int count = 0; @@ -3296,8 +3296,7 @@ private boolean loadTsFileToUnSequence( } // Listen before the tsFile is added into tsFile manager to avoid it being compacted - PipeInsertionDataNodeListener.getInstance() - .listenToTsFile(dataRegionId, tsFileResource, true, isGeneratedByPipe); + PipeInsertionDataNodeListener.getInstance().listenToTsFile(dataRegionId, tsFileResource, true); tsFileManager.add(tsFileResource, false); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java index c4fba1fb2154..992b5406708e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/task/InsertionCrossSpaceCompactionTask.java @@ -227,9 +227,9 @@ private void prepareTargetFiles() throws IOException { new File(targetTsFile.getPath() + ModificationFile.FILE_SUFFIX).toPath(), new File(sourceTsFile.getPath() + ModificationFile.FILE_SUFFIX).toPath()); } - targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndexAfterClose()); + targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndex()); targetFile.deserialize(); - targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndexAfterClose()); + targetFile.setProgressIndex(unseqFileToInsert.getMaxProgressIndex()); } private boolean recoverTaskInfoFromLogFile() throws IOException { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java index afbea98ec299..d0f79cb992b7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/utils/CompactionUtils.java @@ -287,8 +287,11 @@ public static void updateProgressIndexAndMark( List seqResources, List unseqResources) { for (TsFileResource targetResource : targetResources) { + // Initial value + targetResource.setGeneratedByPipe(true); + targetResource.setGeneratedByPipeConsensus(true); for (TsFileResource unseqResource : unseqResources) { - targetResource.updateProgressIndex(unseqResource.getMaxProgressIndexAfterClose()); + targetResource.updateProgressIndex(unseqResource.getMaxProgressIndex()); targetResource.setGeneratedByPipe( unseqResource.isGeneratedByPipe() && targetResource.isGeneratedByPipe()); targetResource.setGeneratedByPipeConsensus( @@ -296,7 +299,7 @@ public static void updateProgressIndexAndMark( && targetResource.isGeneratedByPipeConsensus()); } for (TsFileResource seqResource : seqResources) { - targetResource.updateProgressIndex(seqResource.getMaxProgressIndexAfterClose()); + targetResource.updateProgressIndex(seqResource.getMaxProgressIndex()); targetResource.setGeneratedByPipe( seqResource.isGeneratedByPipe() && targetResource.isGeneratedByPipe()); targetResource.setGeneratedByPipeConsensus( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/flush/NotifyFlushMemTable.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/flush/NotifyFlushMemTable.java index 9b730b7e3005..3eac19188e97 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/flush/NotifyFlushMemTable.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/flush/NotifyFlushMemTable.java @@ -36,11 +36,4 @@ public IMemTable copy() { public boolean isSignalMemTable() { return true; } - - @Override - public boolean isTotallyGeneratedByPipe() { - // Even though the `isTotallyGeneratedByPipe` for the corresponding memory table of this - // `NotifyFlushMemTable` might be true, we still return false to ensure data integrity. - return false; - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/AbstractMemTable.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/AbstractMemTable.java index 6df59ed12201..dcae00700ed8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/AbstractMemTable.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/AbstractMemTable.java @@ -67,7 +67,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Objects; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -122,8 +121,6 @@ public abstract class AbstractMemTable implements IMemTable { private static final String METRIC_POINT_IN = Metric.POINTS_IN.toString(); - private final AtomicBoolean isTotallyGeneratedByPipe = new AtomicBoolean(true); - protected AbstractMemTable() { this.database = null; this.dataRegionId = null; @@ -990,14 +987,4 @@ public void setDatabaseAndDataRegionId(String database, String dataRegionId) { this.database = database; this.dataRegionId = dataRegionId; } - - @Override - public void markAsNotGeneratedByPipe() { - this.isTotallyGeneratedByPipe.set(false); - } - - @Override - public boolean isTotallyGeneratedByPipe() { - return this.isTotallyGeneratedByPipe.get(); - } } diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/IMemTable.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/IMemTable.java index 9de36b73eae8..3a7e9c55092f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/IMemTable.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/IMemTable.java @@ -203,9 +203,5 @@ void queryForDeviceRegionScan( void setDatabaseAndDataRegionId(String database, String dataRegionId); - void markAsNotGeneratedByPipe(); - - boolean isTotallyGeneratedByPipe(); - void updateMemtablePointCountMetric(InsertNode insertNode, int pointsInserted); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java index c91cb7838e34..451986b2f52d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/memtable/TsFileProcessor.java @@ -110,6 +110,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -187,6 +188,8 @@ public class TsFileProcessor { /** Total memtable size for mem control. 
*/ private long totalMemTableSize; + private final AtomicBoolean isTotallyGeneratedByPipe = new AtomicBoolean(true); + private static final String FLUSH_QUERY_WRITE_LOCKED = "{}: {} get flushQueryLock write lock"; private static final String FLUSH_QUERY_WRITE_RELEASE = "{}: {} get flushQueryLock write lock released"; @@ -317,14 +320,11 @@ public void insert(InsertRowNode insertRowNode, long[] costsForMetrics) PipeDataNodeAgent.runtime().assignSimpleProgressIndexIfNeeded(insertRowNode); if (!insertRowNode.isGeneratedByPipe()) { - workMemTable.markAsNotGeneratedByPipe(); + this.isTotallyGeneratedByPipe.set(false); } PipeInsertionDataNodeListener.getInstance() .listenToInsertNode( - dataRegionInfo.getDataRegion().getDataRegionId(), - walFlushListener.getWalEntryHandler(), - insertRowNode, - tsFileResource); + dataRegionInfo.getDataRegion().getDataRegionId(), insertRowNode, tsFileResource); int pointInserted; if (insertRowNode.isAligned()) { @@ -418,14 +418,11 @@ public void insert(InsertRowsNode insertRowsNode, long[] costsForMetrics) PipeDataNodeAgent.runtime().assignSimpleProgressIndexIfNeeded(insertRowsNode); if (!insertRowsNode.isGeneratedByPipe()) { - workMemTable.markAsNotGeneratedByPipe(); + this.isTotallyGeneratedByPipe.set(false); } PipeInsertionDataNodeListener.getInstance() .listenToInsertNode( - dataRegionInfo.getDataRegion().getDataRegionId(), - walFlushListener.getWalEntryHandler(), - insertRowsNode, - tsFileResource); + dataRegionInfo.getDataRegion().getDataRegionId(), insertRowsNode, tsFileResource); int pointInserted = 0; for (InsertRowNode insertRowNode : insertRowsNode.getInsertRowNodeList()) { @@ -536,14 +533,11 @@ public void insertTablet( PipeDataNodeAgent.runtime().assignSimpleProgressIndexIfNeeded(insertTabletNode); if (!insertTabletNode.isGeneratedByPipe()) { - workMemTable.markAsNotGeneratedByPipe(); + this.isTotallyGeneratedByPipe.set(false); } PipeInsertionDataNodeListener.getInstance() .listenToInsertNode( - 
dataRegionInfo.getDataRegion().getDataRegionId(), - walFlushListener.getWalEntryHandler(), - insertTabletNode, - tsFileResource); + dataRegionInfo.getDataRegion().getDataRegionId(), insertTabletNode, tsFileResource); int pointInserted; try { @@ -1146,15 +1140,9 @@ public Future asyncClose() { // we have to add the memtable into flushingList first and then set the shouldClose tag. // see https://issues.apache.org/jira/browse/IOTDB-510 IMemTable tmpMemTable = workMemTable == null ? new NotifyFlushMemTable() : workMemTable; + tsFileResource.setGeneratedByPipe(isTotallyGeneratedByPipe.get()); try { - PipeInsertionDataNodeListener.getInstance() - .listenToTsFile( - dataRegionInfo.getDataRegion().getDataRegionId(), - tsFileResource, - false, - tmpMemTable.isTotallyGeneratedByPipe()); - // When invoke closing TsFile after insert data to memTable, we shouldn't flush until invoke // flushing memTable in System module. Future future = addAMemtableIntoFlushingList(tmpMemTable); @@ -1586,6 +1574,12 @@ private void endFile() throws IOException, TsFileProcessorException { logger.debug("Start to end file {}", tsFileResource); } writer.endFile(); + + // Listen after "endFile" to avoid unnecessary waiting for tsFile close + // before resource serialization to avoid missing hardlink after restart + PipeInsertionDataNodeListener.getInstance() + .listenToTsFile(dataRegionInfo.getDataRegion().getDataRegionId(), tsFileResource, false); + tsFileResource.serialize(); FileTimeIndexCacheRecorder.getInstance().logFileTimeIndex(tsFileResource); if (logger.isDebugEnabled()) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java index 734c1449dfa7..fb7d33035768 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileManager.java @@ -20,6 +20,7 @@ package org.apache.iotdb.db.storageengine.dataregion.tsfile; import org.apache.iotdb.commons.utils.TimePartitionUtils; +import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; import org.apache.iotdb.db.storageengine.dataregion.tsfile.timeindex.FileTimeIndexCacheRecorder; import org.apache.iotdb.db.storageengine.rescon.memory.TsFileResourceManager; @@ -276,6 +277,18 @@ public void replace( } finally { writeUnlock(); } + + // Currently disable + if (false) { + PipeDataNodeResourceManager.compaction() + .emitResult( + storageGroupName, + dataRegionId, + timePartition, + seqFileResources, + unseqFileResources, + targetFileResources); + } } public boolean contains(TsFileResource tsFileResource, boolean sequence) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java index 7761ea5c3b16..c1a67400f4de 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java @@ -28,7 +28,6 @@ import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.load.PartitionViolationException; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.assigner.PipeTsFileEpochProgressIndexKeeper; import org.apache.iotdb.db.schemaengine.schemaregion.utils.ResourceByPathUtils; import org.apache.iotdb.db.storageengine.dataregion.DataRegion; import org.apache.iotdb.db.storageengine.dataregion.compaction.selector.utils.InsertionCompactionCandidateStatus; @@ -166,13 +165,13 @@ public class TsFileResource { */ private TsFileResource originTsFileResource; 
- private ProgressIndex maxProgressIndex; + private final AtomicReference maxProgressIndex = new AtomicReference<>(); /** used to prevent circular replication in PipeConsensus */ - private boolean isGeneratedByPipeConsensus = false; + private volatile boolean isGeneratedByPipeConsensus = false; /** used to prevent circular replication in Pipe */ - private boolean isGeneratedByPipe = false; + private volatile boolean isGeneratedByPipe = false; private InsertionCompactionCandidateStatus insertionCompactionCandidateStatus = InsertionCompactionCandidateStatus.NOT_CHECKED; @@ -268,9 +267,9 @@ private void serializeTo(BufferedOutputStream outputStream) throws IOException { ReadWriteIOUtils.write((String) null, outputStream); } - if (maxProgressIndex != null) { + if (maxProgressIndex.get() != null) { TsFileResourceBlockType.PROGRESS_INDEX.serialize(outputStream); - maxProgressIndex.serialize(outputStream); + maxProgressIndex.get().serialize(outputStream); } else { TsFileResourceBlockType.EMPTY_BLOCK.serialize(outputStream); } @@ -302,7 +301,7 @@ public void deserialize() throws IOException { TsFileResourceBlockType.deserialize(ReadWriteIOUtils.readByte(inputStream)); switch (blockType) { case PROGRESS_INDEX: - maxProgressIndex = ProgressIndexType.deserializeFrom(inputStream); + maxProgressIndex.set(ProgressIndexType.deserializeFrom(inputStream)); break; case PIPE_MARK: isGeneratedByPipeConsensus = ReadWriteIOUtils.readBoolean(inputStream); @@ -1198,13 +1197,9 @@ public void updateProgressIndex(ProgressIndex progressIndex) { return; } - maxProgressIndex = - (maxProgressIndex == null - ? 
progressIndex - : maxProgressIndex.updateToMinimumEqualOrIsAfterProgressIndex(progressIndex)); - - PipeTsFileEpochProgressIndexKeeper.getInstance() - .updateProgressIndex(getDataRegionId(), getTsFilePath(), maxProgressIndex); + if (!maxProgressIndex.compareAndSet(null, progressIndex)) { + maxProgressIndex.get().updateToMinimumEqualOrIsAfterProgressIndex(progressIndex); + } } public void setProgressIndex(ProgressIndex progressIndex) { @@ -1212,22 +1207,12 @@ public void setProgressIndex(ProgressIndex progressIndex) { return; } - maxProgressIndex = progressIndex; - - PipeTsFileEpochProgressIndexKeeper.getInstance() - .updateProgressIndex(getDataRegionId(), getTsFilePath(), maxProgressIndex); - } - - public ProgressIndex getMaxProgressIndexAfterClose() throws IllegalStateException { - if (getStatus().equals(TsFileResourceStatus.UNCLOSED)) { - throw new IllegalStateException( - "Should not get progress index from a unclosing TsFileResource."); - } - return getMaxProgressIndex(); + maxProgressIndex.set(progressIndex); } public ProgressIndex getMaxProgressIndex() { - return maxProgressIndex == null ? MinimumProgressIndex.INSTANCE : maxProgressIndex; + final ProgressIndex index = maxProgressIndex.get(); + return index == null ? 
MinimumProgressIndex.INSTANCE : index; } public boolean isEmpty() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java index edc30604a6e2..36c404faa7c2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALBuffer.java @@ -342,7 +342,6 @@ private void handleInfoEntry(WALEntry walEntry) { info.metaData.add(size, searchIndex, walEntry.getMemTableId()); info.memTableId2WalDiskUsage.compute( walEntry.getMemTableId(), (k, v) -> v == null ? size : v + size); - walEntry.getWalFlushListener().getWalEntryHandler().setSize(size); info.fsyncListeners.add(walEntry.getWalFlushListener()); } @@ -593,13 +592,8 @@ public void run() { // notify all waiting listeners if (forceSuccess) { - long position = lastFsyncPosition; for (WALFlushListener fsyncListener : info.fsyncListeners) { fsyncListener.succeed(); - if (fsyncListener.getWalEntryHandler() != null) { - fsyncListener.getWalEntryHandler().setEntryPosition(walFileVersionId, position); - position += fsyncListener.getWalEntryHandler().getSize(); - } } lastFsyncPosition = currentWALFileWriter.originalSize(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java index a8e94e2780a3..18304e621b7e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/buffer/WALEntry.java @@ -77,14 +77,14 @@ protected WALEntry(long memTableId, WALEntryValue value, boolean wait) { } else { throw new 
RuntimeException("Unknown WALEntry type"); } - walFlushListener = new WALFlushListener(wait, value); + walFlushListener = new WALFlushListener(wait); } protected WALEntry(WALEntryType type, long memTableId, WALEntryValue value, boolean wait) { this.type = type; this.memTableId = memTableId; this.value = value; - this.walFlushListener = new WALFlushListener(wait, value); + this.walFlushListener = new WALFlushListener(wait); } public abstract void serialize(IWALByteBufferView buffer); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java index afa6651dfa48..6f801a78457c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManager.java @@ -25,11 +25,9 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.service.metrics.WritingMetrics; import org.apache.iotdb.db.storageengine.dataregion.memtable.TsFileProcessor; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; import org.apache.iotdb.db.storageengine.dataregion.wal.io.CheckpointWriter; import org.apache.iotdb.db.storageengine.dataregion.wal.io.ILogWriter; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.CheckpointFileUtils; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -177,9 +175,7 @@ public void makeFlushMemTableCP(long memTableId) { return; } memTableInfo.setFlushed(); - if (!memTableInfo.isPinned()) { - memTableId2Info.remove(memTableId); - } + memTableId2Info.remove(memTableId); Checkpoint checkpoint = new Checkpoint( CheckpointType.FLUSH_MEMORY_TABLE, 
Collections.singletonList(memTableInfo)); @@ -261,71 +257,9 @@ private boolean tryRollingLogWriter() throws IOException { // endregion - // region methods for pipe - /** - * Pin the wal files of the given memory table. Notice: cannot pin one memTable too long, - * otherwise the wal disk usage may too large. - * - * @throws MemTablePinException If the memTable has been flushed - */ - public void pinMemTable(long memTableId) throws MemTablePinException { - infoLock.lock(); - try { - if (!memTableId2Info.containsKey(memTableId)) { - throw new MemTablePinException( - String.format( - "Fail to pin memTable-%d because this memTable doesn't exist in the wal.", - memTableId)); - } - MemTableInfo memTableInfo = memTableId2Info.get(memTableId); - if (!memTableInfo.isPinned()) { - WALInsertNodeCache.getInstance().addMemTable(memTableId); - } - memTableInfo.pin(); - } finally { - infoLock.unlock(); - } - } - - /** - * Unpin the wal files of the given memory table. - * - * @throws MemTablePinException If there aren't corresponding pin operations - */ - public void unpinMemTable(long memTableId) throws MemTablePinException { - infoLock.lock(); - try { - if (!memTableId2Info.containsKey(memTableId)) { - throw new MemTablePinException( - String.format( - "Fail to unpin memTable-%d because this memTable doesn't exist in the wal.", - memTableId)); - } - if (!memTableId2Info.get(memTableId).isPinned()) { - throw new MemTablePinException( - String.format( - "Fail to unpin memTable-%d because this memTable hasn't been pinned.", memTableId)); - } - MemTableInfo memTableInfo = memTableId2Info.get(memTableId); - memTableInfo.unpin(); - if (!memTableInfo.isPinned()) { - WALInsertNodeCache.getInstance().removeMemTable(memTableId); - if (memTableInfo.isFlushed()) { - memTableId2Info.remove(memTableId); - } - } - } finally { - infoLock.unlock(); - } - } - - // endregion - - /** Get MemTableInfo of oldest unpinned MemTable, whose first version id is smallest. 
*/ - public MemTableInfo getOldestUnpinnedMemTableInfo() { + public MemTableInfo getOldestMemTableInfo() { // find oldest memTable return activeOrPinnedMemTables().stream() - .filter(memTableInfo -> !memTableInfo.isPinned()) .min(Comparator.comparingLong(MemTableInfo::getMemTableId)) .orElse(null); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java index 59c2a3be23b5..984006b25bc0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/MemTableInfo.java @@ -47,8 +47,7 @@ public class MemTableInfo implements WALEntryValue { // memTable private IMemTable memTable; - // memTable pin count - private int pinCount; + // memTable is flushed or not private boolean flushed; // data region id @@ -116,22 +115,6 @@ public IMemTable getMemTable() { return memTable; } - public void pin() { - this.pinCount++; - } - - public void unpin() { - this.pinCount--; - } - - public boolean isPinned() { - return pinCount > 0; - } - - public int getPinCount() { - return pinCount; - } - public boolean isFlushed() { return flushed; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java index b03b27a69948..2f257da9adc4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/io/WALByteBufReader.java @@ -20,7 +20,6 @@ package org.apache.iotdb.db.storageengine.dataregion.wal.io; import 
org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryPosition; import java.io.Closeable; import java.io.DataInputStream; @@ -50,18 +49,6 @@ public WALByteBufReader(File logFile) throws IOException { } } - public WALByteBufReader(WALEntryPosition walEntryPosition) throws IOException { - WALInputStream walInputStream = walEntryPosition.openReadFileStream(); - try { - this.logStream = new DataInputStream(walInputStream); - this.metaData = walInputStream.getWALMetaData(); - this.sizeIterator = metaData.getBuffersSize().iterator(); - } catch (Exception e) { - walInputStream.close(); - throw e; - } - } - /** Like {@link Iterator#hasNext()}. */ public boolean hasNext() { return sizeIterator.hasNext(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java index 38b69f1162b4..dfa7bf6bb9dd 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALFakeNode.java @@ -41,9 +41,9 @@ private WALFakeNode(Status status) { public WALFakeNode(Status status, Exception cause) { this.status = status; - this.successListener = new WALFlushListener(false, null); + this.successListener = new WALFlushListener(false); this.successListener.succeed(); - this.failListener = new WALFlushListener(false, null); + this.failListener = new WALFlushListener(false); this.failListener.fail(cause); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java index 35432a8fd6ff..003c74763da7 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNode.java @@ -49,7 +49,6 @@ import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.CheckpointManager; import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.CheckpointType; import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.MemTableInfo; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; import org.apache.iotdb.db.storageengine.dataregion.wal.io.WALByteBufReader; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALFileStatus; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALFileUtils; @@ -57,7 +56,6 @@ import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.AbstractResultListener.Status; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener; -import org.apache.commons.lang3.StringUtils; import org.apache.tsfile.fileSystem.FSFactoryProducer; import org.apache.tsfile.utils.TsFileUtils; import org.slf4j.Logger; @@ -183,7 +181,6 @@ private WALFlushListener log(WALEntry walEntry) { buffer.write(walEntry); // set handler for pipe - walEntry.getWalFlushListener().getWalEntryHandler().setWalNode(this, walEntry.getMemTableId()); return walEntry.getWalFlushListener(); } @@ -228,25 +225,6 @@ public void setDeleted(boolean deleted) { // region methods for pipe - /** - * Pin the wal files of the given memory table. Notice: cannot pin one memTable too long, - * otherwise the wal disk usage may too large. - * - * @throws MemTablePinException If the memTable has been flushed - */ - public void pinMemTable(long memTableId) throws MemTablePinException { - checkpointManager.pinMemTable(memTableId); - } - - /** - * Unpin the wal files of the given memory table. 
- * - * @throws MemTablePinException If there aren't corresponding pin operations - */ - public void unpinMemTable(long memTableId) throws MemTablePinException { - checkpointManager.unpinMemTable(memTableId); - } - // endregion // region Task to delete outdated .wal files @@ -270,8 +248,6 @@ private class DeleteOutdatedFileTask implements Runnable { // the effective information ratio private double effectiveInfoRatio = 1.0d; - private List pinnedMemTableIds; - private int fileIndexAfterFilterSafelyDeleteIndex = Integer.MAX_VALUE; private List successfullyDeleted; private long deleteFileSize; @@ -297,7 +273,6 @@ private boolean initAndCheckIfNeedContinue() { this.sortedWalFilesExcludingLast = Arrays.copyOfRange(allWalFilesOfOneNode, 0, allWalFilesOfOneNode.length - 1); this.activeOrPinnedMemTables = checkpointManager.activeOrPinnedMemTables(); - this.pinnedMemTableIds = initPinnedMemTableIds(); this.fileIndexAfterFilterSafelyDeleteIndex = initFileIndexAfterFilterSafelyDeleteIndex(); this.successfullyDeleted = new ArrayList<>(); this.deleteFileSize = 0; @@ -318,20 +293,6 @@ public void rollWalFileIfHaveNoActiveMemTable() { } } - private List initPinnedMemTableIds() { - List memTableInfos = checkpointManager.activeOrPinnedMemTables(); - if (memTableInfos.isEmpty()) { - return new ArrayList<>(); - } - List pinnedIds = new ArrayList<>(); - for (MemTableInfo memTableInfo : memTableInfos) { - if (memTableInfo.isFlushed() && memTableInfo.isPinned()) { - pinnedIds.add(memTableInfo.getMemTableId()); - } - } - return pinnedIds; - } - @Override public void run() { // The intent of the loop execution here is to try to get as many memTable flush or snapshot @@ -365,7 +326,7 @@ public void run() { private void updateEffectiveInfoRationAndUpdateMetric() { // calculate effective information ratio long costOfActiveMemTables = checkpointManager.getTotalCostOfActiveMemTables(); - MemTableInfo oldestUnpinnedMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo(); + 
MemTableInfo oldestUnpinnedMemTableInfo = checkpointManager.getOldestMemTableInfo(); long avgFileSize = getFileNum() != 0 ? getDiskUsage() / getFileNum() @@ -389,45 +350,10 @@ private void updateEffectiveInfoRationAndUpdateMetric() { } private void summarizeExecuteResult() { - if (!pinnedMemTableIds.isEmpty() - || fileIndexAfterFilterSafelyDeleteIndex < sortedWalFilesExcludingLast.length) { - if (logger.isDebugEnabled()) { - StringBuilder summary = - new StringBuilder( - String.format( - "wal node-%s delete outdated files summary:the range is: [%d,%d], delete successful is [%s], safely delete file index is: [%s].The following reasons influenced the result: %s", - identifier, - WALFileUtils.parseVersionId(sortedWalFilesExcludingLast[0].getName()), - WALFileUtils.parseVersionId( - sortedWalFilesExcludingLast[sortedWalFilesExcludingLast.length - 1] - .getName()), - StringUtils.join(successfullyDeleted, ","), - fileIndexAfterFilterSafelyDeleteIndex, - System.lineSeparator())); - - if (!pinnedMemTableIds.isEmpty()) { - summary - .append("- MemTable has been flushed but pinned by PIPE, the MemTableId list is : ") - .append(StringUtils.join(pinnedMemTableIds, ",")) - .append(".") - .append(System.lineSeparator()); - } - if (fileIndexAfterFilterSafelyDeleteIndex < sortedWalFilesExcludingLast.length) { - summary.append( - String.format( - "- The data in the wal file was not consumed by the consensus group,current search index is %d, safely delete index is %d", - getCurrentSearchIndex(), safelyDeletedSearchIndex)); - } - String summaryLog = summary.toString(); - logger.debug(summaryLog); - } - - } else { - logger.debug( - "Successfully delete {} outdated wal files for wal node-{}", - successfullyDeleted.size(), - identifier); - } + logger.debug( + "Successfully delete {} outdated wal files for wal node-{}", + successfullyDeleted.size(), + identifier); } /** Delete obsolete wal files while recording which succeeded or failed */ @@ -475,20 +401,10 @@ private boolean 
trySnapshotOrFlushMemTable() { return false; } // find oldest memTable - MemTableInfo oldestMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo(); + MemTableInfo oldestMemTableInfo = checkpointManager.getOldestMemTableInfo(); if (oldestMemTableInfo == null) { return false; } - if (oldestMemTableInfo.isPinned()) { - logger.warn( - "Pipe: Effective information ratio {} of wal node-{} is below wal min effective info ratio {}. But fail to delete memTable-{}'s wal files because they are pinned by the Pipe module. Pin count: {}.", - effectiveInfoRatio, - identifier, - config.getWalMinEffectiveInfoRatio(), - oldestMemTableInfo.getMemTableId(), - oldestMemTableInfo.getPinCount()); - return false; - } IMemTable oldestMemTable = oldestMemTableInfo.getMemTable(); if (oldestMemTable == null) { return false; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryHandler.java deleted file mode 100644 index f5d7406f5a60..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryHandler.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.storageengine.dataregion.wal.utils; - -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.WALPipeException; -import org.apache.iotdb.db.storageengine.dataregion.wal.node.WALNode; - -import org.apache.tsfile.utils.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicReference; - -/** - * This handler is used by the Pipe to find the corresponding {@link InsertNode}. Besides, it can - * try to pin/unpin the {@link WALEntry}s by the memTable id. - */ -public class WALEntryHandler { - - private static final Logger logger = LoggerFactory.getLogger(WALEntryHandler.class); - - private long memTableId = -1; - - // cached value, null after this value is flushed to wal successfully - @SuppressWarnings("squid:S3077") - private volatile WALEntryValue value; - - // wal entry's position in the wal, valid after the value is flushed to wal successfully - // it's safe to use volatile here to make this reference thread-safe. - @SuppressWarnings("squid:S3077") - private final WALEntryPosition walEntryPosition = new WALEntryPosition(); - - // wal node, null when wal is disabled - private WALNode walNode = null; - - private volatile boolean isHardlink = false; - private final AtomicReference hardlinkFile = new AtomicReference<>(); - - public WALEntryHandler(final WALEntryValue value) { - this.value = value; - } - - /** - * Pin the wal files of the given memory table. 
Notice: cannot pin one memTable too long, - * otherwise the wal disk usage may too large. - * - * @throws MemTablePinException If the memTable has been flushed - */ - public void pinMemTable() throws MemTablePinException { - if (walNode == null || memTableId < 0) { - throw new MemTablePinException("Fail to pin memTable because of internal error."); - } - walNode.pinMemTable(memTableId); - } - - /** - * Unpin the wal files of the given memory table. - * - * @throws MemTablePinException If there aren't corresponding pin operations - */ - public void unpinMemTable() throws MemTablePinException { - if (walNode == null || memTableId < 0) { - throw new MemTablePinException("Fail to pin memTable because of internal error."); - } - walNode.unpinMemTable(memTableId); - } - - public InsertNode getInsertNodeViaCacheIfPossible() { - try { - final WALEntryValue finalValue = value; - if (finalValue instanceof InsertNode) { - return (InsertNode) finalValue; - } - final Pair byteBufferInsertNodePair = - walEntryPosition.getByteBufferOrInsertNodeIfPossible(); - return byteBufferInsertNodePair == null ? null : byteBufferInsertNodePair.getRight(); - } catch (final Exception e) { - logger.warn("Fail to get insert node via cache. {}", this, e); - throw e; - } - } - - /** - * Get this handler's value. - * - * @throws WALPipeException when failing to get the value. 
- */ - public InsertNode getInsertNode() throws WALPipeException { - // return local cache - final WALEntryValue res = value; - if (res != null) { - if (res instanceof InsertNode) { - return (InsertNode) res; - } else { - throw new WALPipeException("Fail to get value because the entry type isn't InsertNode."); - } - } - - // wait until the position is ready - while (!walEntryPosition.canRead()) { - try { - synchronized (this) { - this.wait(); - } - } catch (final InterruptedException e) { - logger.warn("Interrupted when waiting for result.", e); - Thread.currentThread().interrupt(); - } - } - - final InsertNode node = isHardlink ? readFromHardlinkWALFile() : readFromOriginalWALFile(); - if (node == null) { - throw new WALPipeException( - String.format("Fail to get the wal value of the position %s.", walEntryPosition)); - } - return node; - } - - public ByteBuffer getByteBuffer() throws WALPipeException { - // wait until the position is ready - while (!walEntryPosition.canRead()) { - try { - synchronized (this) { - this.wait(); - } - } catch (InterruptedException e) { - logger.warn("Interrupted when waiting for result.", e); - Thread.currentThread().interrupt(); - } - } - - final ByteBuffer buffer = readByteBufferFromWALFile(); - if (buffer == null) { - throw new WALPipeException( - String.format("Fail to get the wal value of the position %s.", walEntryPosition)); - } - return buffer; - } - - private InsertNode readFromOriginalWALFile() throws WALPipeException { - try { - return walEntryPosition.readInsertNodeViaCacheAfterCanRead(); - } catch (Exception e) { - throw new WALPipeException("Fail to get value because the file content isn't correct.", e); - } - } - - private InsertNode readFromHardlinkWALFile() throws WALPipeException { - try { - return walEntryPosition.readInsertNodeViaCacheAfterCanRead(); - } catch (Exception e) { - throw new WALPipeException("Fail to get value because the file content isn't correct.", e); - } - } - - private ByteBuffer 
readByteBufferFromWALFile() throws WALPipeException { - try { - return walEntryPosition.readByteBufferViaCacheAfterCanRead(); - } catch (Exception e) { - throw new WALPipeException("Fail to get value because the file content isn't correct.", e); - } - } - - public void setWalNode(final WALNode walNode, final long memTableId) { - this.walNode = walNode; - this.memTableId = memTableId; - walEntryPosition.setWalNode(walNode, memTableId); - } - - public long getMemTableId() { - return memTableId; - } - - public void setEntryPosition(final long walFileVersionId, final long position) { - this.walEntryPosition.setEntryPosition(walFileVersionId, position, value); - this.value = null; - synchronized (this) { - this.notifyAll(); - } - } - - public WALEntryPosition getWalEntryPosition() { - return walEntryPosition; - } - - public int getSize() { - return walEntryPosition.getSize(); - } - - public void setSize(final int size) { - this.walEntryPosition.setSize(size); - } - - public void hardlinkTo(File hardlinkFile) { - isHardlink = true; - this.hardlinkFile.set(hardlinkFile); - } - - @Override - public String toString() { - return "WALEntryHandler{" - + "memTableId=" - + memTableId - + ", value=" - + value - + ", walEntryPosition=" - + walEntryPosition - + '}'; - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryPosition.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryPosition.java deleted file mode 100644 index 4d71cb1030c3..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALEntryPosition.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.storageengine.dataregion.wal.utils; - -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue; -import org.apache.iotdb.db.storageengine.dataregion.wal.io.WALInputStream; -import org.apache.iotdb.db.storageengine.dataregion.wal.node.WALNode; - -import org.apache.tsfile.utils.Pair; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.StandardOpenOption; -import java.util.Objects; - -/** - * This class uses the tuple(identifier, file, position) to denote the position of the wal entry, - * and give some methods to read the content from the disk. 
- */ -public class WALEntryPosition { - private volatile String identifier = ""; - private volatile long walFileVersionId = -1; - private volatile long position; - private volatile int size; - // wal node, null when wal is disabled - private WALNode walNode = null; - // wal file is not null when openReadFileChannel method has been called - private File walFile = null; - // cache for wal entry - private WALInsertNodeCache cache = null; - - private static final String ENTRY_NOT_READY_MESSAGE = "This entry isn't ready for read."; - - public WALEntryPosition() {} - - public WALEntryPosition(String identifier, long walFileVersionId, long position, int size) { - this.identifier = identifier; - this.walFileVersionId = walFileVersionId; - this.position = position; - this.size = size; - } - - /** - * Try to read the wal entry directly from the cache. No need to check if the wal entry is ready - * for read. - */ - public Pair getByteBufferOrInsertNodeIfPossible() { - return cache.getByteBufferOrInsertNodeIfPossible(this); - } - - /** - * Read the wal entry and parse it to the InsertNode. Use LRU cache to accelerate read. - * - * @throws IOException failing to read. - */ - public InsertNode readInsertNodeViaCacheAfterCanRead() throws IOException { - if (!canRead()) { - throw new IOException(ENTRY_NOT_READY_MESSAGE); - } - return cache.getInsertNode(this); - } - - /** - * Read the wal entry and get the raw bytebuffer. Use LRU cache to accelerate read. - * - * @throws IOException failing to read. - */ - public ByteBuffer readByteBufferViaCacheAfterCanRead() throws IOException { - if (!canRead()) { - throw new IOException(ENTRY_NOT_READY_MESSAGE); - } - return cache.getByteBuffer(this); - } - - /** - * Read the byte buffer directly. - * - * @throws IOException failing to read. 
- */ - ByteBuffer read() throws IOException { - if (!canRead()) { - throw new IOException("Target file hasn't been specified."); - } - // TODO: Reuse the file stream - try (WALInputStream is = openReadFileStream()) { - is.skipToGivenLogicalPosition(position); - ByteBuffer buffer = ByteBuffer.allocate(size); - is.read(buffer); - return buffer; - } - } - - /** - * Open the read file channel for this wal entry, this method will retry automatically when the - * file is sealed when opening the file channel. - * - * @throws IOException failing to open the file channel. - */ - public FileChannel openReadFileChannel() throws IOException { - if (isInSealedFile()) { - walFile = walNode.getWALFile(walFileVersionId); - return FileChannel.open(walFile.toPath(), StandardOpenOption.READ); - } else { - try { - walFile = walNode.getWALFile(walFileVersionId); - return FileChannel.open(walFile.toPath(), StandardOpenOption.READ); - } catch (IOException e) { - // unsealed file may be renamed after sealed, so we should try again - if (isInSealedFile()) { - walFile = walNode.getWALFile(walFileVersionId); - return FileChannel.open(walFile.toPath(), StandardOpenOption.READ); - } else { - throw e; - } - } - } - } - - public WALInputStream openReadFileStream() throws IOException { - // TODO: Refactor this part of code - if (isInSealedFile()) { - walFile = walNode.getWALFile(walFileVersionId); - return new WALInputStream(walFile); - } else { - try { - walFile = walNode.getWALFile(walFileVersionId); - return new WALInputStream(walFile); - } catch (IOException e) { - // unsealed file may be renamed after sealed, so we should try again - if (isInSealedFile()) { - walFile = walNode.getWALFile(walFileVersionId); - return new WALInputStream(walFile); - } else { - throw e; - } - } - } - } - - public File getWalFile() { - return walFile; - } - - /** Return true only when the tuple(file, position, size) is ready. 
*/ - public boolean canRead() { - return walFileVersionId >= 0; - } - - /** Return true only when this wal file is sealed. */ - public boolean isInSealedFile() { - if (walNode == null || !canRead()) { - throw new RuntimeException(ENTRY_NOT_READY_MESSAGE); - } - return walFileVersionId < walNode.getCurrentWALFileVersion(); - } - - public void setWalNode(WALNode walNode, long memTableId) { - this.walNode = walNode; - identifier = walNode.getIdentifier(); - cache = WALInsertNodeCache.getInstance(); - } - - public String getIdentifier() { - return identifier; - } - - public void setEntryPosition(long walFileVersionId, long position, WALEntryValue value) { - this.position = position; - this.walFileVersionId = walFileVersionId; - if (cache != null && value instanceof InsertNode) { - cache.cacheInsertNodeIfNeeded(this, (InsertNode) value); - } - } - - public long getPosition() { - return position; - } - - public long getWalFileVersionId() { - return walFileVersionId; - } - - public void setSize(int size) { - this.size = size; - } - - public int getSize() { - return size; - } - - @Override - public int hashCode() { - return Objects.hash(identifier, walFileVersionId, position); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - WALEntryPosition that = (WALEntryPosition) o; - return identifier.equals(that.identifier) - && walFileVersionId == that.walFileVersionId - && position == that.position; - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCache.java deleted file mode 100644 index f69dc4a08263..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCache.java +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Licensed to 
the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.db.storageengine.dataregion.wal.utils; - -import org.apache.iotdb.commons.pipe.config.PipeConfig; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.pipe.resource.PipeDataNodeResourceManager; -import org.apache.iotdb.db.pipe.resource.memory.InsertNodeMemoryEstimator; -import org.apache.iotdb.db.pipe.resource.memory.PipeMemoryBlockType; -import org.apache.iotdb.db.pipe.resource.memory.PipeModelFixedMemoryBlock; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertNode; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryType; -import org.apache.iotdb.db.storageengine.dataregion.wal.io.WALByteBufReader; - -import com.github.benmanes.caffeine.cache.CacheLoader; -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.LoadingCache; -import com.github.benmanes.caffeine.cache.Weigher; -import 
org.apache.tsfile.utils.Pair; -import org.checkerframework.checker.nullness.qual.NonNull; -import org.checkerframework.checker.nullness.qual.Nullable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -/** This cache is used by {@link WALEntryPosition}. */ -public class WALInsertNodeCache { - - private static final Logger LOGGER = LoggerFactory.getLogger(WALInsertNodeCache.class); - private static final IoTDBConfig CONFIG = IoTDBDescriptor.getInstance().getConfig(); - private static final PipeConfig PIPE_CONFIG = PipeConfig.getInstance(); - - private static PipeModelFixedMemoryBlock walModelFixedMemory = null; - - // LRU cache, find Pair by WALEntryPosition - private final LoadingCache> lruCache; - - // ids of all pinned memTables - private final Set memTablesNeedSearch = ConcurrentHashMap.newKeySet(); - - private volatile boolean hasPipeRunning = false; - - private WALInsertNodeCache() { - if (walModelFixedMemory == null) { - init(); - } - - final long requestedAllocateSize = - (long) - (PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes() - * PIPE_CONFIG.getPipeDataStructureWalMemoryProportion()); - - lruCache = - Caffeine.newBuilder() - .maximumWeight(requestedAllocateSize) - .weigher( - (Weigher>) - (position, pair) -> { - long weightInLong = 0L; - if (pair.right != null) { - weightInLong = InsertNodeMemoryEstimator.sizeOf(pair.right); - } else { - weightInLong = position.getSize(); - } - if (weightInLong <= 0) { - return Integer.MAX_VALUE; - } - final int weightInInt = (int) weightInLong; - return weightInInt != weightInLong ? 
Integer.MAX_VALUE : weightInInt; - }) - .recordStats() - .build(new WALInsertNodeCacheLoader()); - } - - // please call this method at PipeLauncher - public static void init() { - if (walModelFixedMemory != null) { - return; - } - try { - // Allocate memory for the fixed memory block of WAL - walModelFixedMemory = - PipeDataNodeResourceManager.memory() - .forceAllocateForModelFixedMemoryBlock( - (long) - (PipeDataNodeResourceManager.memory().getTotalNonFloatingMemorySizeInBytes() - * PIPE_CONFIG.getPipeDataStructureWalMemoryProportion()), - PipeMemoryBlockType.WAL); - } catch (Exception e) { - LOGGER.error("Failed to initialize WAL model fixed memory block", e); - walModelFixedMemory = - PipeDataNodeResourceManager.memory() - .forceAllocateForModelFixedMemoryBlock(0, PipeMemoryBlockType.WAL); - } - } - - /////////////////////////// Getter & Setter /////////////////////////// - - public InsertNode getInsertNode(final WALEntryPosition position) { - final Pair pair = getByteBufferOrInsertNode(position); - - if (pair.getRight() != null) { - return pair.getRight(); - } - - if (pair.getLeft() == null) { - throw new IllegalStateException(); - } - - try { - // multi pipes may share the same wal entry, so we need to wrap the byte[] into - // different ByteBuffer for each pipe - final InsertNode insertNode = parse(ByteBuffer.wrap(pair.getLeft().array())); - pair.setRight(insertNode); - return insertNode; - } catch (final Exception e) { - LOGGER.error( - "Parsing failed when recovering insertNode from wal, walFile:{}, position:{}, size:{}, exception:", - position.getWalFile(), - position.getPosition(), - position.getSize(), - e); - throw e; - } - } - - private InsertNode parse(final ByteBuffer buffer) { - final PlanNode node = WALEntry.deserializeForConsensus(buffer); - if (node instanceof InsertNode) { - return (InsertNode) node; - } else { - return null; - } - } - - public ByteBuffer getByteBuffer(final WALEntryPosition position) { - Pair pair = 
getByteBufferOrInsertNode(position); - - if (pair.getLeft() != null) { - // multi pipes may share the same wal entry, so we need to wrap the byte[] into - // different ByteBuffer for each pipe - return ByteBuffer.wrap(pair.getLeft().array()); - } - - // forbid multi threads to invalidate and load the same entry - synchronized (this) { - lruCache.invalidate(position); - pair = getByteBufferOrInsertNode(position); - } - - if (pair.getLeft() == null) { - throw new IllegalStateException(); - } - - return ByteBuffer.wrap(pair.getLeft().array()); - } - - public Pair getByteBufferOrInsertNode(final WALEntryPosition position) { - hasPipeRunning = true; - - final Pair pair = lruCache.get(position); - - if (pair == null) { - throw new IllegalStateException(); - } - - return pair; - } - - public Pair getByteBufferOrInsertNodeIfPossible( - final WALEntryPosition position) { - hasPipeRunning = true; - return lruCache.getIfPresent(position); - } - - public void cacheInsertNodeIfNeeded( - final WALEntryPosition walEntryPosition, final InsertNode insertNode) { - // reduce memory usage - if (hasPipeRunning) { - lruCache.put(walEntryPosition, new Pair<>(null, insertNode)); - } - } - - //////////////////////////// APIs provided for metric framework //////////////////////////// - - public double getCacheHitRate() { - return Objects.nonNull(lruCache) ? lruCache.stats().hitRate() : 0; - } - - public double getCacheHitCount() { - return Objects.nonNull(lruCache) ? lruCache.stats().hitCount() : 0; - } - - public double getCacheRequestCount() { - return Objects.nonNull(lruCache) ? 
lruCache.stats().requestCount() : 0; - } - - /////////////////////////// MemTable /////////////////////////// - - public void addMemTable(final long memTableId) { - memTablesNeedSearch.add(memTableId); - } - - public void removeMemTable(final long memTableId) { - memTablesNeedSearch.remove(memTableId); - } - - /////////////////////////// Cache Loader /////////////////////////// - - class WALInsertNodeCacheLoader - implements CacheLoader> { - - @Override - public @Nullable Pair load(@NonNull final WALEntryPosition key) - throws Exception { - return new Pair<>(key.read(), null); - } - - /** Batch load all wal entries in the file when any one key is absent. */ - @Override - public @NonNull Map<@NonNull WALEntryPosition, @NonNull Pair> loadAll( - @NonNull final Iterable walEntryPositions) { - final Map> loadedEntries = new HashMap<>(); - - for (final WALEntryPosition walEntryPosition : walEntryPositions) { - if (loadedEntries.containsKey(walEntryPosition) || !walEntryPosition.canRead()) { - continue; - } - - final long walFileVersionId = walEntryPosition.getWalFileVersionId(); - - // load one when wal file is not sealed - if (!walEntryPosition.isInSealedFile()) { - try { - loadedEntries.put(walEntryPosition, load(walEntryPosition)); - } catch (final Exception e) { - LOGGER.info( - "Fail to cache wal entries from the wal file with version id {}", - walFileVersionId, - e); - } - continue; - } - - // batch load when wal file is sealed - long position = 0; - try (final WALByteBufReader walByteBufReader = new WALByteBufReader(walEntryPosition)) { - while (walByteBufReader.hasNext()) { - // see WALInfoEntry#serialize, entry type + memtable id + plan node type - final ByteBuffer buffer = walByteBufReader.next(); - - final int size = buffer.capacity(); - final WALEntryType type = WALEntryType.valueOf(buffer.get()); - final long memTableId = buffer.getLong(); - - if ((memTablesNeedSearch.contains(memTableId) - || walEntryPosition.getPosition() == position) - && 
type.needSearch()) { - buffer.clear(); - loadedEntries.put( - new WALEntryPosition( - walEntryPosition.getIdentifier(), walFileVersionId, position, size), - new Pair<>(buffer, null)); - } - - position += size; - } - } catch (final IOException e) { - LOGGER.info( - "Fail to cache wal entries from the wal file with version id {}", - walFileVersionId, - e); - } - } - - return loadedEntries; - } - } - - /////////////////////////// Singleton /////////////////////////// - - public static WALInsertNodeCache getInstance() { - return InstanceHolder.INSTANCE; - } - - private static class InstanceHolder { - - public static final WALInsertNodeCache INSTANCE = new WALInsertNodeCache(); - - private InstanceHolder() { - // forbidding instantiation - } - } - - /////////////////////////// Test Only /////////////////////////// - - @TestOnly - boolean contains(WALEntryPosition position) { - return lruCache.getIfPresent(position) != null; - } - - @TestOnly - public void clear() { - lruCache.invalidateAll(); - memTablesNeedSearch.clear(); - } -} diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java index 8c84a0cb0fe5..7896ea01dcfb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/listener/WALFlushListener.java @@ -19,20 +19,10 @@ package org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntryValue; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; - /** This class helps judge whether wal is flushed to the storage device. 
*/ public class WALFlushListener extends AbstractResultListener { - // handler for pipeline, only exists when value is InsertNode - private final WALEntryHandler walEntryHandler; - public WALFlushListener(boolean wait, WALEntryValue value) { + public WALFlushListener(boolean wait) { super(wait); - walEntryHandler = new WALEntryHandler(value); - } - - public WALEntryHandler getWalEntryHandler() { - return walEntryHandler; } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/LoadTsFileManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/LoadTsFileManager.java index 25b594631aa2..28c952b3d726 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/LoadTsFileManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/LoadTsFileManager.java @@ -505,6 +505,7 @@ private void loadAll( final DataRegion dataRegion = entry.getKey().getDataRegion(); final TsFileResource tsFileResource = dataPartition2Resource.get(entry.getKey()); + tsFileResource.setGeneratedByPipe(isGeneratedByPipe); endTsFileResource( writer, tsFileResource, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java index 7dfe91ad62e3..cb147f9c2861 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTreeStatementDataTypeConvertExecutionVisitor.java @@ -20,9 +20,6 @@ package org.apache.iotdb.db.storageengine.load.converter; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.concurrent.IoTThreadFactory; -import 
org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.concurrent.threadpool.WrappedThreadPoolExecutor; import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.pipe.connector.payload.evolvable.request.PipeTransferTabletRawReq; @@ -48,14 +45,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Optional; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.RejectedExecutionHandler; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.apache.iotdb.db.pipe.resource.memory.PipeMemoryWeightUtil.calculateTabletSizeInBytes; @@ -70,9 +59,6 @@ public class LoadTreeStatementDataTypeConvertExecutionVisitor .getConfig() .getLoadTsFileTabletConversionBatchMemorySizeInBytes(); - private static final AtomicReference executorPool = - new AtomicReference<>(); - private final StatementExecutor statementExecutor; @FunctionalInterface @@ -80,21 +66,6 @@ public interface StatementExecutor { TSStatus execute(final Statement statement); } - public static class CallerBlocksPolicy implements RejectedExecutionHandler { - public CallerBlocksPolicy() {} - - public void rejectedExecution(Runnable r, ThreadPoolExecutor e) { - if (!e.isShutdown()) { - try { - e.getQueue().put(r); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - throw new RejectedExecutionException("task " + r + " rejected from " + e, ie); - } - } - } - } - public LoadTreeStatementDataTypeConvertExecutionVisitor( final StatementExecutor statementExecutor) { this.statementExecutor = statementExecutor; @@ -118,7 +89,6 @@ public Optional visitLoadFile( final 
List tabletRawReqSizes = new ArrayList<>(); try { - final List> executionFutures = new ArrayList<>(); for (final File file : loadTsFileStatement.getTsFiles()) { try (final TsFileInsertionScanDataContainer container = new TsFileInsertionScanDataContainer( @@ -136,16 +106,9 @@ file, new IoTDBPipePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null)) { continue; } - final InsertMultiTabletsStatement batchStatement = new InsertMultiTabletsStatement(); - batchStatement.setInsertTabletStatementList( - tabletRawReqs.stream() - .map( - req -> - new LoadConvertedInsertTabletStatement( - req.constructStatement(), - loadTsFileStatement.isConvertOnTypeMismatch())) - .collect(Collectors.toList())); - executionFutures.add(executeInsertMultiTabletsWithRetry(batchStatement)); + final TSStatus result = + executeInsertMultiTabletsWithRetry( + tabletRawReqs, loadTsFileStatement.isConvertOnTypeMismatch()); for (final long memoryCost : tabletRawReqSizes) { block.reduceMemoryUsage(memoryCost); @@ -153,6 +116,10 @@ file, new IoTDBPipePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null)) { tabletRawReqs.clear(); tabletRawReqSizes.clear(); + if (!handleTSStatus(result, loadTsFileStatement)) { + return Optional.empty(); + } + tabletRawReqs.add(tabletRawReq); tabletRawReqSizes.add(curMemory); block.addMemoryUsage(curMemory); @@ -166,36 +133,22 @@ file, new IoTDBPipePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null)) { if (!tabletRawReqs.isEmpty()) { try { - final InsertMultiTabletsStatement batchStatement = new InsertMultiTabletsStatement(); - batchStatement.setInsertTabletStatementList( - tabletRawReqs.stream() - .map( - req -> - new LoadConvertedInsertTabletStatement( - req.constructStatement(), - loadTsFileStatement.isConvertOnTypeMismatch())) - .collect(Collectors.toList())); - executionFutures.add(executeInsertMultiTabletsWithRetry(batchStatement)); + final TSStatus result = + executeInsertMultiTabletsWithRetry( + tabletRawReqs, 
loadTsFileStatement.isConvertOnTypeMismatch()); for (final long memoryCost : tabletRawReqSizes) { block.reduceMemoryUsage(memoryCost); } tabletRawReqs.clear(); tabletRawReqSizes.clear(); - } catch (final Exception e) { - LOGGER.warn( - "Failed to convert data type for LoadTsFileStatement: {}.", loadTsFileStatement, e); - return Optional.empty(); - } - } - for (final Future future : executionFutures) { - try { - if (!handleTSStatus(future.get(), loadTsFileStatement)) { + if (!handleTSStatus(result, loadTsFileStatement)) { return Optional.empty(); } - } catch (ExecutionException | InterruptedException e) { - LOGGER.warn("Exception occurs when executing insertion during tablet conversion: ", e); + } catch (final Exception e) { + LOGGER.warn( + "Failed to convert data type for LoadTsFileStatement: {}.", loadTsFileStatement, e); return Optional.empty(); } } @@ -226,67 +179,43 @@ file, new IoTDBPipePattern(null), Long.MIN_VALUE, Long.MAX_VALUE, null, null)) { return Optional.of(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); } - private Future executeInsertMultiTabletsWithRetry( - final InsertMultiTabletsStatement batchStatement) { - return getExecutorPool() - .submit( - () -> { - TSStatus result; - try { - result = - batchStatement.accept( - LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR, - statementExecutor.execute(batchStatement)); - - // Retry max 5 times if the write process is rejected - for (int i = 0; - i < 5 - && result.getCode() - == TSStatusCode.LOAD_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode(); - i++) { - Thread.sleep(100L * (i + 1)); - result = - batchStatement.accept( - LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR, - statementExecutor.execute(batchStatement)); - } - } catch (final Exception e) { - if (e instanceof InterruptedException) { - Thread.currentThread().interrupt(); - } - result = - batchStatement.accept( - LoadTsFileDataTypeConverter.STATEMENT_EXCEPTION_VISITOR, e); - } - return result; - }); - } - - public static 
WrappedThreadPoolExecutor getExecutorPool() { - if (executorPool.get() == null) { - synchronized (executorPool) { - if (executorPool.get() == null) { - executorPool.set( - new WrappedThreadPoolExecutor( - IoTDBDescriptor.getInstance() - .getConfig() - .getLoadTsFileTabletConversionThreadCount(), - IoTDBDescriptor.getInstance() - .getConfig() - .getLoadTsFileTabletConversionThreadCount(), - 0L, - TimeUnit.SECONDS, - new ArrayBlockingQueue<>( - IoTDBDescriptor.getInstance() - .getConfig() - .getLoadTsFileTabletConversionThreadCount()), - new IoTThreadFactory(ThreadName.LOAD_DATATYPE_CONVERT_POOL.getName()), - ThreadName.LOAD_DATATYPE_CONVERT_POOL.getName(), - new CallerBlocksPolicy())); - } + private TSStatus executeInsertMultiTabletsWithRetry( + final List tabletRawReqs, boolean isConvertOnTypeMismatch) { + final InsertMultiTabletsStatement batchStatement = new InsertMultiTabletsStatement(); + batchStatement.setInsertTabletStatementList( + tabletRawReqs.stream() + .map( + req -> + new LoadConvertedInsertTabletStatement( + req.constructStatement(), isConvertOnTypeMismatch)) + .collect(Collectors.toList())); + + TSStatus result; + try { + result = + batchStatement.accept( + LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR, + statementExecutor.execute(batchStatement)); + + // Retry max 5 times if the write process is rejected + for (int i = 0; + i < 5 + && result.getCode() + == TSStatusCode.LOAD_TEMPORARY_UNAVAILABLE_EXCEPTION.getStatusCode(); + i++) { + Thread.sleep(100L * (i + 1)); + result = + batchStatement.accept( + LoadTsFileDataTypeConverter.STATEMENT_STATUS_VISITOR, + statementExecutor.execute(batchStatement)); + } + } catch (final Exception e) { + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); } + result = batchStatement.accept(LoadTsFileDataTypeConverter.STATEMENT_EXCEPTION_VISITOR, e); } - return executorPool.get(); + return result; } private static boolean handleTSStatus( diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java index a46546e3e361..81a1f0639beb 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/load/converter/LoadTsFileDataTypeConverter.java @@ -20,14 +20,13 @@ package org.apache.iotdb.db.storageengine.load.converter; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.protocol.session.IClientSession; -import org.apache.iotdb.db.protocol.session.InternalClientSession; import org.apache.iotdb.db.protocol.session.SessionManager; +import org.apache.iotdb.db.queryengine.common.MPPQueryContext; import org.apache.iotdb.db.queryengine.plan.Coordinator; import org.apache.iotdb.db.queryengine.plan.analyze.ClusterPartitionFetcher; +import org.apache.iotdb.db.queryengine.plan.analyze.lock.DataNodeSchemaLockManager; +import org.apache.iotdb.db.queryengine.plan.analyze.lock.SchemaLockType; import org.apache.iotdb.db.queryengine.plan.analyze.schema.ClusterSchemaFetcher; import org.apache.iotdb.db.queryengine.plan.statement.Statement; import org.apache.iotdb.db.queryengine.plan.statement.crud.LoadTsFileStatement; @@ -37,7 +36,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.time.ZoneId; import java.util.Optional; public class LoadTsFileDataTypeConverter { @@ -52,16 +50,20 @@ public class LoadTsFileDataTypeConverter { STATEMENT_EXCEPTION_VISITOR = new LoadConvertedInsertTabletStatementExceptionVisitor(); private final boolean isGeneratedByPipe; + private final MPPQueryContext context; private final 
LoadTreeStatementDataTypeConvertExecutionVisitor treeStatementDataTypeConvertExecutionVisitor = new LoadTreeStatementDataTypeConvertExecutionVisitor(this::executeForTreeModel); - public LoadTsFileDataTypeConverter(final boolean isGeneratedByPipe) { + public LoadTsFileDataTypeConverter( + final MPPQueryContext context, final boolean isGeneratedByPipe) { + this.context = context; this.isGeneratedByPipe = isGeneratedByPipe; } public Optional convertForTreeModel(final LoadTsFileStatement loadTsFileTreeStatement) { + DataNodeSchemaLockManager.getInstance().releaseReadLock(context); try { return loadTsFileTreeStatement.accept(treeStatementDataTypeConvertExecutionVisitor, null); } catch (Exception e) { @@ -69,36 +71,24 @@ public Optional convertForTreeModel(final LoadTsFileStatement loadTsFi "Failed to convert data types for tree model statement {}.", loadTsFileTreeStatement, e); return Optional.of( new TSStatus(TSStatusCode.LOAD_FILE_ERROR.getStatusCode()).setMessage(e.getMessage())); + } finally { + DataNodeSchemaLockManager.getInstance() + .takeReadLock(context, SchemaLockType.VALIDATE_VS_DELETION); } } private TSStatus executeForTreeModel(final Statement statement) { - final IClientSession session = - new InternalClientSession( - String.format( - "%s_%s", - LoadTsFileDataTypeConverter.class.getSimpleName(), - Thread.currentThread().getName())); - session.setUsername(AuthorityChecker.SUPER_USER); - session.setClientVersion(IoTDBConstant.ClientVersion.V_1_0); - session.setZoneId(ZoneId.systemDefault()); - - SESSION_MANAGER.registerSession(session); - try { - return Coordinator.getInstance() - .executeForTreeModel( - isGeneratedByPipe ? 
new PipeEnrichedStatement(statement) : statement, - SESSION_MANAGER.requestQueryId(), - SESSION_MANAGER.getSessionInfo(session), - "", - ClusterPartitionFetcher.getInstance(), - ClusterSchemaFetcher.getInstance(), - IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), - false) - .status; - } finally { - SESSION_MANAGER.removeCurrSession(); - } + return Coordinator.getInstance() + .executeForTreeModel( + isGeneratedByPipe ? new PipeEnrichedStatement(statement) : statement, + SESSION_MANAGER.requestQueryId(), + SESSION_MANAGER.getSessionInfo(SESSION_MANAGER.getCurrSession()), + "", + ClusterPartitionFetcher.getInstance(), + ClusterSchemaFetcher.getInstance(), + IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(), + false) + .status; } public boolean isSuccessful(final TSStatus status) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/execution/SubscriptionSubtaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/execution/SubscriptionSubtaskExecutor.java index f473a56a4478..5dcc5d8943cc 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/execution/SubscriptionSubtaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/execution/SubscriptionSubtaskExecutor.java @@ -26,25 +26,22 @@ import org.apache.iotdb.db.pipe.agent.task.execution.PipeConnectorSubtaskExecutor; import org.apache.iotdb.db.subscription.task.subtask.SubscriptionReceiverSubtask; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; public class SubscriptionSubtaskExecutor extends PipeConnectorSubtaskExecutor { - - private static final Logger LOGGER = 
LoggerFactory.getLogger(SubscriptionSubtaskExecutor.class); + private static final AtomicInteger id = new AtomicInteger(0); private final AtomicLong submittedReceiverSubtasks = new AtomicLong(0); public SubscriptionSubtaskExecutor() { super( SubscriptionConfig.getInstance().getSubscriptionSubtaskExecutorMaxThreadNum(), - ThreadName.SUBSCRIPTION_EXECUTOR_POOL); + ThreadName.SUBSCRIPTION_EXECUTOR_POOL.getName() + "-" + id.getAndIncrement()); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/stage/SubscriptionTaskConnectorStage.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/stage/SubscriptionTaskConnectorStage.java index 003d712cc5b4..7d26701004ad 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/stage/SubscriptionTaskConnectorStage.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/task/stage/SubscriptionTaskConnectorStage.java @@ -36,7 +36,7 @@ public SubscriptionTaskConnectorStage( PipeParameters pipeConnectorParameters, int regionId, PipeConnectorSubtaskExecutor executor) { - super(pipeName, creationTime, pipeConnectorParameters, regionId, executor); + super(pipeName, creationTime, pipeConnectorParameters, regionId, () -> executor); } @Override @@ -44,7 +44,7 @@ protected void registerSubtask() { this.connectorSubtaskId = SubscriptionConnectorSubtaskManager.instance() .register( - executor, + executor.get(), pipeConnectorParameters, new PipeTaskConnectorRuntimeEnvironment(pipeName, creationTime, regionId)); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/validate/TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/validate/TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool.java new file mode 100644 index 000000000000..e14f7e81d054 --- /dev/null +++ 
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/validate/TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool.java @@ -0,0 +1,263 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.tools.validate; + +import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; + +import org.apache.tsfile.common.constant.TsFileConstant; +import org.slf4j.Logger; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +public class TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool { + + private static final Logger LOGGER = + org.slf4j.LoggerFactory.getLogger( + TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool.class); + + private static final String USAGE = + "Usage: --expected true|false --dirs ...\n" + + " --expected: whether the TsFileResource is expected to be generated by pipe\n" + + " 
--dirs: list of data directories to validate and repair"; + + private static final Set dataDirs = new ConcurrentSkipListSet<>(); + private static final AtomicBoolean expectedMark = new AtomicBoolean(true); + + private static final AtomicLong runtime = new AtomicLong(System.currentTimeMillis()); + + private static final AtomicInteger totalTsFileNum = new AtomicInteger(0); + private static final AtomicInteger toRepairTsFileNum = new AtomicInteger(0); + + // Usage: --expected true|false --dirs ... + public static void main(String[] args) throws IOException { + parseCommandLineArgs(args); + final List partitionDirs = findAllPartitionDirs(); + partitionDirs.parallelStream() + .forEach( + TsFileResourceIsGeneratedByPipeMarkValidationAndRepairTool + ::validateAndRepairTsFileResourcesInPartition); + printStatistics(); + } + + private static void parseCommandLineArgs(final String[] args) { + final Set argSet = + new ConcurrentSkipListSet<>( + args.length > 0 ? Arrays.asList(args) : Collections.emptyList()); + if (args.length == 0 + || argSet.contains("--help") + || argSet.contains("-h") + || !(argSet.contains("--expected") && argSet.contains("--dirs"))) { + LOGGER.info(USAGE); + System.exit(1); + } + + for (int i = 0; i < args.length; i++) { + if ("--expected".equals(args[i]) && i + 1 < args.length) { + expectedMark.set(Boolean.parseBoolean(args[++i])); + } else if ("--dirs".equals(args[i]) && i + 1 < args.length) { + i++; + while (i < args.length && !args[i].startsWith("--")) { + dataDirs.add(new File(args[i++])); + } + i--; + } else { + LOGGER.info("Unknown argument: {}", args[i]); + LOGGER.info(USAGE); + // Exit if an unknown argument is encountered + System.exit(1); + } + } + + if (dataDirs.isEmpty()) { + LOGGER.info("No data directories provided. 
Please specify with --dirs ..."); + System.exit(1); + } + + LOGGER.info("------------------------------------------------------"); + LOGGER.info("Expected mark: {}", expectedMark.get()); + LOGGER.info("Data directories: "); + for (File dir : dataDirs) { + LOGGER.info(" {}", dir.getAbsolutePath()); + } + LOGGER.info("------------------------------------------------------"); + } + + private static List findAllPartitionDirs() { + final List partitionDirs = new ArrayList<>(); + for (final File dataDir : dataDirs) { + if (dataDir.exists() && dataDir.isDirectory()) { + partitionDirs.addAll(findLeafDirectories(dataDir)); + } + } + return partitionDirs; + } + + public static List findLeafDirectories(File dir) { + List leafDirectories = new ArrayList<>(); + + File[] files = dir.listFiles(); + + if (files == null || files.length == 0) { + leafDirectories.add(dir); + return leafDirectories; + } + + for (File file : files) { + if (file.isDirectory()) { + leafDirectories.addAll(findLeafDirectories(file)); + } + } + + if (leafDirectories.isEmpty()) { + leafDirectories.add(dir); + } + + return leafDirectories; + } + + private static void validateAndRepairTsFileResourcesInPartition(final File partitionDir) { + final AtomicInteger totalResources = new AtomicInteger(); + final AtomicInteger toRepairResources = new AtomicInteger(); + + try { + final List resources = + loadAllTsFileResources(Collections.singletonList(partitionDir)); + totalResources.addAndGet(resources.size()); + + for (final TsFileResource resource : resources) { + try { + if (validateAndRepairSingleTsFileResource(resource)) { + toRepairResources.incrementAndGet(); + } + } catch (final Exception e) { + // Continue processing other resources even if one fails + LOGGER.warn( + "Error validating or repairing resource {}: {}", + resource.getTsFile().getAbsolutePath(), + e.getMessage(), + e); + } + } + } catch (final Exception e) { + LOGGER.warn( + "Error loading resources from partition {}: {}", + 
partitionDir.getAbsolutePath(), + e.getMessage(), + e); + } + + totalTsFileNum.addAndGet(totalResources.get()); + toRepairTsFileNum.addAndGet(toRepairResources.get()); + LOGGER.info( + "TimePartition {} has {} total resources, {} to repair resources. Process completed.", + partitionDir, + totalResources.get(), + toRepairResources.get()); + } + + private static List loadAllTsFileResources(List timePartitionDirs) + throws IOException { + final List resources = new ArrayList<>(); + + for (final File timePartitionDir : timePartitionDirs) { + for (final File tsfile : Objects.requireNonNull(timePartitionDir.listFiles())) { + final String filePath = tsfile.getAbsolutePath(); + if (!filePath.endsWith(TsFileConstant.TSFILE_SUFFIX) || !tsfile.isFile()) { + continue; + } + String resourcePath = tsfile.getAbsolutePath() + TsFileResource.RESOURCE_SUFFIX; + + if (!new File(resourcePath).exists()) { + LOGGER.info( + "{} is skipped because resource file is not exist.", tsfile.getAbsolutePath()); + continue; + } + + TsFileResource resource = new TsFileResource(tsfile); + resource.deserialize(); + resource.close(); + resources.add(resource); + } + } + + return resources; + } + + /** + * Validates and repairs a single TsFileResource. 
+ * + * @param resource the TsFileResource to validate and repair + * @return true if the resource needs to be repaired and false if it is valid + */ + private static boolean validateAndRepairSingleTsFileResource(TsFileResource resource) { + if (resource.isGeneratedByPipe() == expectedMark.get()) { + // The resource is valid, no need to repair + return false; + } + + LOGGER.info( + "Repairing TsFileResource: {}, expected mark: {}, actual mark: {}", + resource.getTsFile().getAbsolutePath(), + expectedMark.get(), + resource.isGeneratedByPipe()); + + try { + repairSingleTsFileResource(resource); + + LOGGER.info( + "Marked TsFileResource as {} in resource: {}", + expectedMark.get(), + resource.getTsFile().getAbsolutePath()); + } catch (final Exception e) { + LOGGER.warn( + "ERROR: Failed to repair TsFileResource: {}, error: {}", + resource.getTsFile().getAbsolutePath(), + e.getMessage()); + } + + return true; + } + + private static void repairSingleTsFileResource(TsFileResource resource) throws IOException { + resource.setGeneratedByPipe(expectedMark.get()); + resource.serialize(); + } + + private static void printStatistics() { + LOGGER.info("------------------------------------------------------"); + LOGGER.info("Validation and repair completed. 
Statistics:"); + LOGGER.info( + "Total time taken: {} ms, total TsFile resources: {}, repaired TsFile resources: {}", + System.currentTimeMillis() - runtime.get(), + totalTsFileNum.get(), + toRepairTsFileNum.get()); + } +} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java index 8e69473b95c6..a581d111dc6b 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/connector/PipeDataNodeThriftRequestTest.java @@ -71,7 +71,6 @@ public void testPipeTransferDataNodeHandshakeReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getTimestampPrecision(), deserializeReq.getTimestampPrecision()); } @@ -94,7 +93,6 @@ public void testPipeTransferInsertNodeReq() { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getInsertNode(), deserializeReq.getInsertNode()); @@ -114,7 +112,6 @@ public void testPipeTransferTabletBinaryReq() { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); } @Test @@ -137,7 +134,6 @@ public void testPipeTransferSchemaPlanReq() { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); 
Assert.assertEquals(req.getPlanNode(), deserializeReq.getPlanNode()); } @@ -176,7 +172,6 @@ public void testPipeTransferTabletReq() { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); final Statement statement = req.constructStatement(); // will call PipeTransferTabletRawReq.sortTablet() here @@ -264,8 +259,6 @@ public void testPipeTransferTabletBatchReq() throws IOException { final PipeTransferTabletBatchReq deserializedReq = PipeTransferTabletBatchReq.fromTPipeTransferReq(req); - Assert.assertArrayEquals( - new byte[] {'a', 'b'}, deserializedReq.getBinaryReqs().get(0).getBody()); Assert.assertEquals(node, deserializedReq.getInsertNodeReqs().get(0).getInsertNode()); Assert.assertEquals(t, deserializedReq.getTabletReqs().get(0).getTablet()); Assert.assertFalse(deserializedReq.getTabletReqs().get(0).getIsAligned()); @@ -283,7 +276,6 @@ public void testPipeTransferFilePieceReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileName(), deserializeReq.getFileName()); Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset()); @@ -302,7 +294,6 @@ public void testPipeTransferFilePieceWithModReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileName(), deserializeReq.getFileName()); Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset()); @@ -321,7 +312,6 @@ public void testPipeTransferSchemaSnapshotPieceReq() throws IOException { 
Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileName(), deserializeReq.getFileName()); Assert.assertEquals(req.getStartWritingOffset(), deserializeReq.getStartWritingOffset()); @@ -339,7 +329,6 @@ public void testPipeTransferTsFileSealReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileName(), deserializeReq.getFileName()); Assert.assertEquals(req.getFileLength(), deserializeReq.getFileLength()); @@ -361,7 +350,6 @@ public void testPipeTransferSchemaSnapshotSealReq() throws IOException { Assert.assertEquals(req.getVersion(), deserializeReq.getVersion()); Assert.assertEquals(req.getType(), deserializeReq.getType()); - Assert.assertArrayEquals(req.getBody(), deserializeReq.getBody()); Assert.assertEquals(req.getFileNames(), deserializeReq.getFileNames()); Assert.assertEquals(req.getFileLengths(), deserializeReq.getFileLengths()); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java index d97f2da68e6d..f59038982ea1 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/event/TsFileInsertionDataContainerTest.java @@ -19,12 +19,10 @@ package org.apache.iotdb.db.pipe.event; -import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.pipe.datastructure.pattern.IoTDBPipePattern; import org.apache.iotdb.commons.pipe.datastructure.pattern.PipePattern; import 
org.apache.iotdb.commons.pipe.datastructure.pattern.PrefixPipePattern; import org.apache.iotdb.db.pipe.event.common.tablet.PipeRawTabletInsertionEvent; -import org.apache.iotdb.db.pipe.event.common.tsfile.PipeTsFileInsertionEvent; import org.apache.iotdb.db.pipe.event.common.tsfile.container.TsFileInsertionDataContainer; import org.apache.iotdb.db.pipe.event.common.tsfile.container.query.TsFileInsertionQueryDataContainer; import org.apache.iotdb.db.pipe.event.common.tsfile.container.scan.TsFileInsertionScanDataContainer; @@ -524,7 +522,7 @@ private void testMixedTsFileWithEmptyChunk(final boolean isQuery) throws IOExcep } private void testPartialNullValue(final boolean isQuery) - throws IOException, WriteProcessException, IllegalPathException { + throws IOException, WriteProcessException { alignedTsFile = new File("0-0-2-0.tsfile"); final List schemaList = new ArrayList<>(); @@ -558,18 +556,6 @@ private void testTsFilePointNum( final long endTime, final boolean isQuery, final int expectedCount) { - PipeTsFileInsertionEvent tsFileInsertionEvent = - new PipeTsFileInsertionEvent( - new TsFileResource(tsFile), - true, - false, - false, - null, - 0, - null, - null, - Long.MIN_VALUE, - Long.MAX_VALUE); try (final TsFileInsertionDataContainer tsFileContainer = isQuery ? 
new TsFileInsertionQueryDataContainer(tsFile, pattern, startTime, endTime) diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java index 46e957917068..afac224e8ee7 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/extractor/PipeRealtimeExtractTest.java @@ -20,7 +20,9 @@ package org.apache.iotdb.db.pipe.extractor; import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.consensus.index.impl.MinimumProgressIndex; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTaskMeta; import org.apache.iotdb.commons.pipe.config.constant.PipeExtractorConstant; import org.apache.iotdb.commons.pipe.config.plugin.configuraion.PipeTaskRuntimeConfiguration; import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskExtractorRuntimeEnvironment; @@ -33,13 +35,13 @@ import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; import org.apache.iotdb.db.storageengine.dataregion.tsfile.TsFileResource; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameterValidator; import org.apache.iotdb.pipe.api.customizer.parameter.PipeParameters; import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.event.dml.insertion.TabletInsertionEvent; import org.apache.tsfile.common.constant.TsFileConstant; +import org.apache.tsfile.enums.TSDataType; import org.apache.tsfile.file.metadata.PlainDeviceID; import org.junit.After; import org.junit.Assert; @@ -63,8 +65,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.function.Function; -import static org.mockito.Mockito.mock; - public class PipeRealtimeExtractTest { private static final Logger LOGGER = LoggerFactory.getLogger(PipeRealtimeExtractTest.class); @@ -106,36 +106,37 @@ public void tearDown() { public void testRealtimeExtractProcess() { // set up realtime extractor - try (PipeRealtimeDataRegionLogExtractor extractor0 = new PipeRealtimeDataRegionLogExtractor(); - PipeRealtimeDataRegionHybridExtractor extractor1 = + try (final PipeRealtimeDataRegionLogExtractor extractor0 = + new PipeRealtimeDataRegionLogExtractor(); + final PipeRealtimeDataRegionHybridExtractor extractor1 = new PipeRealtimeDataRegionHybridExtractor(); - PipeRealtimeDataRegionTsFileExtractor extractor2 = + final PipeRealtimeDataRegionTsFileExtractor extractor2 = new PipeRealtimeDataRegionTsFileExtractor(); - PipeRealtimeDataRegionHybridExtractor extractor3 = + final PipeRealtimeDataRegionHybridExtractor extractor3 = new PipeRealtimeDataRegionHybridExtractor()) { - PipeParameters parameters0 = + final PipeParameters parameters0 = new PipeParameters( new HashMap() { { put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, pattern1); } }); - PipeParameters parameters1 = + final PipeParameters parameters1 = new PipeParameters( new HashMap() { { put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, pattern2); } }); - PipeParameters parameters2 = + final PipeParameters parameters2 = new PipeParameters( new HashMap() { { put(PipeExtractorConstant.EXTRACTOR_PATTERN_KEY, pattern1); } }); - PipeParameters parameters3 = + final PipeParameters parameters3 = new PipeParameters( new HashMap() { { @@ -143,18 +144,34 @@ public void testRealtimeExtractProcess() { } }); - PipeTaskRuntimeConfiguration configuration0 = + final PipeTaskRuntimeConfiguration configuration0 = new PipeTaskRuntimeConfiguration( - new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion1), null)); - PipeTaskRuntimeConfiguration configuration1 = + new 
PipeTaskExtractorRuntimeEnvironment( + "1", + 1, + Integer.parseInt(dataRegion1), + new PipeTaskMeta(MinimumProgressIndex.INSTANCE, 1))); + final PipeTaskRuntimeConfiguration configuration1 = new PipeTaskRuntimeConfiguration( - new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion1), null)); - PipeTaskRuntimeConfiguration configuration2 = + new PipeTaskExtractorRuntimeEnvironment( + "1", + 1, + Integer.parseInt(dataRegion1), + new PipeTaskMeta(MinimumProgressIndex.INSTANCE, 1))); + final PipeTaskRuntimeConfiguration configuration2 = new PipeTaskRuntimeConfiguration( - new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion2), null)); - PipeTaskRuntimeConfiguration configuration3 = + new PipeTaskExtractorRuntimeEnvironment( + "1", + 1, + Integer.parseInt(dataRegion2), + new PipeTaskMeta(MinimumProgressIndex.INSTANCE, 1))); + final PipeTaskRuntimeConfiguration configuration3 = new PipeTaskRuntimeConfiguration( - new PipeTaskExtractorRuntimeEnvironment("1", 1, Integer.parseInt(dataRegion2), null)); + new PipeTaskExtractorRuntimeEnvironment( + "1", + 1, + Integer.parseInt(dataRegion2), + new PipeTaskMeta(MinimumProgressIndex.INSTANCE, 1))); // Some parameters of extractor are validated and initialized during the validation process. 
extractor0.validate(new PipeParameterValidator(parameters0)); @@ -166,7 +183,7 @@ public void testRealtimeExtractProcess() { extractor3.validate(new PipeParameterValidator(parameters3)); extractor3.customize(parameters3, configuration3); - PipeRealtimeDataRegionExtractor[] extractors = + final PipeRealtimeDataRegionExtractor[] extractors = new PipeRealtimeDataRegionExtractor[] {extractor0, extractor1, extractor2, extractor3}; // start extractor 0, 1 @@ -174,7 +191,7 @@ public void testRealtimeExtractProcess() { extractors[1].start(); // test result of extractor 0, 1 - int writeNum = 10; + final int writeNum = 10; List> writeFutures = Arrays.asList( write2DataRegion(writeNum, dataRegion1, 0), @@ -192,7 +209,7 @@ public void testRealtimeExtractProcess() { try { listenFutures.get(0).get(10, TimeUnit.MINUTES); listenFutures.get(1).get(10, TimeUnit.MINUTES); - } catch (TimeoutException e) { + } catch (final TimeoutException e) { LOGGER.warn("Time out when listening extractor", e); alive.set(false); Assert.fail(); @@ -234,7 +251,7 @@ public void testRealtimeExtractProcess() { listenFutures.get(1).get(10, TimeUnit.MINUTES); listenFutures.get(2).get(10, TimeUnit.MINUTES); listenFutures.get(3).get(10, TimeUnit.MINUTES); - } catch (TimeoutException e) { + } catch (final TimeoutException e) { LOGGER.warn("Time out when listening extractor", e); alive.set(false); Assert.fail(); @@ -247,34 +264,34 @@ public void testRealtimeExtractProcess() { throw new RuntimeException(e); } }); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } } - private Future write2DataRegion(int writeNum, String dataRegionId, int startNum) { - - File dataRegionDir = + private Future write2DataRegion( + final int writeNum, final String dataRegionId, final int startNum) { + final File dataRegionDir = new File(tsFileDir.getPath() + File.separator + dataRegionId + File.separator + "0"); - boolean ignored = dataRegionDir.mkdirs(); + final boolean ignored = 
dataRegionDir.mkdirs(); return writeService.submit( () -> { for (int i = startNum; i < startNum + writeNum; ++i) { - File tsFile = new File(dataRegionDir, String.format("%s-%s-0-0.tsfile", i, i)); + final File tsFile = new File(dataRegionDir, String.format("%s-%s-0-0.tsfile", i, i)); try { - boolean ignored1 = tsFile.createNewFile(); - } catch (IOException e) { + final boolean ignored1 = tsFile.createNewFile(); + } catch (final IOException e) { e.printStackTrace(); throw new RuntimeException(e); } - TsFileResource resource = new TsFileResource(tsFile); + final TsFileResource resource = new TsFileResource(tsFile); resource.updateStartTime( new PlainDeviceID(String.join(TsFileConstant.PATH_SEPARATOR, device)), 0); try { resource.close(); - } catch (IOException e) { + } catch (final IOException e) { e.printStackTrace(); throw new RuntimeException(e); } @@ -282,39 +299,39 @@ private Future write2DataRegion(int writeNum, String dataRegionId, int startN PipeInsertionDataNodeListener.getInstance() .listenToInsertNode( dataRegionId, - mock(WALEntryHandler.class), new InsertRowNode( new PlanNodeId(String.valueOf(i)), new PartialPath(device), false, new String[] {"a"}, - null, + new TSDataType[] {TSDataType.INT32}, 0, - null, + new Integer[] {1}, false), resource); PipeInsertionDataNodeListener.getInstance() .listenToInsertNode( dataRegionId, - mock(WALEntryHandler.class), new InsertRowNode( new PlanNodeId(String.valueOf(i)), new PartialPath(device), false, new String[] {"b"}, - null, + new TSDataType[] {TSDataType.INT32}, 0, - null, + new Integer[] {1}, false), resource); PipeInsertionDataNodeListener.getInstance() - .listenToTsFile(dataRegionId, resource, false, false); + .listenToTsFile(dataRegionId, resource, false); } }); } private Future listen( - PipeRealtimeDataRegionExtractor extractor, Function weight, int expectNum) { + final PipeRealtimeDataRegionExtractor extractor, + final Function weight, + final int expectNum) { return listenerService.submit( () -> { int 
eventNum = 0; @@ -323,7 +340,7 @@ private Future listen( Event event; try { event = extractor.supply(); - } catch (Exception e) { + } catch (final Exception e) { throw new RuntimeException(e); } if (event != null) { diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java index 0281bb035194..85b432a5114f 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeTsFileResourceManagerTest.java @@ -24,7 +24,6 @@ import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.db.pipe.agent.PipeDataNodeAgent; -import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResource; import org.apache.iotdb.db.pipe.resource.tsfile.PipeTsFileResourceManager; import org.apache.iotdb.db.storageengine.dataregion.modification.Deletion; import org.apache.iotdb.db.storageengine.dataregion.modification.Modification; @@ -47,9 +46,7 @@ import java.io.File; import java.io.IOException; import java.nio.file.Files; -import java.util.concurrent.TimeUnit; -import static org.awaitility.Awaitility.await; import static org.junit.Assert.fail; public class PipeTsFileResourceManagerTest { @@ -59,6 +56,7 @@ public class PipeTsFileResourceManagerTest { ROOT_DIR + File.separator + IoTDBConstant.SEQUENCE_FOLDER_NAME; private static final String TS_FILE_NAME = SEQUENCE_DIR + File.separator + "test.tsfile"; private static final String MODS_FILE_NAME = TS_FILE_NAME + ".mods"; + private static final String PIPE_NAME = "pipe"; private PipeTsFileResourceManager pipeTsFileResourceManager; @@ -156,53 +154,55 @@ public void tearDown() throws Exception { } @Test - public void testIncreaseTsfile() throws IOException { - File originTsfile = new 
File(TS_FILE_NAME); - File originModFile = new File(MODS_FILE_NAME); - Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originTsfile)); - Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile)); - - File pipeTsfile = pipeTsFileResourceManager.increaseFileReference(originTsfile, true, null); - File pipeModFile = pipeTsFileResourceManager.increaseFileReference(originModFile, false, null); - Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile)); - Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile)); + public void testIncreaseTsFile() throws IOException { + final File originTsfile = new File(TS_FILE_NAME); + final File originModFile = new File(MODS_FILE_NAME); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originTsfile, null)); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile, null)); + + final File pipeTsfile = + pipeTsFileResourceManager.increaseFileReference(originTsfile, true, PIPE_NAME); + final File pipeModFile = + pipeTsFileResourceManager.increaseFileReference(originModFile, false, PIPE_NAME); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null)); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null)); Assert.assertTrue(Files.exists(originTsfile.toPath())); Assert.assertTrue(Files.exists(originModFile.toPath())); Assert.assertTrue(Files.exists(pipeTsfile.toPath())); Assert.assertTrue(Files.exists(pipeModFile.toPath())); - pipeTsFileResourceManager.increaseFileReference(originTsfile, true, null); - pipeTsFileResourceManager.increaseFileReference(originModFile, false, null); - Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile)); - Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile)); - - // test use hardlinkTsFile to increase reference counts - 
pipeTsFileResourceManager.increaseFileReference(pipeTsfile, true, null); - Assert.assertEquals(3, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile)); + // test use assigner's hardlinkTsFile to increase reference counts + // test null, shall not reuse the pipe's tsFile + pipeTsFileResourceManager.increaseFileReference(pipeTsfile, true, PIPE_NAME); + Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null)); + Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, PIPE_NAME)); Assert.assertTrue(Files.exists(originTsfile.toPath())); Assert.assertTrue(Files.exists(pipeTsfile.toPath())); // test use copyFile to increase reference counts - pipeTsFileResourceManager.increaseFileReference(pipeModFile, false, null); - Assert.assertEquals(3, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile)); + pipeTsFileResourceManager.increaseFileReference(pipeModFile, false, PIPE_NAME); + Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null)); + Assert.assertEquals(2, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, PIPE_NAME)); Assert.assertTrue(Files.exists(originModFile.toPath())); Assert.assertTrue(Files.exists(pipeModFile.toPath())); } @Test - public void testDecreaseTsfile() throws IOException { - File originFile = new File(TS_FILE_NAME); - File originModFile = new File(MODS_FILE_NAME); - - pipeTsFileResourceManager.decreaseFileReference(originFile); - pipeTsFileResourceManager.decreaseFileReference(originModFile); - Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originFile)); - Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile)); - - File pipeTsfile = pipeTsFileResourceManager.increaseFileReference(originFile, true, null); - File pipeModFile = pipeTsFileResourceManager.increaseFileReference(originModFile, false, null); - Assert.assertEquals(1, 
pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile)); - Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile)); + public void testDecreaseTsFile() throws IOException { + final File originFile = new File(TS_FILE_NAME); + final File originModFile = new File(MODS_FILE_NAME); + + pipeTsFileResourceManager.decreaseFileReference(originFile, PIPE_NAME); + pipeTsFileResourceManager.decreaseFileReference(originModFile, PIPE_NAME); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originFile, null)); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(originModFile, null)); + + final File pipeTsfile = + pipeTsFileResourceManager.increaseFileReference(originFile, true, PIPE_NAME); + final File pipeModFile = + pipeTsFileResourceManager.increaseFileReference(originModFile, false, PIPE_NAME); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null)); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null)); Assert.assertTrue(Files.exists(pipeTsfile.toPath())); Assert.assertTrue(Files.exists(pipeModFile.toPath())); Assert.assertTrue(Files.exists(pipeTsfile.toPath())); @@ -213,26 +213,22 @@ public void testDecreaseTsfile() throws IOException { Assert.assertFalse(Files.exists(originFile.toPath())); Assert.assertFalse(Files.exists(originModFile.toPath())); - Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile)); - Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile)); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, PIPE_NAME)); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null)); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, PIPE_NAME)); + Assert.assertEquals(1, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null)); 
Assert.assertFalse(Files.exists(originFile.toPath())); Assert.assertFalse(Files.exists(originModFile.toPath())); Assert.assertTrue(Files.exists(pipeTsfile.toPath())); Assert.assertTrue(Files.exists(pipeModFile.toPath())); - pipeTsFileResourceManager.decreaseFileReference(pipeTsfile); - pipeTsFileResourceManager.decreaseFileReference(pipeModFile); - Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile)); - Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile)); + pipeTsFileResourceManager.decreaseFileReference(pipeTsfile, PIPE_NAME); + pipeTsFileResourceManager.decreaseFileReference(pipeModFile, PIPE_NAME); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, PIPE_NAME)); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeTsfile, null)); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, PIPE_NAME)); + Assert.assertEquals(0, pipeTsFileResourceManager.getFileReferenceCount(pipeModFile, null)); Assert.assertFalse(Files.exists(originFile.toPath())); Assert.assertFalse(Files.exists(originModFile.toPath())); - // Pipe TsFile will be cleaned by a timed thread, so we wait some time here. 
- await() - .atMost(3 * PipeTsFileResource.TSFILE_MIN_TIME_TO_LIVE_IN_MS, TimeUnit.MILLISECONDS) - .untilAsserted( - () -> { - Assert.assertFalse(Files.exists(pipeTsfile.toPath())); - Assert.assertFalse(Files.exists(pipeModFile.toPath())); - }); } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeWALHardlinkResourceManagerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeWALHardlinkResourceManagerTest.java deleted file mode 100644 index 33e47af0a8dd..000000000000 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/pipe/resource/PipeWALHardlinkResourceManagerTest.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.db.pipe.resource; - -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.utils.FileUtils; -import org.apache.iotdb.db.pipe.resource.wal.hardlink.PipeWALHardlinkResourceManager; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; - -public class PipeWALHardlinkResourceManagerTest { - private static final String ROOT_DIR = "target" + File.separator + "PipeWALHolderTest"; - - private static final String WAL_DIR = ROOT_DIR + File.separator + IoTDBConstant.WAL_FOLDER_NAME; - - private static final String WAL_NAME = WAL_DIR + File.separator + "test.wal"; - - private PipeWALHardlinkResourceManager pipeWALHardlinkResourceManager; - - @Before - public void setUp() throws Exception { - pipeWALHardlinkResourceManager = new PipeWALHardlinkResourceManager(); - - createWAL(); - } - - private void createWAL() { - File file = new File(WAL_NAME); - if (file.exists()) { - boolean ignored = file.delete(); - } - - try { - file.getParentFile().mkdirs(); - file.createNewFile(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @After - public void tearDown() throws Exception { - File pipeFolder = new File(ROOT_DIR); - if (pipeFolder.exists()) { - FileUtils.deleteFileOrDirectory(pipeFolder); - } - } - - @Test - public void testIncreaseTsfile() throws IOException { - File originWALFile = new File(WAL_NAME); - Assert.assertEquals(0, pipeWALHardlinkResourceManager.getFileReferenceCount(originWALFile)); - - File pipeWALFile = pipeWALHardlinkResourceManager.increaseFileReference(originWALFile); - Assert.assertEquals(1, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile)); - Assert.assertTrue(Files.exists(originWALFile.toPath())); - Assert.assertTrue(Files.exists(pipeWALFile.toPath())); - - // test use hardlinkTsFile to increase reference counts - 
pipeWALHardlinkResourceManager.increaseFileReference(pipeWALFile); - Assert.assertEquals(2, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile)); - Assert.assertTrue(Files.exists(originWALFile.toPath())); - Assert.assertTrue(Files.exists(pipeWALFile.toPath())); - } - - @Test - public void testDecreaseTsfile() throws IOException { - File originFile = new File(WAL_NAME); - - pipeWALHardlinkResourceManager.decreaseFileReference(originFile); - Assert.assertEquals(0, pipeWALHardlinkResourceManager.getFileReferenceCount(originFile)); - - File pipeWALFile = pipeWALHardlinkResourceManager.increaseFileReference(originFile); - Assert.assertEquals(1, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile)); - Assert.assertTrue(Files.exists(pipeWALFile.toPath())); - Assert.assertTrue(Files.exists(pipeWALFile.toPath())); - - Assert.assertTrue(originFile.delete()); - Assert.assertFalse(Files.exists(originFile.toPath())); - - Assert.assertEquals(1, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile)); - Assert.assertFalse(Files.exists(originFile.toPath())); - Assert.assertTrue(Files.exists(pipeWALFile.toPath())); - - pipeWALHardlinkResourceManager.decreaseFileReference(pipeWALFile); - Assert.assertEquals(0, pipeWALHardlinkResourceManager.getFileReferenceCount(pipeWALFile)); - Assert.assertFalse(Files.exists(originFile.toPath())); - Assert.assertFalse(Files.exists(pipeWALFile.toPath())); - } -} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java index 33bf80115a3c..87b25883fb17 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/TsFileResourceProgressIndexTest.java @@ -128,30 +128,23 @@ public 
void testProgressIndexRecorder() { Assert.assertTrue( hybridProgressIndex.isAfter(new RecoverProgressIndex(3, new SimpleProgressIndex(5, 4)))); - Assert.assertTrue( - new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndexAfterClose())); + Assert.assertTrue(new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndex())); indexList.forEach(tsFileResource::updateProgressIndex); + Assert.assertFalse(new MockProgressIndex(-1).isAfter(tsFileResource.getMaxProgressIndex())); + Assert.assertFalse(new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndex())); + Assert.assertFalse(new MockProgressIndex(1).isAfter(tsFileResource.getMaxProgressIndex())); Assert.assertFalse( - new MockProgressIndex(-1).isAfter(tsFileResource.getMaxProgressIndexAfterClose())); - Assert.assertFalse( - new MockProgressIndex(0).isAfter(tsFileResource.getMaxProgressIndexAfterClose())); - Assert.assertFalse( - new MockProgressIndex(1).isAfter(tsFileResource.getMaxProgressIndexAfterClose())); - Assert.assertFalse( - new MockProgressIndex(INDEX_NUM - 1) - .isAfter(tsFileResource.getMaxProgressIndexAfterClose())); + new MockProgressIndex(INDEX_NUM - 1).isAfter(tsFileResource.getMaxProgressIndex())); Assert.assertTrue( - new MockProgressIndex(INDEX_NUM).isAfter(tsFileResource.getMaxProgressIndexAfterClose())); + new MockProgressIndex(INDEX_NUM).isAfter(tsFileResource.getMaxProgressIndex())); Assert.assertTrue( - new MockProgressIndex(Integer.MAX_VALUE) - .isAfter(tsFileResource.getMaxProgressIndexAfterClose())); + new MockProgressIndex(Integer.MAX_VALUE).isAfter(tsFileResource.getMaxProgressIndex())); Assert.assertFalse( - new MockProgressIndex(1, INDEX_NUM - 1) - .isAfter(tsFileResource.getMaxProgressIndexAfterClose())); + new MockProgressIndex(1, INDEX_NUM - 1).isAfter(tsFileResource.getMaxProgressIndex())); } @Test diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManagerTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManagerTest.java deleted file mode 100644 index 9c9290f98529..000000000000 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/checkpoint/CheckpointManagerTest.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint; - -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable; -import org.apache.iotdb.db.storageengine.dataregion.wal.io.CheckpointReader; -import org.apache.iotdb.db.storageengine.dataregion.wal.recover.CheckpointRecoverUtils; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.CheckpointFileUtils; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.db.utils.constant.TestConstant; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class CheckpointManagerTest { - private static final String database = "root.test"; - private static final String dataRegionId = "1"; - private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - private static final String identifier = String.valueOf(Integer.MAX_VALUE); - private static final String logDirectory = TestConstant.BASE_OUTPUT_PATH.concat("wal-test"); - private CheckpointManager checkpointManager; - private long prevFileSize; - - @Before - public void setUp() throws Exception { - EnvironmentUtils.cleanDir(logDirectory); - prevFileSize = config.getCheckpointFileSizeThresholdInByte(); - config.setCheckpointFileSizeThresholdInByte(10 * 1024); - checkpointManager = new CheckpointManager(identifier, logDirectory); - } - - @After - public void 
tearDown() throws Exception { - checkpointManager.close(); - config.setCheckpointFileSizeThresholdInByte(prevFileSize); - EnvironmentUtils.cleanDir(logDirectory); - } - - @Test - public void testNewFile() { - Checkpoint initCheckpoint = - new Checkpoint(CheckpointType.GLOBAL_MEMORY_TABLE_INFO, Collections.emptyList()); - List expectedCheckpoints = Collections.singletonList(initCheckpoint); - CheckpointReader checkpointReader = - new CheckpointReader( - new File(logDirectory + File.separator + CheckpointFileUtils.getLogFileName(0))); - List actualCheckpoints = checkpointReader.getCheckpoints(); - assertEquals(expectedCheckpoints, actualCheckpoints); - } - - @Test - public void testConcurrentWrite() throws Exception { - // start write threads to write concurrently - int threadsNum = 5; - ExecutorService executorService = Executors.newFixedThreadPool(threadsNum); - List> futures = new ArrayList<>(); - Map expectedMemTableId2Info = new ConcurrentHashMap<>(); - Map versionId2memTableId = new ConcurrentHashMap<>(); - // create 10 memTables, and flush the first 5 of them - int memTablesNum = 10; - for (int i = 0; i < memTablesNum; ++i) { - long versionId = i; - Callable writeTask = - () -> { - String tsFilePath = logDirectory + File.separator + versionId + ".tsfile"; - MemTableInfo memTableInfo = - new MemTableInfo( - new PrimitiveMemTable(database, dataRegionId), tsFilePath, versionId); - versionId2memTableId.put(versionId, memTableInfo.getMemTableId()); - checkpointManager.makeCreateMemTableCPInMemory(memTableInfo); - checkpointManager.makeCreateMemTableCPOnDisk(memTableInfo.getMemTableId()); - if (versionId < memTablesNum / 2) { - checkpointManager.makeFlushMemTableCP(versionId2memTableId.get(versionId)); - } else { - expectedMemTableId2Info.put(memTableInfo.getMemTableId(), memTableInfo); - } - return null; - }; - Future future = executorService.submit(writeTask); - futures.add(future); - } - // wait until all write tasks are done - for (Future future : futures) { - 
future.get(); - } - executorService.shutdown(); - // check first valid version id - assertEquals(memTablesNum / 2, checkpointManager.getFirstValidWALVersionId()); - // recover info from checkpoint file - Map actualMemTableId2Info = - CheckpointRecoverUtils.recoverMemTableInfo(new File(logDirectory)).getMemTableId2Info(); - assertEquals(expectedMemTableId2Info, actualMemTableId2Info); - } - - @Test - public void testTriggerLogRoller() { - // create memTables until reach LOG_SIZE_LIMIT, and flush the first 5 of them - int size = 0; - long versionId = 0; - Map expectedMemTableId2Info = new HashMap<>(); - Map versionId2memTableId = new HashMap<>(); - while (size < config.getCheckpointFileSizeThresholdInByte()) { - ++versionId; - String tsFilePath = logDirectory + File.separator + versionId + ".tsfile"; - MemTableInfo memTableInfo = - new MemTableInfo(new PrimitiveMemTable(database, dataRegionId), tsFilePath, versionId); - versionId2memTableId.put(versionId, memTableInfo.getMemTableId()); - Checkpoint checkpoint = - new Checkpoint( - CheckpointType.CREATE_MEMORY_TABLE, Collections.singletonList(memTableInfo)); - size += checkpoint.serializedSize(); - checkpointManager.makeCreateMemTableCPInMemory(memTableInfo); - checkpointManager.makeCreateMemTableCPOnDisk(memTableInfo.getMemTableId()); - if (versionId < 5) { - checkpoint = - new Checkpoint( - CheckpointType.FLUSH_MEMORY_TABLE, Collections.singletonList(memTableInfo)); - size += checkpoint.serializedSize(); - checkpointManager.makeFlushMemTableCP(versionId2memTableId.get(versionId)); - } else { - expectedMemTableId2Info.put(memTableInfo.getMemTableId(), memTableInfo); - } - } - checkpointManager.fsyncCheckpointFile(); - // check first valid version id - assertEquals(5, checkpointManager.getFirstValidWALVersionId()); - // check checkpoint files - assertFalse( - new File(logDirectory + File.separator + CheckpointFileUtils.getLogFileName(0)).exists()); - assertTrue( - new File(logDirectory + File.separator + 
CheckpointFileUtils.getLogFileName(1)).exists()); - // recover info from checkpoint file - Map actualMemTableId2Info = - CheckpointRecoverUtils.recoverMemTableInfo(new File(logDirectory)).getMemTableId2Info(); - assertEquals(expectedMemTableId2Info, actualMemTableId2Info); - } -} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALEntryHandlerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALEntryHandlerTest.java deleted file mode 100644 index d5913e54355c..000000000000 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALEntryHandlerTest.java +++ /dev/null @@ -1,586 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.db.storageengine.dataregion.wal.node; - -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.consensus.iot.log.ConsensusReqReader; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowsNode; -import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable; -import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable; -import org.apache.iotdb.db.storageengine.dataregion.wal.buffer.WALEntry; -import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.CheckpointManager; -import org.apache.iotdb.db.storageengine.dataregion.wal.checkpoint.MemTableInfo; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.db.utils.constant.TestConstant; - -import org.apache.tsfile.common.conf.TSFileConfig; -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.utils.Binary; -import org.apache.tsfile.write.schema.MeasurementSchema; -import org.awaitility.Awaitility; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; 
-import java.util.concurrent.Future; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class WALEntryHandlerTest { - private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - private static final String identifier1 = String.valueOf(Integer.MAX_VALUE); - private static final String identifier2 = String.valueOf(Integer.MAX_VALUE - 1); - private static final String logDirectory1 = - TestConstant.BASE_OUTPUT_PATH.concat("wal-test" + identifier1); - private static final String logDirectory2 = - TestConstant.BASE_OUTPUT_PATH.concat("wal-test" + identifier2); - - private static final String databasePath = "root.test_sg"; - private static final String devicePath = databasePath + ".test_d"; - private static final String dataRegionId = "1"; - private WALMode prevMode; - private WALNode walNode1; - private WALNode walNode2; - - @Before - public void setUp() throws Exception { - EnvironmentUtils.cleanDir(logDirectory1); - EnvironmentUtils.cleanDir(logDirectory2); - prevMode = config.getWalMode(); - config.setWalMode(WALMode.SYNC); - walNode1 = new WALNode(identifier1, logDirectory1); - walNode2 = new WALNode(identifier2, logDirectory2); - } - - @After - public void tearDown() throws Exception { - walNode1.close(); - walNode2.close(); - config.setWalMode(prevMode); - EnvironmentUtils.cleanDir(logDirectory1); - EnvironmentUtils.cleanDir(logDirectory2); - WALInsertNodeCache.getInstance().clear(); - } - - @Test(expected = MemTablePinException.class) - public void pinDeletedMemTable1() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener flushListener = - walNode1.log( - memTable.getMemTableId(), getInsertRowNode(devicePath, 
System.currentTimeMillis())); - walNode1.onMemTableFlushed(memTable); - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - // pin flushed memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - } - - @Test(expected = MemTablePinException.class) - public void pinDeletedMemTable2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener flushListener = - walNode1.log( - memTable.getMemTableId(), getInsertRowsNode(devicePath, System.currentTimeMillis())); - walNode1.onMemTableFlushed(memTable); - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - // pin flushed memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - } - - @Test - public void pinMemTable1() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - // roll wal file - walNode1.rollWALFile(); - InsertRowNode node2 = getInsertRowNode(devicePath, System.currentTimeMillis()); - node2.setSearchIndex(2); - walNode1.log(memTable.getMemTableId(), node2); - walNode1.onMemTableFlushed(memTable); - walNode1.rollWALFile(); - // find node1 - ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1); - assertTrue(itr.hasNext()); - assertEquals( - node1, - WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer())); - // try to delete flushed but pinned memTable - walNode1.deleteOutdatedFiles(); - // try to find node1 - itr 
= walNode1.getReqIterator(1); - assertTrue(itr.hasNext()); - assertEquals( - node1, - WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer())); - } - - @Test - public void pinMemTable2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - // roll wal file - walNode1.rollWALFile(); - InsertRowsNode node2 = getInsertRowsNode(devicePath, System.currentTimeMillis()); - node2.setSearchIndex(2); - walNode1.log(memTable.getMemTableId(), node2); - walNode1.onMemTableFlushed(memTable); - walNode1.rollWALFile(); - // find node1 - ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1); - assertTrue(itr.hasNext()); - assertEquals( - node1, - WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer())); - // try to delete flushed but pinned memTable - walNode1.deleteOutdatedFiles(); - // try to find node1 - itr = walNode1.getReqIterator(1); - assertTrue(itr.hasNext()); - assertEquals( - node1, - WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer())); - } - - @Test(expected = MemTablePinException.class) - public void unpinDeletedMemTable1() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener flushListener = - walNode1.log( - memTable.getMemTableId(), getInsertRowNode(devicePath, System.currentTimeMillis())); - walNode1.onMemTableFlushed(memTable); - // pin flushed memTable - WALEntryHandler handler = 
flushListener.getWalEntryHandler(); - handler.unpinMemTable(); - } - - @Test(expected = MemTablePinException.class) - public void unpinDeletedMemTable2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener flushListener = - walNode1.log( - memTable.getMemTableId(), getInsertRowsNode(devicePath, System.currentTimeMillis())); - walNode1.onMemTableFlushed(memTable); - // pin flushed memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.unpinMemTable(); - } - - @Test - public void unpinFlushedMemTable1() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener flushListener = - walNode1.log( - memTable.getMemTableId(), getInsertRowNode(devicePath, System.currentTimeMillis())); - WALEntryHandler handler = flushListener.getWalEntryHandler(); - // pin twice - handler.pinMemTable(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - // unpin 1 - CheckpointManager checkpointManager = walNode1.getCheckpointManager(); - handler.unpinMemTable(); - MemTableInfo oldestMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo(); - assertNull(oldestMemTableInfo); - // unpin 2 - handler.unpinMemTable(); - assertNull(checkpointManager.getOldestUnpinnedMemTableInfo()); - } - - @Test - public void unpinFlushedMemTable2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener flushListener = - walNode1.log( - memTable.getMemTableId(), getInsertRowsNode(devicePath, System.currentTimeMillis())); - WALEntryHandler handler = flushListener.getWalEntryHandler(); - // pin twice - 
handler.pinMemTable(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - // unpin 1 - CheckpointManager checkpointManager = walNode1.getCheckpointManager(); - handler.unpinMemTable(); - MemTableInfo oldestMemTableInfo = checkpointManager.getOldestUnpinnedMemTableInfo(); - assertNull(oldestMemTableInfo); - // unpin 2 - handler.unpinMemTable(); - assertNull(checkpointManager.getOldestUnpinnedMemTableInfo()); - } - - @Test - public void unpinMemTable1() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - // roll wal file - walNode1.rollWALFile(); - walNode1.rollWALFile(); - // find node1 - ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1); - assertTrue(itr.hasNext()); - assertEquals( - node1, - WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer())); - // unpin flushed memTable - handler.unpinMemTable(); - // try to delete flushed but pinned memTable - walNode1.deleteOutdatedFiles(); - // try to find node1 - itr = walNode1.getReqIterator(1); - assertFalse(itr.hasNext()); - } - - @Test - public void unpinMemTable2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin 
memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - // roll wal file - walNode1.rollWALFile(); - walNode1.rollWALFile(); - // find node1 - ConsensusReqReader.ReqIterator itr = walNode1.getReqIterator(1); - assertTrue(itr.hasNext()); - assertEquals( - node1, - WALEntry.deserializeForConsensus(itr.next().getRequests().get(0).serializeToByteBuffer())); - // unpin flushed memTable - handler.unpinMemTable(); - // try to delete flushed but pinned memTable - walNode1.deleteOutdatedFiles(); - // try to find node1 - itr = walNode1.getReqIterator(1); - assertFalse(itr.hasNext()); - } - - @Test - public void getUnFlushedValue1() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - assertEquals(node1, handler.getInsertNode()); - } - - @Test - public void getUnFlushedValue2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - assertEquals(node1, handler.getInsertNode()); - } - - @Test - public void getFlushedValue1() throws Exception { - IMemTable memTable = new 
PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - // wait until wal flushed - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - assertEquals(node1, handler.getInsertNode()); - } - - @Test - public void getFlushedValue2() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable, logDirectory1 + "/" + "fake.tsfile"); - InsertRowsNode node1 = getInsertRowsNode(devicePath, System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode1.log(memTable.getMemTableId(), node1); - // pin memTable - WALEntryHandler handler = flushListener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.onMemTableFlushed(memTable); - // wait until wal flushed - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - assertEquals(node1, handler.getInsertNode()); - } - - @Test - public void testConcurrentGetValue1() throws Exception { - int threadsNum = 10; - ExecutorService executorService = Executors.newFixedThreadPool(threadsNum); - List> futures = new ArrayList<>(); - for (int i = 0; i < threadsNum; ++i) { - WALNode walNode = i % 2 == 0 ? walNode1 : walNode2; - String logDirectory = i % 2 == 0 ? 
logDirectory1 : logDirectory2; - Callable writeTask = - () -> { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile"); - - List walFlushListeners = new ArrayList<>(); - List expectedInsertRowNodes = new ArrayList<>(); - try { - for (int j = 0; j < 1_000; ++j) { - long memTableId = memTable.getMemTableId(); - InsertRowNode node = - getInsertRowNode(devicePath + memTableId, System.currentTimeMillis()); - expectedInsertRowNodes.add(node); - WALFlushListener walFlushListener = walNode.log(memTableId, node); - walFlushListeners.add(walFlushListener); - } - } catch (IllegalPathException e) { - fail(); - } - - // wait until wal flushed - Awaitility.await().until(walNode::isAllWALEntriesConsumed); - - walFlushListeners.get(0).getWalEntryHandler().pinMemTable(); - walNode.onMemTableFlushed(memTable); - - for (int j = 0; j < expectedInsertRowNodes.size(); ++j) { - InsertRowNode expect = expectedInsertRowNodes.get(j); - InsertRowNode actual = - (InsertRowNode) walFlushListeners.get(j).getWalEntryHandler().getInsertNode(); - assertEquals(expect, actual); - } - - walFlushListeners.get(0).getWalEntryHandler().unpinMemTable(); - return null; - }; - Future future = executorService.submit(writeTask); - futures.add(future); - } - // wait until all write tasks are done - for (Future future : futures) { - future.get(); - } - executorService.shutdown(); - } - - @Test - public void testConcurrentGetValue2() throws Exception { - int threadsNum = 10; - ExecutorService executorService = Executors.newFixedThreadPool(threadsNum); - List> futures = new ArrayList<>(); - for (int i = 0; i < threadsNum; ++i) { - WALNode walNode = i % 2 == 0 ? walNode1 : walNode2; - String logDirectory = i % 2 == 0 ? 
logDirectory1 : logDirectory2; - Callable writeTask = - () -> { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile"); - - List walFlushListeners = new ArrayList<>(); - List expectedInsertRowsNodes = new ArrayList<>(); - try { - for (int j = 0; j < 1_000; ++j) { - long memTableId = memTable.getMemTableId(); - InsertRowsNode node = - getInsertRowsNode(devicePath + memTableId, System.currentTimeMillis()); - expectedInsertRowsNodes.add(node); - WALFlushListener walFlushListener = walNode.log(memTableId, node); - walFlushListeners.add(walFlushListener); - } - } catch (IllegalPathException e) { - fail(); - } - - // wait until wal flushed - Awaitility.await().until(walNode::isAllWALEntriesConsumed); - - walFlushListeners.get(0).getWalEntryHandler().pinMemTable(); - walNode.onMemTableFlushed(memTable); - - for (int j = 0; j < expectedInsertRowsNodes.size(); ++j) { - InsertRowsNode expect = expectedInsertRowsNodes.get(j); - InsertRowsNode actual = - (InsertRowsNode) walFlushListeners.get(j).getWalEntryHandler().getInsertNode(); - assertEquals(expect, actual); - } - - walFlushListeners.get(0).getWalEntryHandler().unpinMemTable(); - return null; - }; - Future future = executorService.submit(writeTask); - futures.add(future); - } - // wait until all write tasks are done - for (Future future : futures) { - future.get(); - } - executorService.shutdown(); - } - - private InsertRowNode getInsertRowNode(String devicePath, long time) throws IllegalPathException { - TSDataType[] dataTypes = - new TSDataType[] { - TSDataType.DOUBLE, - TSDataType.FLOAT, - TSDataType.INT64, - TSDataType.INT32, - TSDataType.BOOLEAN, - TSDataType.TEXT - }; - - Object[] columns = new Object[6]; - columns[0] = 1.0d; - columns[1] = 2f; - columns[2] = 10000L; - columns[3] = 100; - columns[4] = false; - columns[5] = new Binary("hh" + 0, TSFileConfig.STRING_CHARSET); - - InsertRowNode node = - new InsertRowNode( - new 
PlanNodeId(""), - new PartialPath(devicePath), - false, - new String[] {"s1", "s2", "s3", "s4", "s5", "s6"}, - dataTypes, - time, - columns, - false); - MeasurementSchema[] schemas = new MeasurementSchema[6]; - for (int i = 0; i < 6; i++) { - schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]); - } - node.setMeasurementSchemas(schemas); - return node; - } - - private InsertRowsNode getInsertRowsNode(String devicePath, long firstTime) - throws IllegalPathException { - TSDataType[] dataTypes = - new TSDataType[] { - TSDataType.DOUBLE, - TSDataType.FLOAT, - TSDataType.INT64, - TSDataType.INT32, - TSDataType.BOOLEAN, - TSDataType.TEXT - }; - - Object[] columns = new Object[6]; - columns[0] = 1.0d; - columns[1] = 2f; - columns[2] = 10000L; - columns[3] = 100; - columns[4] = false; - columns[5] = new Binary("hh" + 0, TSFileConfig.STRING_CHARSET); - - InsertRowNode node = - new InsertRowNode( - new PlanNodeId(""), - new PartialPath(devicePath), - false, - new String[] {"s1", "s2", "s3", "s4", "s5", "s6"}, - dataTypes, - firstTime, - columns, - false); - MeasurementSchema[] schemas = new MeasurementSchema[6]; - for (int i = 0; i < 6; i++) { - schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]); - } - node.setMeasurementSchemas(schemas); - - InsertRowsNode insertRowsNode = new InsertRowsNode(new PlanNodeId("")); - insertRowsNode.addOneInsertRowNode(node, 0); - - node = - new InsertRowNode( - new PlanNodeId(""), - new PartialPath(devicePath), - false, - new String[] {"s1", "s2", "s3", "s4", "s5", "s6"}, - dataTypes, - firstTime + 10, - columns, - false); - schemas = new MeasurementSchema[6]; - for (int i = 0; i < 6; i++) { - schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]); - } - node.setMeasurementSchemas(schemas); - insertRowsNode.addOneInsertRowNode(node, 1); - return insertRowsNode; - } -} diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java 
b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java index f72f55a3cb8a..df88c34ee0eb 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WALNodeTest.java @@ -66,7 +66,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -149,14 +148,6 @@ public void testConcurrentWrite() throws Exception { } } assertEquals(expectedInsertTabletNodes, actualInsertTabletNodes); - // check flush listeners - try { - for (WALFlushListener walFlushListener : walFlushListeners) { - assertNotEquals(WALFlushListener.Status.FAILURE, walFlushListener.waitForResult()); - } - } catch (NullPointerException e) { - // ignore - } } private void writeInsertTabletNode( @@ -329,13 +320,5 @@ public void testDeleteOutdatedFiles() throws Exception { + File.separator + WALFileUtils.getLogFileName(1, 0, WALFileStatus.CONTAINS_SEARCH_INDEX)) .exists()); - // check flush listeners - try { - for (WALFlushListener walFlushListener : walFlushListeners) { - assertNotEquals(WALFlushListener.Status.FAILURE, walFlushListener.waitForResult()); - } - } catch (NullPointerException e) { - // ignore - } } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java index 593d25b6932c..c132d268c7d3 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/node/WalDeleteOutdatedNewTest.java @@ 
-33,12 +33,8 @@ import org.apache.iotdb.db.storageengine.dataregion.DataRegionTest; import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable; import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable; -import org.apache.iotdb.db.storageengine.dataregion.wal.exception.MemTablePinException; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALEntryHandler; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALFileUtils; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALInsertNodeCache; import org.apache.iotdb.db.storageengine.dataregion.wal.utils.WALMode; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener; import org.apache.iotdb.db.utils.EnvironmentUtils; import org.apache.iotdb.db.utils.constant.TestConstant; @@ -88,7 +84,6 @@ public void tearDown() throws Exception { config.setDataRegionConsensusProtocolClass(prevConsensus); EnvironmentUtils.cleanDir(logDirectory1); StorageEngine.getInstance().reset(); - WALInsertNodeCache.getInstance().clear(); } /** @@ -282,66 +277,6 @@ public void test04() throws IllegalPathException { Assert.assertEquals(1, WALFileUtils.listAllWALFiles(new File(logDirectory1)).length); } - /** - * Ensure that wal pinned to memtable cannot be deleted:
- * 1. _0-0-1.wal: memTable0
- * 2. pin memTable0
- * 3. memTable0 flush
- * 4. roll wal file
- * 5. _1-1-1.wal: memTable0、memTable1
- * 6. roll wal file
- * 7. _2-1-1.wal: memTable1
- * 8. roll wal file
- * 9. _2-1-1.wal: memTable1
- * 10. wait until all walEntry consumed
- * 11. memTable0 flush, memTable1 flush
- * 12. delete outdated wal files - */ - @Test - public void test05() throws IllegalPathException, MemTablePinException { - IMemTable memTable0 = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable0, logDirectory1 + "/" + "fake.tsfile"); - WALFlushListener listener = - walNode1.log( - memTable0.getMemTableId(), - generateInsertRowNode(devicePath, System.currentTimeMillis(), 1)); - walNode1.rollWALFile(); - - // pin memTable - WALEntryHandler handler = listener.getWalEntryHandler(); - handler.pinMemTable(); - walNode1.log( - memTable0.getMemTableId(), - generateInsertRowNode(devicePath, System.currentTimeMillis(), 2)); - IMemTable memTable1 = new PrimitiveMemTable(databasePath, dataRegionId); - walNode1.onMemTableCreated(memTable1, logDirectory1 + "/" + "fake.tsfile"); - walNode1.log( - memTable1.getMemTableId(), - generateInsertRowNode(devicePath, System.currentTimeMillis(), 3)); - walNode1.rollWALFile(); - - walNode1.log( - memTable1.getMemTableId(), - generateInsertRowNode(devicePath, System.currentTimeMillis(), 4)); - walNode1.rollWALFile(); - - walNode1.log( - memTable1.getMemTableId(), - generateInsertRowNode(devicePath, System.currentTimeMillis(), 5)); - walNode1.onMemTableFlushed(memTable0); - walNode1.onMemTableFlushed(memTable1); - Awaitility.await().until(() -> walNode1.isAllWALEntriesConsumed()); - - Map> memTableIdsOfWal = walNode1.getWALBuffer().getMemTableIdsOfWal(); - Assert.assertEquals(4, memTableIdsOfWal.size()); - Assert.assertEquals(4, WALFileUtils.listAllWALFiles(new File(logDirectory1)).length); - - walNode1.deleteOutdatedFiles(); - Map> memTableIdsOfWalAfter = walNode1.getWALBuffer().getMemTableIdsOfWal(); - Assert.assertEquals(3, memTableIdsOfWalAfter.size()); - Assert.assertEquals(3, WALFileUtils.listAllWALFiles(new File(logDirectory1)).length); - } - /** * Ensure that the flushed wal related to memtable cannot be deleted:
* 1. _0-0-1.wal: memTable0
diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCacheTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCacheTest.java deleted file mode 100644 index 552c8334f953..000000000000 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/storageengine/dataregion/wal/utils/WALInsertNodeCacheTest.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.db.storageengine.dataregion.wal.utils; - -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.PlanNodeId; -import org.apache.iotdb.db.queryengine.plan.planner.plan.node.write.InsertRowNode; -import org.apache.iotdb.db.storageengine.dataregion.memtable.IMemTable; -import org.apache.iotdb.db.storageengine.dataregion.memtable.PrimitiveMemTable; -import org.apache.iotdb.db.storageengine.dataregion.wal.node.WALNode; -import org.apache.iotdb.db.storageengine.dataregion.wal.utils.listener.WALFlushListener; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.db.utils.constant.TestConstant; - -import org.apache.tsfile.common.conf.TSFileConfig; -import org.apache.tsfile.enums.TSDataType; -import org.apache.tsfile.utils.Binary; -import org.apache.tsfile.write.schema.MeasurementSchema; -import org.awaitility.Awaitility; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class WALInsertNodeCacheTest { - private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - private static final String identifier = String.valueOf(Integer.MAX_VALUE); - private static final String logDirectory = TestConstant.BASE_OUTPUT_PATH.concat("wal-test"); - private static final String databasePath = "root.test_sg"; - private static final String devicePath = databasePath + ".test_d"; - private static final String dataRegionId = "1"; - private static final WALInsertNodeCache cache = WALInsertNodeCache.getInstance(); - private WALMode prevMode; - private WALNode walNode; - - 
@Before - public void setUp() throws Exception { - EnvironmentUtils.cleanDir(logDirectory); - cache.clear(); - prevMode = config.getWalMode(); - config.setWalMode(WALMode.SYNC); - walNode = new WALNode(identifier, logDirectory); - } - - @After - public void tearDown() throws Exception { - walNode.close(); - cache.clear(); - config.setWalMode(prevMode); - EnvironmentUtils.cleanDir(logDirectory); - } - - @Test - public void testLoadAfterSyncBuffer() throws IllegalPathException { - try { - // Limit the wal buffer size to trigger sync Buffer when writing wal entry - walNode.setBufferSize(24); - // write memTable - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode.log(memTable.getMemTableId(), node1); - WALEntryPosition position = flushListener.getWalEntryHandler().getWalEntryPosition(); - // wait until wal flushed - walNode.rollWALFile(); - Awaitility.await().until(() -> walNode.isAllWALEntriesConsumed() && position.canRead()); - // load by cache - System.out.println(position.getPosition()); - assertEquals(node1, cache.getInsertNode(position)); - } finally { - walNode.setBufferSize(config.getWalBufferSize()); - } - } - - @Test - public void testGetInsertNodeInParallel() throws IllegalPathException { - // write memTable - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode.log(memTable.getMemTableId(), node1); - WALEntryPosition position = flushListener.getWalEntryHandler().getWalEntryPosition(); - // wait until wal flushed - walNode.rollWALFile(); - Awaitility.await().until(() -> 
walNode.isAllWALEntriesConsumed() && position.canRead()); - // Test getInsertNode in parallel to detect buffer concurrent problem - AtomicBoolean failure = new AtomicBoolean(false); - List threadList = new ArrayList<>(5); - for (int i = 0; i < 5; ++i) { - Thread getInsertNodeThread = - new Thread( - () -> { - if (!node1.equals(cache.getInsertNode(position))) { - failure.set(true); - } - }); - threadList.add(getInsertNodeThread); - getInsertNodeThread.start(); - } - Awaitility.await() - .until( - () -> { - for (Thread thread : threadList) { - if (thread.isAlive()) { - return false; - } - } - return true; - }); - assertFalse(failure.get()); - } - - @Test - public void testLoadUnsealedWALFile() throws Exception { - IMemTable memTable = new PrimitiveMemTable(databasePath, dataRegionId); - walNode.onMemTableCreated(memTable, logDirectory + "/" + "fake.tsfile"); - InsertRowNode node1 = getInsertRowNode(System.currentTimeMillis()); - node1.setSearchIndex(1); - WALFlushListener flushListener = walNode.log(memTable.getMemTableId(), node1); - WALEntryPosition position = flushListener.getWalEntryHandler().getWalEntryPosition(); - // wait until wal flushed - Awaitility.await().until(() -> walNode.isAllWALEntriesConsumed() && position.canRead()); - // load by cache - assertEquals(node1, cache.getInsertNode(position)); - } - - private InsertRowNode getInsertRowNode(long time) throws IllegalPathException { - TSDataType[] dataTypes = - new TSDataType[] { - TSDataType.DOUBLE, - TSDataType.FLOAT, - TSDataType.INT64, - TSDataType.INT32, - TSDataType.BOOLEAN, - TSDataType.TEXT - }; - - Object[] columns = new Object[6]; - columns[0] = 1.0d; - columns[1] = 2f; - columns[2] = 10000L; - columns[3] = 100; - columns[4] = false; - columns[5] = new Binary("hh" + 0, TSFileConfig.STRING_CHARSET); - - InsertRowNode node = - new InsertRowNode( - new PlanNodeId(""), - new PartialPath(WALInsertNodeCacheTest.devicePath), - false, - new String[] {"s1", "s2", "s3", "s4", "s5", "s6"}, - dataTypes, - 
time, - columns, - false); - MeasurementSchema[] schemas = new MeasurementSchema[6]; - for (int i = 0; i < 6; i++) { - schemas[i] = new MeasurementSchema("s" + (i + 1), dataTypes[i]); - } - node.setMeasurementSchemas(schemas); - return node; - } -} diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/IoTDBThreadPoolFactory.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/IoTDBThreadPoolFactory.java index 6234705ed2da..73d6ff22430d 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/IoTDBThreadPoolFactory.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/IoTDBThreadPoolFactory.java @@ -160,6 +160,21 @@ public static ExecutorService newSingleThreadExecutor( poolName); } + public static ExecutorService newSingleThreadExecutor( + String poolName, RejectedExecutionHandler handler) { + logger.info(NEW_SINGLE_THREAD_POOL_LOGGER_FORMAT, poolName); + return new WrappedSingleThreadExecutorService( + new ThreadPoolExecutor( + 1, + 1, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(), + new IoTThreadFactory(poolName), + handler), + poolName); + } + /** * see {@link Executors#newCachedThreadPool(java.util.concurrent.ThreadFactory)}. 
* diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java index cf23e22b4cec..b8f21ef69aba 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/concurrent/ThreadName.java @@ -69,6 +69,8 @@ public enum ThreadName { PBTREE_RELEASE_MONITOR("PBTree-Release-Task-Monitor"), PBTREE_FLUSH_MONITOR("PBTree-Flush-Monitor"), PBTREE_WORKER_POOL("PBTree-Worker-Pool"), + SCHEMA_PARALLEL_POOL("Schema-Parallel-Pool"), + // -------------------------- ClientService -------------------------- CLIENT_RPC_SERVICE("ClientRPC-Service"), CLIENT_RPC_PROCESSOR("ClientRPC-Processor"), @@ -133,6 +135,7 @@ public enum ThreadName { PIPE_CONSENSUS_EXECUTOR_POOL("Pipe-Consensus-Executor-Pool"), PIPE_CONFIGNODE_EXECUTOR_POOL("Pipe-ConfigNode-Executor-Pool"), PIPE_SUBTASK_CALLBACK_EXECUTOR_POOL("Pipe-SubTask-Callback-Executor-Pool"), + PIPE_TSFILE_ASYNC_SEND_POOL("Pipe-TsFile-Async-Send-Pool"), PIPE_RUNTIME_META_SYNCER("Pipe-Runtime-Meta-Syncer"), PIPE_RUNTIME_HEARTBEAT("Pipe-Runtime-Heartbeat"), PIPE_RUNTIME_PROCEDURE_SUBMITTER("Pipe-Runtime-Procedure-Submitter"), @@ -142,6 +145,7 @@ public enum ThreadName { PIPE_ASYNC_CONNECTOR_CLIENT_POOL("Pipe-Async-Connector-Client-Pool"), PIPE_RECEIVER_AIR_GAP_AGENT("Pipe-Receiver-Air-Gap-Agent"), PIPE_AIR_GAP_RECEIVER("Pipe-Air-Gap-Receiver"), + PIPE_PARALLEL_EXECUTION_POOL("Pipe-Parallel-Execution-Pool"), LOAD_DATATYPE_CONVERT_POOL("Load-Datatype-Convert-Pool"), SUBSCRIPTION_EXECUTOR_POOL("Subscription-Executor-Pool"), SUBSCRIPTION_RUNTIME_META_SYNCER("Subscription-Runtime-Meta-Syncer"), @@ -241,7 +245,8 @@ public enum ThreadName { PBTREE_RELEASE_MONITOR, SCHEMA_FORCE_MLOG, PBTREE_FLUSH_MONITOR, - PBTREE_WORKER_POOL)); + PBTREE_WORKER_POOL, + SCHEMA_PARALLEL_POOL)); private static final Set 
clientServiceThreadNames = new HashSet<>(Arrays.asList(CLIENT_RPC_SERVICE, CLIENT_RPC_PROCESSOR)); @@ -297,6 +302,7 @@ public enum ThreadName { PIPE_ASYNC_CONNECTOR_CLIENT_POOL, PIPE_RECEIVER_AIR_GAP_AGENT, PIPE_AIR_GAP_RECEIVER, + PIPE_PARALLEL_EXECUTION_POOL, SUBSCRIPTION_EXECUTOR_POOL, SUBSCRIPTION_RUNTIME_META_SYNCER, WINDOW_EVALUATION_SERVICE, diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java index bf1ef7d25a8b..8ae979bd7349 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java @@ -199,29 +199,52 @@ public class CommonConfig { private String pipeHardlinkTsFileDirName = "tsfile"; - private String pipeHardlinkWALDirName = "wal"; - - private boolean pipeHardLinkWALEnabled = false; - private boolean pipeFileReceiverFsyncEnabled = true; private int pipeRealTimeQueuePollTsFileThreshold = 10; - private int pipeRealTimeQueuePollHistoricalTsFileThreshold = 3; + + // Sequentially poll the tsFile by default + private int pipeRealTimeQueuePollHistoricalTsFileThreshold = 1; private int pipeRealTimeQueueMaxWaitingTsFileSize = 1; /** The maximum number of threads that can be used to execute subtasks in PipeSubtaskExecutor. 
*/ private int pipeSubtaskExecutorMaxThreadNum = Math.max(5, Runtime.getRuntime().availableProcessors() / 2); - private int pipeNonForwardingEventsProgressReportInterval = 100; - private int pipeDataStructureTabletRowSize = 2048; private int pipeDataStructureTabletSizeInBytes = 2097152; - private double pipeDataStructureTabletMemoryBlockAllocationRejectThreshold = 0.2; - private double pipeDataStructureTsFileMemoryBlockAllocationRejectThreshold = 0.2; - private double pipeDataStructureWalMemoryProportion = 0.3; - private double PipeDataStructureBatchMemoryProportion = 0.1; - private double pipeTotalFloatingMemoryProportion = 0.2; + private double pipeDataStructureTabletMemoryBlockAllocationRejectThreshold = 0.3; + private double pipeDataStructureTsFileMemoryBlockAllocationRejectThreshold = 0.3; + private double PipeDataStructureBatchMemoryProportion = 0.2; + private volatile double pipeTotalFloatingMemoryProportion = 0.5; + + // Check if memory check is enabled for Pipe + private boolean isPipeEnableMemoryCheck = true; + + // Memory for InsertNode queue: 15MB, used to temporarily store data awaiting processing + private long pipeInsertNodeQueueMemory = 15 * MB; + + // Memory for TsFile to Tablet conversion: 17MB, used for further processing after converting + // TSFile format to Tablet format + // Note: Pipes that do not decompose pattern/time do not need this part of memory + private long pipeTsFileParserMemory = 17 * MB; + + // Memory for Sink batch sending (InsertNode/TsFile, choose one) + // 1. InsertNode: 15MB, used for batch sending data to the downstream system + private long pipeSinkBatchMemoryInsertNode = 15 * MB; + + // 2. 
TsFile: 15MB, used for storing data about to be written to TsFile, similar to memTable + private long pipeSinkBatchMemoryTsFile = 15 * MB; + + // Memory needed for the ReadBuffer during the TsFile sending process: 15MB, buffer for the file + // sending process + private long pipeSendTsFileReadBuffer = 15 * MB; + + // Reserved memory percentage to accommodate memory fluctuations during system operation + private double pipeReservedMemoryPercentage = 0.15; + + // Minimum memory required for the receiver: 38MB + private long pipeMinimumReceiverMemory = 38 * MB; private int pipeSubtaskExecutorBasicCheckPointIntervalByConsumedEventCount = 10_000; private long pipeSubtaskExecutorBasicCheckPointIntervalByTimeDuration = 10 * 1000L; @@ -234,7 +257,7 @@ public class CommonConfig { private int pipeExtractorAssignerDisruptorRingBufferSize = 65536; private long pipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes = 50; // 50B - private int pipeExtractorMatcherCacheSize = 1024; + private long pipeExtractorMatcherCacheSize = 1024; private int pipeConnectorHandshakeTimeoutMs = 10 * 1000; // 10 seconds private int pipeConnectorTransferTimeoutMs = 15 * 60 * 1000; // 15 minutes @@ -277,24 +300,12 @@ public class CommonConfig { private int pipeMaxAllowedHistoricalTsFilePerDataRegion = Integer.MAX_VALUE; // Deprecated private int pipeMaxAllowedPendingTsFileEpochPerDataRegion = Integer.MAX_VALUE; // Deprecated - private int pipeMaxAllowedPinnedMemTableCount = Integer.MAX_VALUE; // per data region private long pipeMaxAllowedLinkedTsFileCount = Long.MAX_VALUE; // Deprecated - private float pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage = 0.1F; - private long pipeStuckRestartIntervalSeconds = 120; - private long pipeStuckRestartMinIntervalMs = 5 * 60 * 1000L; // 5 minutes - private boolean pipeEpochKeepTsFileAfterStuckRestartEnabled = false; - private long pipeFlushAfterLastTerminateSeconds = 30; - private long pipeFlushAfterTerminateCount = 30; - private long 
pipeStorageEngineFlushTimeIntervalMs = Long.MAX_VALUE; - private int pipeMaxAllowedRemainingInsertEventCountPerPipe = 10000; - private int pipeMaxAllowedTotalRemainingInsertEventCount = 50000; private double pipeMetaReportMaxLogNumPerRound = 0.1; private int pipeMetaReportMaxLogIntervalRounds = 360; private int pipeTsFilePinMaxLogNumPerRound = 10; private int pipeTsFilePinMaxLogIntervalRounds = 90; - private int pipeWalPinMaxLogNumPerRound = 10; - private int pipeWalPinMaxLogIntervalRounds = 90; private boolean pipeMemoryManagementEnabled = true; private long pipeMemoryAllocateRetryIntervalMs = 50; @@ -722,23 +733,6 @@ public boolean isTimestampPrecisionCheckEnabled() { return timestampPrecisionCheckEnabled; } - public int getPipeNonForwardingEventsProgressReportInterval() { - return pipeNonForwardingEventsProgressReportInterval; - } - - public void setPipeNonForwardingEventsProgressReportInterval( - int pipeNonForwardingEventsProgressReportInterval) { - if (this.pipeNonForwardingEventsProgressReportInterval - == pipeNonForwardingEventsProgressReportInterval) { - return; - } - this.pipeNonForwardingEventsProgressReportInterval = - pipeNonForwardingEventsProgressReportInterval; - logger.info( - "pipeNonForwardingEventsProgressReportInterval is set to {}.", - pipeNonForwardingEventsProgressReportInterval); - } - public String getPipeHardlinkBaseDirName() { return pipeHardlinkBaseDirName; } @@ -763,30 +757,6 @@ public void setPipeHardlinkTsFileDirName(String pipeTsFileDirName) { logger.info("pipeHardlinkTsFileDirName is set to {}.", pipeTsFileDirName); } - public String getPipeHardlinkWALDirName() { - return pipeHardlinkWALDirName; - } - - public void setPipeHardlinkWALDirName(String pipeWALDirName) { - if (Objects.equals(pipeWALDirName, this.pipeHardlinkWALDirName)) { - return; - } - this.pipeHardlinkWALDirName = pipeWALDirName; - logger.info("pipeHardlinkWALDirName is set to {}.", pipeWALDirName); - } - - public boolean getPipeHardLinkWALEnabled() { - return 
pipeHardLinkWALEnabled; - } - - public void setPipeHardLinkWALEnabled(boolean pipeHardLinkWALEnabled) { - if (this.pipeHardLinkWALEnabled == pipeHardLinkWALEnabled) { - return; - } - this.pipeHardLinkWALEnabled = pipeHardLinkWALEnabled; - logger.info("pipeHardLinkWALEnabled is set to {}.", pipeHardLinkWALEnabled); - } - public boolean getPipeFileReceiverFsyncEnabled() { return pipeFileReceiverFsyncEnabled; } @@ -858,19 +828,6 @@ public void setPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold( pipeDataStructureTsFileMemoryBlockAllocationRejectThreshold); } - public double getPipeDataStructureWalMemoryProportion() { - return pipeDataStructureWalMemoryProportion; - } - - public void setPipeDataStructureWalMemoryProportion(double pipeDataStructureWalMemoryProportion) { - if (this.pipeDataStructureWalMemoryProportion == pipeDataStructureWalMemoryProportion) { - return; - } - this.pipeDataStructureWalMemoryProportion = pipeDataStructureWalMemoryProportion; - logger.info( - "pipeDataStructureWalMemoryProportion is set to {}.", pipeDataStructureWalMemoryProportion); - } - public double getPipeDataStructureBatchMemoryProportion() { return PipeDataStructureBatchMemoryProportion; } @@ -886,6 +843,102 @@ public void setPipeDataStructureBatchMemoryProportion( PipeDataStructureBatchMemoryProportion); } + public boolean isPipeEnableMemoryChecked() { + return isPipeEnableMemoryCheck; + } + + public void setIsPipeEnableMemoryChecked(boolean isPipeEnableMemoryChecked) { + if (this.isPipeEnableMemoryCheck == isPipeEnableMemoryChecked) { + return; + } + this.isPipeEnableMemoryCheck = isPipeEnableMemoryChecked; + logger.info("isPipeEnableMemoryChecked is set to {}.", isPipeEnableMemoryChecked); + } + + public long getPipeInsertNodeQueueMemory() { + return pipeInsertNodeQueueMemory; + } + + public void setPipeInsertNodeQueueMemory(long pipeInsertNodeQueueMemory) { + if (this.pipeInsertNodeQueueMemory == pipeInsertNodeQueueMemory) { + return; + } + 
this.pipeInsertNodeQueueMemory = pipeInsertNodeQueueMemory; + logger.info("pipeInsertNodeQueueMemory is set to {}.", pipeInsertNodeQueueMemory); + } + + public long getPipeTsFileParserMemory() { + return pipeTsFileParserMemory; + } + + public void setPipeTsFileParserMemory(long pipeTsFileParserMemory) { + if (this.pipeTsFileParserMemory == pipeTsFileParserMemory) { + return; + } + this.pipeTsFileParserMemory = pipeTsFileParserMemory; + logger.info("pipeTsFileParserMemory is set to {}.", pipeTsFileParserMemory); + } + + public long getPipeSinkBatchMemoryInsertNode() { + return pipeSinkBatchMemoryInsertNode; + } + + public void setPipeSinkBatchMemoryInsertNode(long pipeSinkBatchMemoryInsertNode) { + if (this.pipeSinkBatchMemoryInsertNode == pipeSinkBatchMemoryInsertNode) { + return; + } + this.pipeSinkBatchMemoryInsertNode = pipeSinkBatchMemoryInsertNode; + logger.info("pipeSinkBatchMemoryInsertNode is set to {}.", pipeSinkBatchMemoryInsertNode); + } + + public long getPipeSinkBatchMemoryTsFile() { + return pipeSinkBatchMemoryTsFile; + } + + public void setPipeSinkBatchMemoryTsFile(long pipeSinkBatchMemoryTsFile) { + if (this.pipeSinkBatchMemoryTsFile == pipeSinkBatchMemoryTsFile) { + return; + } + this.pipeSinkBatchMemoryTsFile = pipeSinkBatchMemoryTsFile; + logger.info("pipeSinkBatchMemoryTsFile is set to {}.", pipeSinkBatchMemoryTsFile); + } + + public long getPipeSendTsFileReadBuffer() { + return pipeSendTsFileReadBuffer; + } + + public void setPipeSendTsFileReadBuffer(long pipeSendTsFileReadBuffer) { + if (this.pipeSendTsFileReadBuffer == pipeSendTsFileReadBuffer) { + return; + } + this.pipeSendTsFileReadBuffer = pipeSendTsFileReadBuffer; + logger.info("pipeSendTsFileReadBuffer is set to {}.", pipeSendTsFileReadBuffer); + } + + public double getPipeReservedMemoryPercentage() { + return pipeReservedMemoryPercentage; + } + + public void setPipeReservedMemoryPercentage(double pipeReservedMemoryPercentage) { + if (this.pipeReservedMemoryPercentage == 
pipeReservedMemoryPercentage) { + return; + } + this.pipeReservedMemoryPercentage = pipeReservedMemoryPercentage; + logger.info("pipeReservedMemoryPercentage is set to {}.", pipeReservedMemoryPercentage); + } + + public long getPipeMinimumReceiverMemory() { + return pipeMinimumReceiverMemory; + } + + public void setPipeMinimumReceiverMemory(long pipeMinimumReceiverMemory) { + if (this.pipeMinimumReceiverMemory == pipeMinimumReceiverMemory) { + return; + } + this.pipeMinimumReceiverMemory = pipeMinimumReceiverMemory; + logger.info("pipeMinimumReceiverMemory is set to {}.", pipeMinimumReceiverMemory); + } + public double getPipeTotalFloatingMemoryProportion() { return pipeTotalFloatingMemoryProportion; } @@ -933,11 +986,11 @@ public void setPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes( pipeExtractorAssignerDisruptorRingBufferEntrySize); } - public int getPipeExtractorMatcherCacheSize() { + public long getPipeExtractorMatcherCacheSize() { return pipeExtractorMatcherCacheSize; } - public void setPipeExtractorMatcherCacheSize(int pipeExtractorMatcherCacheSize) { + public void setPipeExtractorMatcherCacheSize(long pipeExtractorMatcherCacheSize) { if (this.pipeExtractorMatcherCacheSize == pipeExtractorMatcherCacheSize) { return; } @@ -1489,19 +1542,6 @@ public void setPipeMaxAllowedPendingTsFileEpochPerDataRegion( pipeMaxAllowedPendingTsFileEpochPerDataRegion); } - public int getPipeMaxAllowedPinnedMemTableCount() { - return pipeMaxAllowedPinnedMemTableCount; - } - - public void setPipeMaxAllowedPinnedMemTableCount(int pipeMaxAllowedPinnedMemTableCount) { - if (this.pipeMaxAllowedPinnedMemTableCount == pipeMaxAllowedPinnedMemTableCount) { - return; - } - this.pipeMaxAllowedPinnedMemTableCount = pipeMaxAllowedPinnedMemTableCount; - logger.info( - "pipeMaxAllowedPinnedMemTableCount is set to {}", pipeMaxAllowedPinnedMemTableCount); - } - public long getPipeMaxAllowedLinkedTsFileCount() { return pipeMaxAllowedLinkedTsFileCount; } @@ -1514,135 +1554,6 @@ public 
void setPipeMaxAllowedLinkedTsFileCount(long pipeMaxAllowedLinkedTsFileCo logger.info("pipeMaxAllowedLinkedTsFileCount is set to {}", pipeMaxAllowedLinkedTsFileCount); } - public float getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage() { - return pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage; - } - - public void setPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage( - float pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage) { - if (this.pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage - == pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage) { - return; - } - this.pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage = - pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage; - logger.info( - "pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage is set to {}", - pipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage); - } - - public long getPipeStuckRestartIntervalSeconds() { - return pipeStuckRestartIntervalSeconds; - } - - public long getPipeStuckRestartMinIntervalMs() { - return pipeStuckRestartMinIntervalMs; - } - - public boolean isPipeEpochKeepTsFileAfterStuckRestartEnabled() { - return pipeEpochKeepTsFileAfterStuckRestartEnabled; - } - - public long getPipeStorageEngineFlushTimeIntervalMs() { - return pipeStorageEngineFlushTimeIntervalMs; - } - - public int getPipeMaxAllowedRemainingInsertEventCountPerPipe() { - return pipeMaxAllowedRemainingInsertEventCountPerPipe; - } - - public void setPipeMaxAllowedRemainingInsertEventCountPerPipe( - int pipeMaxAllowedRemainingInsertEventCountPerPipe) { - if (this.pipeMaxAllowedRemainingInsertEventCountPerPipe - == pipeMaxAllowedRemainingInsertEventCountPerPipe) { - return; - } - this.pipeMaxAllowedRemainingInsertEventCountPerPipe = - pipeMaxAllowedRemainingInsertEventCountPerPipe; - logger.info( - "pipeMaxAllowedRemainingInsertEventCount is set to {}", - pipeMaxAllowedRemainingInsertEventCountPerPipe); - } - - public int getPipeMaxAllowedTotalRemainingInsertEventCount() { - return 
pipeMaxAllowedTotalRemainingInsertEventCount; - } - - public void setPipeMaxAllowedTotalRemainingInsertEventCount( - int pipeMaxAllowedTotalRemainingInsertEventCount) { - if (this.pipeMaxAllowedTotalRemainingInsertEventCount - == pipeMaxAllowedTotalRemainingInsertEventCount) { - return; - } - this.pipeMaxAllowedTotalRemainingInsertEventCount = - pipeMaxAllowedTotalRemainingInsertEventCount; - logger.info( - "pipeMaxAllowedTotalRemainingInsertEventCount is set to {}", - pipeMaxAllowedTotalRemainingInsertEventCount); - } - - public void setPipeStuckRestartIntervalSeconds(long pipeStuckRestartIntervalSeconds) { - if (this.pipeStuckRestartIntervalSeconds == pipeStuckRestartIntervalSeconds) { - return; - } - this.pipeStuckRestartIntervalSeconds = pipeStuckRestartIntervalSeconds; - logger.info("pipeStuckRestartIntervalSeconds is set to {}", pipeStuckRestartIntervalSeconds); - } - - public void setPipeStuckRestartMinIntervalMs(long pipeStuckRestartMinIntervalMs) { - if (this.pipeStuckRestartMinIntervalMs == pipeStuckRestartMinIntervalMs) { - return; - } - this.pipeStuckRestartMinIntervalMs = pipeStuckRestartMinIntervalMs; - logger.info("pipeStuckRestartMinIntervalMs is set to {}", pipeStuckRestartMinIntervalMs); - } - - public void setPipeEpochKeepTsFileAfterStuckRestartEnabled( - boolean pipeEpochKeepTsFileAfterStuckRestartEnabled) { - if (this.pipeEpochKeepTsFileAfterStuckRestartEnabled - == pipeEpochKeepTsFileAfterStuckRestartEnabled) { - return; - } - this.pipeEpochKeepTsFileAfterStuckRestartEnabled = pipeEpochKeepTsFileAfterStuckRestartEnabled; - logger.info( - "pipeEpochKeepTsFileAfterStuckRestartEnabled is set to {}", - pipeEpochKeepTsFileAfterStuckRestartEnabled); - } - - public void setPipeStorageEngineFlushTimeIntervalMs(long pipeStorageEngineFlushTimeIntervalMs) { - if (this.pipeStorageEngineFlushTimeIntervalMs == pipeStorageEngineFlushTimeIntervalMs) { - return; - } - this.pipeStorageEngineFlushTimeIntervalMs = pipeStorageEngineFlushTimeIntervalMs; - 
logger.info( - "pipeStorageEngineFlushTimeIntervalMs is set to {}", pipeStorageEngineFlushTimeIntervalMs); - } - - public long getPipeFlushAfterLastTerminateSeconds() { - return pipeFlushAfterLastTerminateSeconds; - } - - public void setPipeFlushAfterLastTerminateSeconds(long pipeFlushAfterLastTerminateSeconds) { - if (this.pipeFlushAfterLastTerminateSeconds == pipeFlushAfterLastTerminateSeconds) { - return; - } - this.pipeFlushAfterLastTerminateSeconds = pipeFlushAfterLastTerminateSeconds; - logger.info( - "pipeFlushAfterLastTerminateSeconds is set to {}", pipeFlushAfterLastTerminateSeconds); - } - - public long getPipeFlushAfterTerminateCount() { - return pipeFlushAfterTerminateCount; - } - - public void setPipeFlushAfterTerminateCount(long pipeFlushAfterTerminateCount) { - if (this.pipeFlushAfterTerminateCount == pipeFlushAfterTerminateCount) { - return; - } - this.pipeFlushAfterTerminateCount = pipeFlushAfterTerminateCount; - logger.info("pipeFlushAfterTerminateCount is set to {}", pipeFlushAfterTerminateCount); - } - public double getPipeMetaReportMaxLogNumPerRound() { return pipeMetaReportMaxLogNumPerRound; } @@ -1693,30 +1604,6 @@ public void setPipeTsFilePinMaxLogIntervalRounds(int pipeTsFilePinMaxLogInterval "pipeTsFilePinMaxLogIntervalRounds is set to {}", pipeTsFilePinMaxLogIntervalRounds); } - public int getPipeWalPinMaxLogNumPerRound() { - return pipeWalPinMaxLogNumPerRound; - } - - public void setPipeWalPinMaxLogNumPerRound(int pipeWalPinMaxLogNumPerRound) { - if (this.pipeWalPinMaxLogNumPerRound == pipeWalPinMaxLogNumPerRound) { - return; - } - this.pipeWalPinMaxLogNumPerRound = pipeWalPinMaxLogNumPerRound; - logger.info("pipeWalPinMaxLogNumPerRound is set to {}", pipeWalPinMaxLogNumPerRound); - } - - public int getPipeWalPinMaxLogIntervalRounds() { - return pipeWalPinMaxLogIntervalRounds; - } - - public void setPipeWalPinMaxLogIntervalRounds(int pipeWalPinMaxLogIntervalRounds) { - if (this.pipeWalPinMaxLogIntervalRounds == 
pipeWalPinMaxLogIntervalRounds) { - return; - } - this.pipeWalPinMaxLogIntervalRounds = pipeWalPinMaxLogIntervalRounds; - logger.info("pipeWalPinMaxLogIntervalRounds is set to {}", pipeWalPinMaxLogIntervalRounds); - } - public boolean getPipeMemoryManagementEnabled() { return pipeMemoryManagementEnabled; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java index b54d6db4dab4..3c8d13bab54d 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/ProgressIndex.java @@ -215,7 +215,7 @@ protected static ProgressIndex blendProgressIndex( *

Notice:TotalOrderSumTuple is an ordered tuple, the larger the subscript the higher the * weight of the element when comparing sizes, e.g. (1, 2) is larger than (2, 1). */ - protected static class TotalOrderSumTuple implements Comparable { + public static class TotalOrderSumTuple implements Comparable { private final ImmutableList tuple; /** diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java index 8b02d85da5ae..8f6a24845aa5 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/consensus/index/impl/IoTProgressIndex.java @@ -109,14 +109,18 @@ public boolean isAfter(@Nonnull ProgressIndex progressIndex) { return false; } - final IoTProgressIndex thisIoTProgressIndex = this; final IoTProgressIndex thatIoTProgressIndex = (IoTProgressIndex) progressIndex; - return thatIoTProgressIndex.peerId2SearchIndex.entrySet().stream() - .noneMatch( - entry -> - !thisIoTProgressIndex.peerId2SearchIndex.containsKey(entry.getKey()) - || thisIoTProgressIndex.peerId2SearchIndex.get(entry.getKey()) - <= entry.getValue()); + boolean isEquals = true; + for (final Map.Entry entry : + thatIoTProgressIndex.peerId2SearchIndex.entrySet()) { + if (!peerId2SearchIndex.containsKey(entry.getKey()) + || peerId2SearchIndex.get(entry.getKey()) < entry.getValue()) { + return false; + } else if (peerId2SearchIndex.get(entry.getKey()) > entry.getValue()) { + isEquals = false; + } + } + return !isEquals; } finally { lock.readLock().unlock(); } @@ -204,15 +208,6 @@ public TotalOrderSumTuple getTotalOrderSumTuple() { } } - public int getPeerId2SearchIndexSize() { - lock.readLock().lock(); - try { - return peerId2SearchIndex.size(); - } finally { - lock.readLock().unlock(); - } - } - public static 
IoTProgressIndex deserializeFrom(ByteBuffer byteBuffer) { final IoTProgressIndex ioTProgressIndex = new IoTProgressIndex(); final int size = ReadWriteIOUtils.readInt(byteBuffer); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java index 9818b43acea5..d1ea88072521 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/PipeTaskAgent.java @@ -20,6 +20,7 @@ package org.apache.iotdb.commons.pipe.agent.task; import org.apache.iotdb.common.rpc.thrift.TPipeHeartbeatResp; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeConnectorCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; @@ -33,6 +34,7 @@ import org.apache.iotdb.commons.pipe.agent.task.meta.PipeTemporaryMetaInAgent; import org.apache.iotdb.commons.pipe.agent.task.progress.CommitterKey; import org.apache.iotdb.commons.pipe.agent.task.progress.PipeEventCommitManager; +import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.connector.limiter.PipeEndPointRateLimiter; import org.apache.iotdb.commons.subscription.config.SubscriptionConfig; import org.apache.iotdb.mpp.rpc.thrift.TPipeHeartbeatReq; @@ -44,6 +46,7 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -53,8 +56,10 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import 
java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import java.util.stream.Collectors; /** @@ -343,7 +348,10 @@ protected TPushPipeMetaRespExceptionMessage handleDropPipeInternal(final String public List handlePipeMetaChanges( final List pipeMetaListFromCoordinator) { - acquireWriteLock(); + if (!tryWriteLockWithTimeOut( + CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) { + return null; + } try { return handlePipeMetaChangesInternal(pipeMetaListFromCoordinator); } finally { @@ -425,7 +433,11 @@ protected List handlePipeMetaChangesInternal( } public void dropAllPipeTasks() { - acquireWriteLock(); + if (!tryWriteLockWithTimeOut( + TimeUnit.MILLISECONDS.toSeconds(PipeConfig.getInstance().getPipeMaxWaitFinishTime()))) { + LOGGER.info("Failed to acquire lock when dropping all pipe tasks, will skip dropping"); + return; + } try { dropAllPipeTasksInternal(); } finally { @@ -463,6 +475,11 @@ private boolean createPipe(final PipeMeta pipeMetaFromCoordinator) throws Illega final String pipeName = pipeMetaFromCoordinator.getStaticMeta().getPipeName(); final long creationTime = pipeMetaFromCoordinator.getStaticMeta().getCreationTime(); + calculateMemoryUsage( + pipeMetaFromCoordinator.getStaticMeta().getExtractorParameters(), + pipeMetaFromCoordinator.getStaticMeta().getProcessorParameters(), + pipeMetaFromCoordinator.getStaticMeta().getConnectorParameters()); + final PipeMeta existedPipeMeta = pipeMetaKeeper.getPipeMeta(pipeName); if (existedPipeMeta != null) { if (!checkBeforeCreatePipe(existedPipeMeta, pipeName, creationTime)) { @@ -481,7 +498,7 @@ private boolean createPipe(final PipeMeta pipeMetaFromCoordinator) throws Illega // Trigger create() method for each pipe task by parallel stream final long startTime = System.currentTimeMillis(); - pipeTasks.values().parallelStream().forEach(PipeTask::create); + runPipeTasks(pipeTasks.values(), PipeTask::create); LOGGER.info( "Create all pipe tasks on Pipe {} 
successfully within {} ms", pipeName, @@ -503,6 +520,13 @@ private boolean createPipe(final PipeMeta pipeMetaFromCoordinator) throws Illega return needToStartPipe; } + protected void calculateMemoryUsage( + final PipeParameters extractorParameters, + final PipeParameters processorParameters, + final PipeParameters connectorParameters) { + // do nothing + } + protected abstract Map buildPipeTasks(final PipeMeta pipeMetaFromCoordinator) throws IllegalPathException; @@ -535,7 +559,7 @@ protected boolean dropPipe(final String pipeName, final long creationTime) { // Trigger drop() method for each pipe task by parallel stream final long startTime = System.currentTimeMillis(); - pipeTasks.values().parallelStream().forEach(PipeTask::drop); + runPipeTasks(pipeTasks.values(), PipeTask::drop); LOGGER.info( "Drop all pipe tasks on Pipe {} successfully within {} ms", pipeName, @@ -573,7 +597,7 @@ protected boolean dropPipe(final String pipeName) { // Trigger drop() method for each pipe task by parallel stream final long startTime = System.currentTimeMillis(); - pipeTasks.values().parallelStream().forEach(PipeTask::drop); + runPipeTasks(pipeTasks.values(), PipeTask::drop); LOGGER.info( "Drop all pipe tasks on Pipe {} successfully within {} ms", pipeName, @@ -606,7 +630,7 @@ protected void startPipe(final String pipeName, final long creationTime) { // Trigger start() method for each pipe task by parallel stream final long startTime = System.currentTimeMillis(); - pipeTasks.values().parallelStream().forEach(PipeTask::start); + runPipeTasks(pipeTasks.values(), PipeTask::start); LOGGER.info( "Start all pipe tasks on Pipe {} successfully within {} ms", pipeName, @@ -645,7 +669,7 @@ private void stopPipe(final String pipeName, final long creationTime) { // Trigger stop() method for each pipe task by parallel stream final long startTime = System.currentTimeMillis(); - pipeTasks.values().parallelStream().forEach(PipeTask::stop); + runPipeTasks(pipeTasks.values(), PipeTask::stop); 
LOGGER.info( "Stop all pipe tasks on Pipe {} successfully within {} ms", pipeName, @@ -1044,7 +1068,10 @@ private void stopAllPipesWithCriticalExceptionInternal(final int currentNodeId) public void collectPipeMetaList(final TPipeHeartbeatReq req, final TPipeHeartbeatResp resp) throws TException { - acquireReadLock(); + if (!tryReadLockWithTimeOut( + CommonDescriptor.getInstance().getConfig().getDnConnectionTimeoutInMS() * 2L / 3)) { + return; + } try { collectPipeMetaListInternal(req, resp); } finally { @@ -1055,6 +1082,9 @@ public void collectPipeMetaList(final TPipeHeartbeatReq req, final TPipeHeartbea protected abstract void collectPipeMetaListInternal( final TPipeHeartbeatReq req, final TPipeHeartbeatResp resp) throws TException; + public abstract void runPipeTasks( + final Collection pipeTasks, final Consumer runSingle); + ///////////////////////// Maintain meta info ///////////////////////// public long getPipeCreationTime(final String pipeName) { @@ -1097,17 +1127,21 @@ public long getFloatingMemoryUsageInByte(final String pipeName) { : ((PipeTemporaryMetaInAgent) pipeMeta.getTemporaryMeta()).getFloatingMemoryUsageInByte(); } - public void addFloatingMemoryUsageInByte(final String pipeName, final long sizeInByte) { + public void addFloatingMemoryUsageInByte( + final String pipeName, final long creationTime, final long sizeInByte) { final PipeMeta pipeMeta = pipeMetaKeeper.getPipeMeta(pipeName); - if (Objects.nonNull(pipeMeta)) { + // To avoid stale pipe before alter + if (Objects.nonNull(pipeMeta) && pipeMeta.getStaticMeta().getCreationTime() == creationTime) { ((PipeTemporaryMetaInAgent) pipeMeta.getTemporaryMeta()) .addFloatingMemoryUsageInByte(sizeInByte); } } - public void decreaseFloatingMemoryUsageInByte(final String pipeName, final long sizeInByte) { + public void decreaseFloatingMemoryUsageInByte( + final String pipeName, final long creationTime, final long sizeInByte) { final PipeMeta pipeMeta = pipeMetaKeeper.getPipeMeta(pipeName); - if 
(Objects.nonNull(pipeMeta)) { + // To avoid stale pipe before alter + if (Objects.nonNull(pipeMeta) && pipeMeta.getStaticMeta().getCreationTime() == creationTime) { ((PipeTemporaryMetaInAgent) pipeMeta.getTemporaryMeta()) .decreaseFloatingMemoryUsageInByte(sizeInByte); } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/execution/PipeSubtaskExecutor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/execution/PipeSubtaskExecutor.java index 4ea7714962bf..f16c6387cf57 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/execution/PipeSubtaskExecutor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/execution/PipeSubtaskExecutor.java @@ -30,18 +30,24 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; + import java.util.Map; +import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; public abstract class PipeSubtaskExecutor { private static final Logger LOGGER = LoggerFactory.getLogger(PipeSubtaskExecutor.class); - private static final ExecutorService subtaskCallbackListeningExecutor = + private static final ExecutorService globalSubtaskCallbackListeningExecutor = IoTDBThreadPoolFactory.newSingleThreadExecutor( ThreadName.PIPE_SUBTASK_CALLBACK_EXECUTOR_POOL.getName()); + private final ExecutorService subtaskCallbackListeningExecutor; + protected final WrappedThreadPoolExecutor underlyingThreadPool; protected final ListeningExecutorService subtaskWorkerThreadPoolExecutor; @@ -49,16 +55,38 @@ public abstract class PipeSubtaskExecutor { private final int corePoolSize; private int runningSubtaskNumber; + private final String workingThreadName; + private final String callbackThreadName; + + protected PipeSubtaskExecutor( + final int corePoolSize, + final String 
workingThreadName, + final boolean disableLogInThreadPool) { + this(corePoolSize, workingThreadName, null, disableLogInThreadPool); + } protected PipeSubtaskExecutor( - final int corePoolSize, final ThreadName threadName, final boolean disableLogInThreadPool) { + final int corePoolSize, + final String workingThreadName, + final @Nullable String callbackThreadName, + final boolean disableLogInThreadPool) { + this.workingThreadName = workingThreadName; + this.callbackThreadName = + Objects.nonNull(callbackThreadName) + ? callbackThreadName + : ThreadName.PIPE_SUBTASK_CALLBACK_EXECUTOR_POOL.getName(); underlyingThreadPool = (WrappedThreadPoolExecutor) - IoTDBThreadPoolFactory.newFixedThreadPool(corePoolSize, threadName.getName()); + IoTDBThreadPoolFactory.newFixedThreadPool(corePoolSize, workingThreadName); if (disableLogInThreadPool) { underlyingThreadPool.disableErrorLog(); } subtaskWorkerThreadPoolExecutor = MoreExecutors.listeningDecorator(underlyingThreadPool); + subtaskCallbackListeningExecutor = + Objects.nonNull(callbackThreadName) + ? 
IoTDBThreadPoolFactory.newSingleThreadExecutor( + callbackThreadName, new ThreadPoolExecutor.DiscardPolicy()) + : globalSubtaskCallbackListeningExecutor; registeredIdSubtaskMapper = new ConcurrentHashMap<>(); @@ -151,6 +179,9 @@ public final synchronized void shutdown() { } subtaskWorkerThreadPoolExecutor.shutdown(); + if (subtaskCallbackListeningExecutor != globalSubtaskCallbackListeningExecutor) { + subtaskCallbackListeningExecutor.shutdown(); + } } public final boolean isShutdown() { @@ -171,7 +202,11 @@ protected final boolean hasAvailableThread() { // return getAvailableThreadCount() > 0; } - private int getAvailableThreadCount() { - return underlyingThreadPool.getCorePoolSize() - underlyingThreadPool.getActiveCount(); + public String getWorkingThreadName() { + return workingThreadName; + } + + public String getCallbackThreadName() { + return callbackThreadName; } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java index b37bd07d1d9b..7056b052a3ee 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitManager.java @@ -118,9 +118,29 @@ public void commit(final EnrichedEvent event, final CommitterKey committerKey) { } } } - if (committerKey == null || event.getCommitId() <= EnrichedEvent.NO_COMMIT_ID) { + if (committerKey == null) { return; } + if (event.hasMultipleCommitIds()) { + commitMultipleIds(committerKey, event); + } else { + commitSingleId(committerKey, event.getCommitId(), event); + } + } + + private void commitMultipleIds(final CommitterKey committerKey, final EnrichedEvent event) { + for (final long commitId : event.getCommitIds()) { + if (commitSingleId(committerKey, commitId, 
event)) { + return; + } + } + } + + private boolean commitSingleId( + final CommitterKey committerKey, final long commitId, final EnrichedEvent event) { + if (commitId <= EnrichedEvent.NO_COMMIT_ID) { + return false; + } final PipeEventCommitter committer = eventCommitterMap.get(committerKey); if (committer == null) { @@ -142,10 +162,11 @@ public void commit(final EnrichedEvent event, final CommitterKey committerKey) { Thread.currentThread().getStackTrace()); } } - return; + return false; } committer.commit(event); + return true; } private CommitterKey generateCommitterKey( diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java index 9479c7a752ac..61f23907819d 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/progress/PipeEventCommitter.java @@ -58,6 +58,11 @@ public synchronized long generateCommitId() { @SuppressWarnings("java:S899") public synchronized void commit(final EnrichedEvent event) { + if (event.hasMultipleCommitIds()) { + for (final EnrichedEvent dummyEvent : event.getDummyEventsForCommitIds()) { + commitQueue.offer(dummyEvent); + } + } commitQueue.offer(event); final int commitQueueSizeBeforeCommit = commitQueue.size(); @@ -83,12 +88,11 @@ public synchronized void commit(final EnrichedEvent event) { final EnrichedEvent e = commitQueue.peek(); if (e.getCommitId() <= lastCommitId.get()) { - LOGGER.warn( - "commit id must be monotonically increasing, current commit id: {}, last commit id: {}, event: {}, stack trace: {}", + LOGGER.info( + "commit id is not monotonically increasing, current commit id: {}, last commit id: {}, event: {}, may be because the tsFile has been compacted", e.getCommitId(), lastCommitId.get(), 
- e.coreReportMessage(), - Thread.currentThread().getStackTrace()); + e.coreReportMessage()); commitQueue.poll(); continue; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeAbstractConnectorSubtask.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeAbstractConnectorSubtask.java index 0ca1f1be8d95..427193c4f182 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeAbstractConnectorSubtask.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeAbstractConnectorSubtask.java @@ -34,16 +34,11 @@ import org.slf4j.LoggerFactory; import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicLong; public abstract class PipeAbstractConnectorSubtask extends PipeReportableSubtask { private static final Logger LOGGER = LoggerFactory.getLogger(PipeAbstractConnectorSubtask.class); - // To ensure that high-priority tasks can obtain object locks first, a counter is now used to save - // the number of high-priority tasks. 
- protected final AtomicLong highPriorityLockTaskCount = new AtomicLong(0); - // For output (transfer events to the target system in connector) protected PipeConnector outputPipeConnector; diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeReportableSubtask.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeReportableSubtask.java index aa50bdd75767..37916d8f25ec 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeReportableSubtask.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/agent/task/subtask/PipeReportableSubtask.java @@ -22,14 +22,20 @@ import org.apache.iotdb.commons.exception.pipe.PipeRuntimeConnectorRetryTimesConfigurableException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeCriticalException; import org.apache.iotdb.commons.exception.pipe.PipeRuntimeException; +import org.apache.iotdb.commons.pipe.config.PipeConfig; import org.apache.iotdb.commons.pipe.event.EnrichedEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.concurrent.atomic.AtomicLong; + public abstract class PipeReportableSubtask extends PipeSubtask { private static final Logger LOGGER = LoggerFactory.getLogger(PipeReportableSubtask.class); + // To ensure that high-priority tasks can obtain object locks first, a counter is now used to save + // the number of high-priority tasks. 
+ protected final AtomicLong highPriorityLockTaskCount = new AtomicLong(0); protected PipeReportableSubtask(final String taskID, final long creationTime) { super(taskID, creationTime); @@ -84,7 +90,14 @@ private void onEnrichedEventFailure(final Throwable throwable) { throwable.getMessage(), throwable); try { - Thread.sleep(Math.min(1000L * retryCount.get(), 10000)); + synchronized (highPriorityLockTaskCount) { + // The wait operation will release the highPriorityLockTaskCount lock, so there will be + // no deadlock. + if (highPriorityLockTaskCount.get() == 0) { + highPriorityLockTaskCount.wait( + retryCount.get() * PipeConfig.getInstance().getPipeConnectorRetryIntervalMs()); + } + } } catch (final InterruptedException e) { LOGGER.warn( "Interrupted when retrying to execute subtask {} (creation time: {}, simple class: {})", @@ -151,7 +164,14 @@ private void onNonEnrichedEventFailure(final Throwable throwable) { throwable.getMessage(), throwable); try { - Thread.sleep(Math.min(1000L * retryCount.get(), 10000)); + synchronized (highPriorityLockTaskCount) { + // The wait operation will release the highPriorityLockTaskCount lock, so there will be + // no deadlock. 
+ if (highPriorityLockTaskCount.get() == 0) { + highPriorityLockTaskCount.wait( + retryCount.get() * PipeConfig.getInstance().getPipeConnectorRetryIntervalMs()); + } + } } catch (final InterruptedException e) { LOGGER.warn( "Interrupted when retrying to execute subtask {} (creation time: {}, simple class: {})", diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java index 01019b0abcb3..c00c97f19684 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeConfig.java @@ -30,12 +30,6 @@ public class PipeConfig { private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); - /////////////////////////////// Data Synchronization /////////////////////////////// - - public int getPipeNonForwardingEventsProgressReportInterval() { - return COMMON_CONFIG.getPipeNonForwardingEventsProgressReportInterval(); - } - /////////////////////////////// File /////////////////////////////// public String getPipeHardlinkBaseDirName() { @@ -46,14 +40,6 @@ public String getPipeHardlinkTsFileDirName() { return COMMON_CONFIG.getPipeHardlinkTsFileDirName(); } - public String getPipeHardlinkWALDirName() { - return COMMON_CONFIG.getPipeHardlinkWALDirName(); - } - - public boolean getPipeHardLinkWALEnabled() { - return COMMON_CONFIG.getPipeHardLinkWALEnabled(); - } - public boolean getPipeFileReceiverFsyncEnabled() { return COMMON_CONFIG.getPipeFileReceiverFsyncEnabled(); } @@ -79,16 +65,44 @@ public double getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold() { return COMMON_CONFIG.getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold(); } - public double getPipeDataStructureWalMemoryProportion() { - return COMMON_CONFIG.getPipeDataStructureWalMemoryProportion(); + public 
double getPipeTotalFloatingMemoryProportion() { + return COMMON_CONFIG.getPipeTotalFloatingMemoryProportion(); } public double getPipeDataStructureBatchMemoryProportion() { return COMMON_CONFIG.getPipeDataStructureBatchMemoryProportion(); } - public double getPipeTotalFloatingMemoryProportion() { - return COMMON_CONFIG.getPipeTotalFloatingMemoryProportion(); + public boolean isPipeEnableMemoryCheck() { + return COMMON_CONFIG.isPipeEnableMemoryChecked(); + } + + public long PipeInsertNodeQueueMemory() { + return COMMON_CONFIG.getPipeInsertNodeQueueMemory(); + } + + public long getTsFileParserMemory() { + return COMMON_CONFIG.getPipeTsFileParserMemory(); + } + + public long getSinkBatchMemoryInsertNode() { + return COMMON_CONFIG.getPipeSinkBatchMemoryInsertNode(); + } + + public long getSinkBatchMemoryTsFile() { + return COMMON_CONFIG.getPipeSinkBatchMemoryTsFile(); + } + + public long getSendTsFileReadBuffer() { + return COMMON_CONFIG.getPipeSendTsFileReadBuffer(); + } + + public double getReservedMemoryPercentage() { + return COMMON_CONFIG.getPipeReservedMemoryPercentage(); + } + + public long getPipeMinimumReceiverMemory() { + return COMMON_CONFIG.getPipeMinimumReceiverMemory(); } /////////////////////////////// Subtask Connector /////////////////////////////// @@ -145,7 +159,7 @@ public long getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes() { return COMMON_CONFIG.getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes(); } - public int getPipeExtractorMatcherCacheSize() { + public long getPipeExtractorMatcherCacheSize() { return COMMON_CONFIG.getPipeExtractorMatcherCacheSize(); } @@ -331,50 +345,10 @@ public int getPipeMaxAllowedPendingTsFileEpochPerDataRegion() { return COMMON_CONFIG.getPipeMaxAllowedPendingTsFileEpochPerDataRegion(); } - public int getPipeMaxAllowedPinnedMemTableCount() { - return COMMON_CONFIG.getPipeMaxAllowedPinnedMemTableCount(); - } - public long getPipeMaxAllowedLinkedTsFileCount() { return 
COMMON_CONFIG.getPipeMaxAllowedLinkedTsFileCount(); } - public float getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage() { - return COMMON_CONFIG.getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage(); - } - - public long getPipeStuckRestartIntervalSeconds() { - return COMMON_CONFIG.getPipeStuckRestartIntervalSeconds(); - } - - public long getPipeStuckRestartMinIntervalMs() { - return COMMON_CONFIG.getPipeStuckRestartMinIntervalMs(); - } - - public boolean isPipeEpochKeepTsFileAfterStuckRestartEnabled() { - return COMMON_CONFIG.isPipeEpochKeepTsFileAfterStuckRestartEnabled(); - } - - public long getPipeFlushAfterTerminateCount() { - return COMMON_CONFIG.getPipeFlushAfterTerminateCount(); - } - - public long getPipeFlushAfterLastTerminateSeconds() { - return COMMON_CONFIG.getPipeFlushAfterLastTerminateSeconds(); - } - - public long getPipeStorageEngineFlushTimeIntervalMs() { - return COMMON_CONFIG.getPipeStorageEngineFlushTimeIntervalMs(); - } - - public int getPipeMaxAllowedRemainingInsertEventCountPerPipe() { - return COMMON_CONFIG.getPipeMaxAllowedRemainingInsertEventCountPerPipe(); - } - - public int getPipeMaxAllowedTotalRemainingInsertEventCount() { - return COMMON_CONFIG.getPipeMaxAllowedTotalRemainingInsertEventCount(); - } - /////////////////////////////// Logger /////////////////////////////// public double getPipeMetaReportMaxLogNumPerRound() { @@ -393,14 +367,6 @@ public int getPipeTsFilePinMaxLogIntervalRounds() { return COMMON_CONFIG.getPipeTsFilePinMaxLogIntervalRounds(); } - public int getPipeWalPinMaxLogNumPerRound() { - return COMMON_CONFIG.getPipeWalPinMaxLogNumPerRound(); - } - - public int getPipeWalPinMaxLogIntervalRounds() { - return COMMON_CONFIG.getPipeWalPinMaxLogIntervalRounds(); - } - /////////////////////////////// Memory /////////////////////////////// public boolean getPipeMemoryManagementEnabled() { @@ -460,14 +426,8 @@ public long getPipeEventReferenceEliminateIntervalSeconds() { private static final Logger LOGGER = 
LoggerFactory.getLogger(PipeConfig.class); public void printAllConfigs() { - LOGGER.info( - "PipeNonForwardingEventsProgressReportInterval: {}", - getPipeNonForwardingEventsProgressReportInterval()); - LOGGER.info("PipeHardlinkBaseDirName: {}", getPipeHardlinkBaseDirName()); LOGGER.info("PipeHardlinkTsFileDirName: {}", getPipeHardlinkTsFileDirName()); - LOGGER.info("PipeHardlinkWALDirName: {}", getPipeHardlinkWALDirName()); - LOGGER.info("PipeHardLinkWALEnabled: {}", getPipeHardLinkWALEnabled()); LOGGER.info("PipeFileReceiverFsyncEnabled: {}", getPipeFileReceiverFsyncEnabled()); LOGGER.info("PipeDataStructureTabletRowSize: {}", getPipeDataStructureTabletRowSize()); @@ -480,6 +440,16 @@ public void printAllConfigs() { getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold()); LOGGER.info("PipeTotalFloatingMemoryProportion: {}", getPipeTotalFloatingMemoryProportion()); + LOGGER.info( + "PipeDataStructureBatchMemoryProportion: {}", getPipeDataStructureBatchMemoryProportion()); + LOGGER.info("IsPipeEnableMemoryCheck: {}", isPipeEnableMemoryCheck()); + LOGGER.info("PipeTsFileParserMemory: {}", getTsFileParserMemory()); + LOGGER.info("SinkBatchMemoryInsertNode: {}", getSinkBatchMemoryInsertNode()); + LOGGER.info("SinkBatchMemoryTsFile: {}", getSinkBatchMemoryTsFile()); + LOGGER.info("SendTsFileReadBuffer: {}", getSendTsFileReadBuffer()); + LOGGER.info("PipeReservedMemoryPercentage: {}", getReservedMemoryPercentage()); + LOGGER.info("PipeMinimumReceiverMemory: {}", getPipeMinimumReceiverMemory()); + LOGGER.info( "PipeRealTimeQueuePollTsFileThreshold: {}", getPipeRealTimeQueuePollTsFileThreshold()); LOGGER.info( @@ -612,33 +582,12 @@ public void printAllConfigs() { LOGGER.info( "PipeMaxAllowedPendingTsFileEpochPerDataRegion: {}", getPipeMaxAllowedPendingTsFileEpochPerDataRegion()); - LOGGER.info("PipeMaxAllowedPinnedMemTableCount: {}", getPipeMaxAllowedPinnedMemTableCount()); LOGGER.info("PipeMaxAllowedLinkedTsFileCount: {}", getPipeMaxAllowedLinkedTsFileCount()); 
- LOGGER.info( - "PipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage: {}", - getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage()); - LOGGER.info("PipeStuckRestartIntervalSeconds: {}", getPipeStuckRestartIntervalSeconds()); - LOGGER.info("PipeStuckRestartMinIntervalMs: {}", getPipeStuckRestartMinIntervalMs()); - LOGGER.info( - "PipeEpochKeepTsFileAfterStuckRestartEnabled: {}", - isPipeEpochKeepTsFileAfterStuckRestartEnabled()); - LOGGER.info("PipeFlushAfterTerminateCount: {}", getPipeFlushAfterTerminateCount()); - LOGGER.info("PipeFlushAfterLastTerminateSeconds: {}", getPipeFlushAfterLastTerminateSeconds()); - LOGGER.info( - "PipeStorageEngineFlushTimeIntervalMs: {}", getPipeStorageEngineFlushTimeIntervalMs()); - LOGGER.info( - "PipeMaxAllowedRemainingInsertEventCountPerPipe: {}", - getPipeMaxAllowedRemainingInsertEventCountPerPipe()); - LOGGER.info( - "PipeMaxAllowedTotalRemainingInsertEventCount: {}", - getPipeMaxAllowedTotalRemainingInsertEventCount()); LOGGER.info("PipeMetaReportMaxLogNumPerRound: {}", getPipeMetaReportMaxLogNumPerRound()); LOGGER.info("PipeMetaReportMaxLogIntervalRounds: {}", getPipeMetaReportMaxLogIntervalRounds()); LOGGER.info("PipeTsFilePinMaxLogNumPerRound: {}", getPipeTsFilePinMaxLogNumPerRound()); LOGGER.info("PipeTsFilePinMaxLogIntervalRounds: {}", getPipeTsFilePinMaxLogIntervalRounds()); - LOGGER.info("PipeWalPinMaxLogNumPerRound: {}", getPipeWalPinMaxLogNumPerRound()); - LOGGER.info("PipeWalPinMaxLogIntervalRounds: {}", getPipeWalPinMaxLogIntervalRounds()); LOGGER.info("PipeMemoryManagementEnabled: {}", getPipeMemoryManagementEnabled()); LOGGER.info("PipeMemoryAllocateMaxRetries: {}", getPipeMemoryAllocateMaxRetries()); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java index d50eacaf0365..15a1940dbbff 100644 --- 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/PipeDescriptor.java @@ -46,13 +46,6 @@ public static void loadPipeStaticConfig(CommonConfig config, TrimProperties prop config.setPipeHardlinkTsFileDirName( properties.getProperty( "pipe_hardlink_tsfile_dir_name", config.getPipeHardlinkTsFileDirName())); - config.setPipeHardlinkWALDirName( - properties.getProperty("pipe_hardlink_wal_dir_name", config.getPipeHardlinkWALDirName())); - config.setPipeHardLinkWALEnabled( - Boolean.parseBoolean( - properties.getProperty( - "pipe_hardlink_wal_enabled", - Boolean.toString(config.getPipeHardLinkWALEnabled())))); int pipeSubtaskExecutorMaxThreadNum = Integer.parseInt( properties.getProperty( @@ -129,17 +122,6 @@ public static void loadPipeStaticConfig(CommonConfig config, TrimProperties prop properties.getProperty( "pipe_tsfile_pin_max_log_interval_rounds", String.valueOf(config.getPipeTsFilePinMaxLogIntervalRounds())))); - config.setPipeWalPinMaxLogNumPerRound( - Integer.parseInt( - properties.getProperty( - "pipe_wal_pin_max_log_num_per_round", - String.valueOf(config.getPipeWalPinMaxLogNumPerRound())))); - config.setPipeWalPinMaxLogIntervalRounds( - Integer.parseInt( - properties.getProperty( - "pipe_wal_pin_max_log_interval_rounds", - String.valueOf(config.getPipeWalPinMaxLogIntervalRounds())))); - config.setPipeMemoryManagementEnabled( Boolean.parseBoolean( properties.getProperty( @@ -199,14 +181,7 @@ public static void loadPipeStaticConfig(CommonConfig config, TrimProperties prop .trim())); } - public static void loadPipeInternalConfig(CommonConfig config, TrimProperties properties) - throws IOException { - config.setPipeNonForwardingEventsProgressReportInterval( - Integer.parseInt( - properties.getProperty( - "pipe_non_forwarding_events_progress_report_interval", - Integer.toString(config.getPipeNonForwardingEventsProgressReportInterval())))); 
- + public static void loadPipeInternalConfig(CommonConfig config, TrimProperties properties) { config.setPipeFileReceiverFsyncEnabled( Boolean.parseBoolean( properties.getProperty( @@ -235,11 +210,6 @@ public static void loadPipeInternalConfig(CommonConfig config, TrimProperties pr "pipe_data_structure_ts_file_memory_block_allocation_reject_threshold", String.valueOf( config.getPipeDataStructureTsFileMemoryBlockAllocationRejectThreshold())))); - config.setPipeDataStructureWalMemoryProportion( - Double.parseDouble( - properties.getProperty( - "pipe_data_structure_wal_memory_proportion", - String.valueOf(config.getPipeDataStructureWalMemoryProportion())))); config.setPipeDataStructureBatchMemoryProportion( Double.parseDouble( properties.getProperty( @@ -251,6 +221,45 @@ public static void loadPipeInternalConfig(CommonConfig config, TrimProperties pr "pipe_total_floating_memory_proportion", String.valueOf(config.getPipeTotalFloatingMemoryProportion())))); + config.setIsPipeEnableMemoryChecked( + Boolean.parseBoolean( + properties.getProperty( + "pipe_enable_memory_checked", String.valueOf(config.isPipeEnableMemoryChecked())))); + config.setPipeInsertNodeQueueMemory( + Long.parseLong( + properties.getProperty( + "pipe_insert_node_queue_memory", + String.valueOf(config.getPipeInsertNodeQueueMemory())))); + config.setPipeTsFileParserMemory( + Long.parseLong( + properties.getProperty( + "pipe_tsfile_parser_memory", String.valueOf(config.getPipeTsFileParserMemory())))); + config.setPipeSinkBatchMemoryInsertNode( + Long.parseLong( + properties.getProperty( + "pipe_sink_batch_memory_insert_node", + String.valueOf(config.getPipeSinkBatchMemoryInsertNode())))); + config.setPipeSinkBatchMemoryTsFile( + Long.parseLong( + properties.getProperty( + "pipe_sink_batch_memory_ts_file", + String.valueOf(config.getPipeSinkBatchMemoryTsFile())))); + config.setPipeSendTsFileReadBuffer( + Long.parseLong( + properties.getProperty( + "pipe_send_tsfile_read_buffer", + 
String.valueOf(config.getPipeSendTsFileReadBuffer())))); + config.setPipeReservedMemoryPercentage( + Double.parseDouble( + properties.getProperty( + "pipe_reserved_memory_percentage", + String.valueOf(config.getPipeReservedMemoryPercentage())))); + config.setPipeMinimumReceiverMemory( + Long.parseLong( + properties.getProperty( + "pipe_minimum_receiver_memory", + String.valueOf(config.getPipeMinimumReceiverMemory())))); + config.setPipeRealTimeQueuePollTsFileThreshold( Integer.parseInt( Optional.ofNullable( @@ -312,9 +321,10 @@ public static void loadPipeInternalConfig(CommonConfig config, TrimProperties pr String.valueOf( config .getPipeExtractorAssignerDisruptorRingBufferEntrySizeInBytes()))))); + config.setPipeExtractorMatcherCacheSize( Integer.parseInt( - Optional.ofNullable(properties.getProperty("pipe_source_matcher_cache_size")) + Optional.ofNullable(properties.getProperty("pipe_extractor_matcher_cache_size")) .orElse( properties.getProperty( "pipe_extractor_matcher_cache_size", @@ -433,61 +443,11 @@ public static void loadPipeInternalConfig(CommonConfig config, TrimProperties pr properties.getProperty( "pipe_max_allowed_pending_tsfile_epoch_per_data_region", String.valueOf(config.getPipeMaxAllowedPendingTsFileEpochPerDataRegion())))); - config.setPipeMaxAllowedPinnedMemTableCount( - Integer.parseInt( - properties.getProperty( - "pipe_max_allowed_pinned_memtable_count", - String.valueOf(config.getPipeMaxAllowedPinnedMemTableCount())))); config.setPipeMaxAllowedLinkedTsFileCount( Long.parseLong( properties.getProperty( "pipe_max_allowed_linked_tsfile_count", String.valueOf(config.getPipeMaxAllowedLinkedTsFileCount())))); - config.setPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage( - Float.parseFloat( - properties.getProperty( - "pipe_max_allowed_linked_deleted_tsfile_disk_usage_percentage", - String.valueOf(config.getPipeMaxAllowedLinkedDeletedTsFileDiskUsagePercentage())))); - config.setPipeStuckRestartIntervalSeconds( - Long.parseLong( - 
properties.getProperty( - "pipe_stuck_restart_interval_seconds", - String.valueOf(config.getPipeStuckRestartIntervalSeconds())))); - config.setPipeMaxAllowedRemainingInsertEventCountPerPipe( - Integer.parseInt( - properties.getProperty( - "pipe_max_allowed_remaining_insert_event_count_per_pipe", - String.valueOf(config.getPipeMaxAllowedRemainingInsertEventCountPerPipe())))); - config.setPipeMaxAllowedTotalRemainingInsertEventCount( - Integer.parseInt( - properties.getProperty( - "pipe_max_allowed_total_remaining_insert_event_count", - String.valueOf(config.getPipeMaxAllowedTotalRemainingInsertEventCount())))); - config.setPipeStuckRestartMinIntervalMs( - Long.parseLong( - properties.getProperty( - "pipe_stuck_restart_min_interval_ms", - String.valueOf(config.getPipeStuckRestartMinIntervalMs())))); - config.setPipeFlushAfterLastTerminateSeconds( - Long.parseLong( - properties.getProperty( - "pipe_flush_after_last_terminate_seconds", - String.valueOf(config.getPipeFlushAfterLastTerminateSeconds())))); - config.setPipeFlushAfterTerminateCount( - Long.parseLong( - properties.getProperty( - "pipe_flush_after_terminate_count", - String.valueOf(config.getPipeFlushAfterTerminateCount())))); - config.setPipeEpochKeepTsFileAfterStuckRestartEnabled( - Boolean.parseBoolean( - properties.getProperty( - "pipe_epoch_keep_tsfile_after_stuck_restart_enabled", - String.valueOf(config.isPipeEpochKeepTsFileAfterStuckRestartEnabled())))); - config.setPipeStorageEngineFlushTimeIntervalMs( - Long.parseLong( - properties.getProperty( - "pipe_storage_engine_flush_time_interval_ms", - String.valueOf(config.getPipeStorageEngineFlushTimeIntervalMs())))); config.setPipeMemoryAllocateMaxRetries( Integer.parseInt( diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java index 394ff7ae9d6a..c67809347037 100644 --- 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java @@ -75,7 +75,7 @@ public class PipeConnectorConstant { public static final String CONNECTOR_IOTDB_BATCH_SIZE_KEY = "connector.batch.size-bytes"; public static final String SINK_IOTDB_BATCH_SIZE_KEY = "sink.batch.size-bytes"; public static final long CONNECTOR_IOTDB_PLAIN_BATCH_SIZE_DEFAULT_VALUE = MB; - public static final long CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE = 2 * MB; + public static final long CONNECTOR_IOTDB_TS_FILE_BATCH_SIZE_DEFAULT_VALUE = MB; public static final String CONNECTOR_IOTDB_USER_KEY = "connector.user"; public static final String SINK_IOTDB_USER_KEY = "sink.user"; diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java index 909d43be46e5..c61abbec2895 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFilePieceReq.java @@ -82,7 +82,6 @@ protected final PipeTransferFilePieceReq translateFromTPipeTransferReq( version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java index 0bc0342cc998..157d73e06153 100644 --- 
a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV1.java @@ -71,7 +71,6 @@ public PipeTransferFileSealReqV1 translateFromTPipeTransferReq(TPipeTransferReq version = req.version; type = req.type; - body = req.body; return this; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java index 3ed9999a4e81..86cce0245ec9 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferFileSealReqV2.java @@ -109,7 +109,6 @@ public PipeTransferFileSealReqV2 translateFromTPipeTransferReq(TPipeTransferReq version = req.version; type = req.type; - body = req.body; return this; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java index 7c0330a7a834..d4fb192c0358 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV1Req.java @@ -62,7 +62,6 @@ protected final PipeTransferHandshakeV1Req translateFromTPipeTransferReq( version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; 
} diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java index 8ed63cda5465..c6fd0f977a10 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferHandshakeV2Req.java @@ -74,7 +74,6 @@ protected final PipeTransferHandshakeV2Req translateFromTPipeTransferReq( version = transferReq.version; type = transferReq.type; - body = transferReq.body; return this; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java index 4df6008400d7..1041cb283c52 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/payload/thrift/request/PipeTransferSliceReq.java @@ -128,7 +128,6 @@ public static PipeTransferSliceReq fromTPipeTransferReq(final TPipeTransferReq t sliceReq.version = transferReq.version; sliceReq.type = transferReq.type; - sliceReq.body = transferReq.body; return sliceReq; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java index 561b61b2391c..a7dacb49553f 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java +++ 
b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java @@ -20,6 +20,7 @@ package org.apache.iotdb.commons.pipe.connector.protocol; import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant; import org.apache.iotdb.commons.pipe.config.plugin.env.PipeTaskConnectorRuntimeEnvironment; import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressor; import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressorConfig; @@ -176,6 +177,7 @@ public abstract class IoTDBConnector implements PipeConnector { private final AtomicLong totalCompressedSize = new AtomicLong(0); protected String attributeSortedString; protected Timer compressionTimer; + protected boolean isRealtimeFirst; @Override public void validate(final PipeParameterValidator validator) throws Exception { @@ -465,6 +467,16 @@ public void customize( "IoTDBConnector {} = {}", CONNECTOR_EXCEPTION_DATA_CONVERT_ON_TYPE_MISMATCH_KEY, shouldReceiverConvertOnTypeMismatch); + isRealtimeFirst = + parameters.getBooleanOrDefault( + Arrays.asList( + PipeConnectorConstant.CONNECTOR_REALTIME_FIRST_KEY, + PipeConnectorConstant.SINK_REALTIME_FIRST_KEY), + PipeConnectorConstant.CONNECTOR_REALTIME_FIRST_DEFAULT_VALUE); + LOGGER.info( + "IoTDBConnector {} = {}", + PipeConnectorConstant.CONNECTOR_REALTIME_FIRST_KEY, + isRealtimeFirst); } protected LinkedHashSet parseNodeUrls(final PipeParameters parameters) diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java index a9dae7719672..374bc5446091 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/event/EnrichedEvent.java @@ -32,6 +32,7 @@ import org.slf4j.LoggerFactory; 
import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -395,10 +396,22 @@ public CommitterKey getCommitterKey() { return committerKey; } + public boolean hasMultipleCommitIds() { + return false; + } + public long getCommitId() { return commitId; } + public List getDummyEventsForCommitIds() { + return Collections.emptyList(); + } + + public List getCommitIds() { + return Collections.singletonList(commitId); + } + public void onCommitted() { onCommittedHooks.forEach(Supplier::get); } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/resource/log/PipeLogStatus.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/resource/log/PipeLogStatus.java index 5427de9a8316..a30b12ab5eb0 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/resource/log/PipeLogStatus.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/resource/log/PipeLogStatus.java @@ -42,7 +42,7 @@ class PipeLogStatus { synchronized Optional schedule(final int scale) { if (currentRounds.incrementAndGet() - >= Math.min((int) Math.ceil((double) scale / maxAverageScale), maxLogInterval)) { + >= Math.min((int) Math.ceil(scale / maxAverageScale), maxLogInterval)) { currentRounds.set(0); return Optional.of(logger); } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java index 145b4adaf94e..cd91c788884f 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java @@ -139,6 +139,10 @@ public enum Metric { UNTRANSFERRED_TABLET_COUNT("untransferred_tablet_count"), 
UNTRANSFERRED_TSFILE_COUNT("untransferred_tsfile_count"), UNTRANSFERRED_HEARTBEAT_COUNT("untransferred_heartbeat_count"), + PIPE_INSERT_NODE_BATCH_SIZE("pipe_insert_node_batch_size"), + PIPE_TSFILE_BATCH_SIZE("pipe_tsfile_batch_size"), + PIPE_INSERT_NODE_BATCH_TIME_COST("pipe_insert_node_batch_time_cost"), + PIPE_TSFILE_BATCH_TIME_COST("pipe_tsfile_batch_time_cost"), PIPE_CONNECTOR_BATCH_SIZE("pipe_connector_batch_size"), PIPE_PENDING_HANDLERS_SIZE("pipe_pending_handlers_size"), PIPE_TOTAL_UNCOMPRESSED_SIZE("pipe_total_uncompressed_size"), @@ -156,12 +160,8 @@ public enum Metric { PIPE_CONNECTOR_TSFILE_TRANSFER("pipe_connector_tsfile_transfer"), PIPE_CONNECTOR_HEARTBEAT_TRANSFER("pipe_connector_heartbeat_transfer"), PIPE_HEARTBEAT_EVENT("pipe_heartbeat_event"), - PIPE_WAL_INSERT_NODE_CACHE_HIT_RATE("pipe_wal_insert_node_cache_hit_rate"), - PIPE_WAL_INSERT_NODE_CACHE_HIT_COUNT("pipe_wal_insert_node_cache_hit_count"), - PIPE_WAL_INSERT_NODE_CACHE_REQUEST_COUNT("pipe_wal_insert_node_cache_request_count"), PIPE_EXTRACTOR_TSFILE_EPOCH_STATE("pipe_extractor_tsfile_epoch_state"), PIPE_MEM("pipe_mem"), - PIPE_PINNED_MEMTABLE_COUNT("pipe_pinned_memtable_count"), PIPE_LINKED_TSFILE_COUNT("pipe_linked_tsfile_count"), PIPE_LINKED_TSFILE_SIZE("pipe_linked_tsfile_size"), PIPE_PHANTOM_REFERENCE_COUNT("pipe_phantom_reference_count"), @@ -177,6 +177,7 @@ public enum Metric { PIPE_INSERT_NODE_EVENT_TRANSFER_TIME("pipe_insert_node_event_transfer_time"), PIPE_TSFILE_EVENT_TRANSFER_TIME("pipe_tsfile_event_transfer_time"), PIPE_DATANODE_EVENT_TRANSFER("pipe_datanode_event_transfer"), + PIPE_FLOATING_MEMORY_USAGE("pipe_floating_memory_usage"), PIPE_CONFIG_LINKED_QUEUE_SIZE("pipe_config_linked_queue_size"), UNTRANSFERRED_CONFIG_COUNT("untransferred_config_count"), PIPE_CONNECTOR_CONFIG_TRANSFER("pipe_connector_config_transfer"),