Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ public HikariPooledConnectionProvider(
public boolean acceptsUrl(
@NonNull String protocol, @NonNull HostSpec hostSpec, @NonNull Properties props) {
final RdsUrlType urlType = rdsUtils.identifyRdsType(hostSpec.getHost());
return RdsUrlType.RDS_INSTANCE.equals(urlType);
return RdsUrlType.RDS_INSTANCE.equals(urlType) || hostSpec.getHost().contains(".proxied");
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ public class FailoverConnectionPlugin extends AbstractConnectionPlugin {
private final PluginService pluginService;
protected final Properties properties;
protected boolean enableFailoverSetting;
protected boolean enableConnectFailover;
protected int failoverTimeoutMsSetting;
protected int failoverClusterTopologyRefreshRateMsSetting;
protected int failoverWriterReconnectIntervalMsSetting;
Expand Down Expand Up @@ -137,6 +138,13 @@ public class FailoverConnectionPlugin extends AbstractConnectionPlugin {
"enableClusterAwareFailover", "true",
"Enable/disable cluster-aware failover logic");

public static final AwsWrapperProperty ENABLE_CONNECT_FAILOVER =
new AwsWrapperProperty(
"enableConnectFailover", "false",
"Enable/disable cluster-aware failover if the initial connection to the database fails. "
+ " Note that this may result in a connection to a different instance in the cluster than was specified "
+ "by the URL.");

public static final AwsWrapperProperty FAILOVER_MODE =
new AwsWrapperProperty(
"failoverMode", null,
Expand Down Expand Up @@ -353,6 +361,7 @@ public boolean isFailoverEnabled() {

private void initSettings() {
this.enableFailoverSetting = ENABLE_CLUSTER_AWARE_FAILOVER.getBoolean(this.properties);
this.enableConnectFailover = ENABLE_CONNECT_FAILOVER.getBoolean(this.properties);
this.failoverTimeoutMsSetting = FAILOVER_TIMEOUT_MS.getInteger(this.properties);
this.failoverClusterTopologyRefreshRateMsSetting =
FAILOVER_CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(this.properties);
Expand Down Expand Up @@ -767,15 +776,34 @@ public Connection connect(
final boolean isInitialConnection,
final JdbcCallable<Connection, SQLException> connectFunc)
throws SQLException {
return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, connectFunc);
return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, connectFunc, false);
}

private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Properties props,
boolean isInitialConnection, JdbcCallable<Connection, SQLException> connectFunc)
boolean isInitialConnection, JdbcCallable<Connection, SQLException> connectFunc, boolean isForceConnect)
throws SQLException {
final Connection conn =
this.staleDnsHelper.getVerifiedConnection(isInitialConnection, this.hostListProviderService,
driverProtocol, hostSpec, props, connectFunc);

Connection conn = null;
try {
conn =
this.staleDnsHelper.getVerifiedConnection(isInitialConnection, this.hostListProviderService,
driverProtocol, hostSpec, props, connectFunc);
} catch (final SQLException e) {
if (!this.enableConnectFailover || isForceConnect || !shouldExceptionTriggerConnectionSwitch(e)) {
throw e;
}

try {
failover(this.pluginService.getCurrentHostSpec());
} catch (FailoverSuccessSQLException failoverSuccessException) {
conn = this.pluginService.getCurrentConnection();
}
}

if (conn == null) {
// This should be unreachable, the above logic will either get a connection successfully or throw an exception.
throw new SQLException(Messages.get("Failover.unableToConnect"));
}

if (isInitialConnection) {
this.pluginService.refreshHostList(conn);
Expand All @@ -792,6 +820,6 @@ public Connection forceConnect(
final boolean isInitialConnection,
final JdbcCallable<Connection, SQLException> forceConnectFunc)
throws SQLException {
return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, forceConnectFunc);
return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, forceConnectFunc, true);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,7 @@ ExecutionTimeConnectionPlugin.executionTime=Executed {0} in {1} nanos.
Failover.transactionResolutionUnknownError=Transaction resolution unknown. Please re-configure session state if required and try restarting the transaction.
Failover.connectionChangedError=The active SQL connection has changed due to a connection failure. Please re-configure session state if required.
Failover.parameterValue={0}={1}
Failover.unableToConnect=Unable to establish a SQL connection due to an unexpected error.
Failover.unableToConnectToWriter=Unable to establish SQL connection to the writer instance.
Failover.unableToConnectToReader=Unable to establish SQL connection to the reader instance.
Failover.detectedException=Detected an exception while executing a command: {0}
Expand Down
184 changes: 181 additions & 3 deletions wrapper/src/test/java/integration/container/tests/HikariTests.java
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,9 @@
package integration.container.tests;

import static integration.util.AuroraTestUtility.executeWithTimeout;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static software.amazon.jdbc.PropertyDefinition.PLUGINS;
Expand Down Expand Up @@ -47,6 +49,7 @@
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLTransientConnectionException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Properties;
Expand All @@ -57,8 +60,11 @@
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.extension.ExtendWith;
import software.amazon.jdbc.ConnectionProviderManager;
import software.amazon.jdbc.HikariPooledConnectionProvider;
import software.amazon.jdbc.PropertyDefinition;
import software.amazon.jdbc.ds.AwsWrapperDataSource;
import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
import software.amazon.jdbc.plugin.efm2.HostMonitoringConnectionPlugin;
import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin;
import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException;
Expand Down Expand Up @@ -173,7 +179,7 @@ public void testFailoverLostConnection() throws SQLException {
FAILOVER_TIMEOUT_MS.set(customProps, Integer.toString(1));
PropertyDefinition.SOCKET_TIMEOUT.set(customProps, String.valueOf(TimeUnit.SECONDS.toMillis(1)));

try (final HikariDataSource dataSource = createDataSource(customProps)) {
try (final HikariDataSource dataSource = createHikariDataSource(customProps)) {

try (Connection conn = dataSource.getConnection()) {
assertTrue(conn.isValid(5));
Expand Down Expand Up @@ -224,7 +230,7 @@ public void testEFMFailover() throws SQLException {
LOGGER.fine("Instance to fail over to: " + readerIdentifier);

ProxyHelper.enableConnectivity(writerIdentifier);
try (final HikariDataSource dataSource = createDataSource(null)) {
try (final HikariDataSource dataSource = createHikariDataSource(null)) {

// Get a valid connection, then make it fail over to a different instance
try (Connection conn = dataSource.getConnection()) {
Expand Down Expand Up @@ -252,7 +258,179 @@ public void testEFMFailover() throws SQLException {
}
}

private HikariDataSource createDataSource(final Properties customProps) {
/**
 * After successfully opening and returning a connection to the Hikari pool, writer failover is triggered when
 * getConnection is called.
 *
 * <p>Connects in "strict-writer" mode to the instance at index 1, disables connectivity to it, and verifies
 * that a later getConnection call transparently fails over to a different node.
 */
@TestTemplate
@EnableOnTestFeature({TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED})
@EnableOnNumOfInstances(min = 2)
public void testInternalPools_driverWriterFailoverOnGetConnectionInvocation()
    throws SQLException, InterruptedException {
  final AuroraTestUtility auroraUtil = AuroraTestUtility.getUtility();
  final TestProxyDatabaseInfo proxyInfo = TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo();
  final List<TestInstanceInfo> instances = proxyInfo.getInstances();
  // NOTE(review): assumes the instance at index 1 is a reader -- confirm the test environment's ordering.
  final TestInstanceInfo reader = instances.get(1);
  final String readerId = reader.getInstanceId();

  setupInternalConnectionPools();
  try {
    final Properties targetDataSourceProps = new Properties();
    targetDataSourceProps.setProperty(FailoverConnectionPlugin.FAILOVER_MODE.name, "strict-writer");
    final AwsWrapperDataSource ds = createWrapperDataSource(reader, proxyInfo, targetDataSourceProps);

    // Open a connection and then return it to the pool. try-with-resources ensures it is returned even if
    // the assertion fails.
    try (Connection conn = ds.getConnection()) {
      assertEquals(readerId, auroraUtil.queryInstanceId(conn));
    }

    ProxyHelper.disableConnectivity(reader.getInstanceId());
    // Hikari's 'com.zaxxer.hikari.aliveBypassWindowMs' property is set to 500ms by default. We need to wait this
    // long to trigger Hikari's validation attempts when HikariDatasource#getConnection is called. These attempts
    // will fail and Hikari will throw an exception, which should trigger failover.
    TimeUnit.MILLISECONDS.sleep(500);

    // Driver will fail over internally and return a connection to another node.
    // Fix: the failed-over connection was previously never closed (leaked); close it so it is returned to the pool.
    try (Connection conn = ds.getConnection()) {
      // Assert that we connected to a different node.
      assertNotEquals(readerId, auroraUtil.queryInstanceId(conn));
    }
  } finally {
    // Dispose of the internal connection pools registered by setupInternalConnectionPools().
    ConnectionProviderManager.releaseResources();
  }
}

/**
 * After successfully opening and returning connections to the Hikari pool, reader failover is triggered when
 * getConnection is called.
 *
 * <p>Fills the pool with connections to the writer in "strict-reader" mode, returns them, disables connectivity
 * to the writer, and verifies that the next getConnection call fails over to a different node.
 */
@TestTemplate
@EnableOnTestFeature({TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED})
@EnableOnNumOfInstances(min = 2)
public void testInternalPools_driverReaderFailoverOnGetConnectionInvocation()
    throws SQLException, InterruptedException {
  final AuroraTestUtility auroraUtil = AuroraTestUtility.getUtility();
  final TestProxyDatabaseInfo proxyInfo = TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo();
  final TestInstanceInfo writerInstance = proxyInfo.getInstances().get(0);
  final String writerId = writerInstance.getInstanceId();

  setupInternalConnectionPools();
  try {
    final Properties targetDataSourceProps = new Properties();
    targetDataSourceProps.setProperty(FailoverConnectionPlugin.FAILOVER_MODE.name, "strict-reader");
    final AwsWrapperDataSource ds = createWrapperDataSource(writerInstance, proxyInfo, targetDataSourceProps);

    // Open a batch of connections to populate the pool.
    final List<Connection> openedConnections = new ArrayList<>();
    int remaining = 10;
    while (remaining-- > 0) {
      openedConnections.add(ds.getConnection());
    }

    // Hand every connection back to the pool.
    for (final Connection pooled : openedConnections) {
      pooled.close();
    }

    ProxyHelper.disableConnectivity(writerInstance.getInstanceId());
    // Hikari's 'com.zaxxer.hikari.aliveBypassWindowMs' property is set to 500ms by default. We need to wait this
    // long to trigger Hikari's validation attempts when HikariDatasource#getConnection is called. These attempts
    // will fail and Hikari will throw an exception, which should trigger failover.
    TimeUnit.MILLISECONDS.sleep(500);

    // The driver should fail over internally and hand back a connection to a different node.
    try (Connection failedOverConn = ds.getConnection()) {
      assertNotEquals(writerId, auroraUtil.queryInstanceId(failedOverConn));
    }
  } finally {
    ConnectionProviderManager.releaseResources();
  }
}

/**
 * After successfully opening and returning a connection to the Hikari pool, writer failover is triggered when
 * getConnection is called. Since the cluster only has one instance and the instance stays down, failover fails.
 */
@TestTemplate
@EnableOnTestFeature({TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED})
@EnableOnNumOfInstances(max = 1)
public void testInternalPools_driverWriterFailoverOnGetConnectionInvocation_singleInstance()
    throws SQLException, InterruptedException {
  final AuroraTestUtility auroraUtil = AuroraTestUtility.getUtility();
  final TestProxyDatabaseInfo proxyInfo = TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo();
  final TestInstanceInfo writerInstance = proxyInfo.getInstances().get(0);
  final String writerId = writerInstance.getInstanceId();

  setupInternalConnectionPools();
  try {
    final Properties targetDataSourceProps = new Properties();
    targetDataSourceProps.setProperty(FailoverConnectionPlugin.FAILOVER_MODE.name, "strict-writer");
    targetDataSourceProps.setProperty(FailoverConnectionPlugin.FAILOVER_TIMEOUT_MS.name, "5000");
    final AwsWrapperDataSource ds = createWrapperDataSource(writerInstance, proxyInfo, targetDataSourceProps);

    // Establish a pooled connection to the writer, then return it to the pool.
    final Connection initialConn = ds.getConnection();
    assertEquals(writerId, auroraUtil.queryInstanceId(initialConn));
    initialConn.close();

    ProxyHelper.disableAllConnectivity();
    // Hikari's 'com.zaxxer.hikari.aliveBypassWindowMs' property is set to 500ms by default. We need to wait this
    // long to trigger Hikari's validation attempts when HikariDatasource#getConnection is called. These attempts
    // will fail and Hikari will throw an exception, which should trigger failover.
    TimeUnit.MILLISECONDS.sleep(500);

    // With the only instance still down, the driver's internal failover attempt must fail.
    assertThrows(FailoverFailedSQLException.class, ds::getConnection);
  } finally {
    ConnectionProviderManager.releaseResources();
  }
}

/**
 * Builds an {@link AwsWrapperDataSource} pointed at the given (proxied) instance, configured with the
 * auroraConnectionTracker, failover, and efm plugins and with connect-failover enabled.
 *
 * @param instanceInfo          instance whose host/port the data source initially connects to
 * @param proxyInfo             proxied database info supplying credentials, db name, and the instance endpoint suffix
 * @param targetDataSourceProps properties forwarded to the target data source; this method adds plugin and
 *                              topology settings to it (the map is mutated)
 * @return a fully configured {@link AwsWrapperDataSource}
 */
private static AwsWrapperDataSource createWrapperDataSource(TestInstanceInfo instanceInfo,
    TestProxyDatabaseInfo proxyInfo, Properties targetDataSourceProps) {
  targetDataSourceProps.setProperty(PropertyDefinition.PLUGINS.name, "auroraConnectionTracker,failover,efm");
  targetDataSourceProps.setProperty(FailoverConnectionPlugin.ENABLE_CONNECT_FAILOVER.name, "true");
  // Refresh topology infrequently so the tests control when topology changes are observed.
  targetDataSourceProps.setProperty(RdsHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.name,
      String.valueOf(TimeUnit.MINUTES.toMillis(5)));
  targetDataSourceProps.setProperty(RdsHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.name,
      "?." + proxyInfo.getInstanceEndpointSuffix());
  // Fixed cluster ID so every data source built by this helper shares one topology entry.
  // Fix: the original value contained an accidental trailing space ("HikariTestsCluster ").
  targetDataSourceProps.setProperty(RdsHostListProvider.CLUSTER_ID.name, "HikariTestsCluster");

  AwsWrapperDataSource ds = new AwsWrapperDataSource();
  ds.setTargetDataSourceProperties(targetDataSourceProps);
  ds.setJdbcProtocol(DriverHelper.getDriverProtocol());
  ds.setTargetDataSourceClassName(DriverHelper.getDataSourceClassname());
  ds.setServerName(instanceInfo.getHost());
  ds.setServerPort(String.valueOf(instanceInfo.getPort()));
  ds.setDatabase(proxyInfo.getDefaultDbName());
  ds.setUser(proxyInfo.getUsername());
  ds.setPassword(proxyInfo.getPassword());

  return ds;
}

/**
 * Registers a Hikari-backed internal connection pool provider with the driver, so that each host gets its
 * own pool built from the configuration below.
 */
private static void setupInternalConnectionPools() {
  final HikariPooledConnectionProvider poolProvider = new HikariPooledConnectionProvider(
      (hostSpec, props) -> {
        final HikariConfig poolConfig = new HikariConfig();
        // Pool sizing.
        poolConfig.setMinimumIdle(2);
        poolConfig.setMaximumPoolSize(30);
        // Timeouts and connection lifetime.
        poolConfig.setConnectionTimeout(1500);
        poolConfig.setValidationTimeout(1000);
        poolConfig.setIdleTimeout(TimeUnit.MINUTES.toMillis(15));
        poolConfig.setKeepaliveTime(TimeUnit.MINUTES.toMillis(3));
        poolConfig.setMaxLifetime(TimeUnit.DAYS.toMillis(1));
        // -1 bypasses Hikari's fail-fast initialization check (per HikariCP docs).
        poolConfig.setInitializationFailTimeout(-1);
        // Default connection state for connections handed out by the pool.
        poolConfig.setReadOnly(true);
        poolConfig.setAutoCommit(true);
        return poolConfig;
      });
  ConnectionProviderManager.setConnectionProvider(poolProvider);
}

private HikariDataSource createHikariDataSource(final Properties customProps) {
final HikariConfig config = getConfig(customProps);
final HikariDataSource dataSource = new HikariDataSource(config);

Expand Down
Loading