@@ -70,9 +70,7 @@ private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: Hive
     val cacheLoader = new CacheLoader[QualifiedTableName, LogicalPlan]() {
       override def load(in: QualifiedTableName): LogicalPlan = {
         logDebug(s"Creating new cached data source for $in")
-        val table = HiveMetastoreCatalog.this.synchronized {
-          client.getTable(in.database, in.name)
-        }
+        val table = client.getTable(in.database, in.name)
 
         def schemaStringFromParts: Option[String] = {
           table.properties.get("spark.sql.sources.schema.numParts").map { numParts =>
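For context, the `cacheLoader` above is a Guava `CacheLoader` that backs the catalog's table cache. A minimal, self-contained sketch of the same pattern follows; the key and value types, cache size, and loader body are simplified stand-ins, not the exact Spark wiring:

```scala
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}

// Hypothetical stand-ins for Spark's QualifiedTableName / LogicalPlan.
case class TableKey(database: String, name: String)
case class Plan(description: String)

object LoadingCacheSketch {
  // The loader runs once per missing key; Guava caches the result.
  private val loader = new CacheLoader[TableKey, Plan]() {
    override def load(in: TableKey): Plan = {
      // In HiveMetastoreCatalog, this is where client.getTable(...) runs.
      Plan(s"plan for ${in.database}.${in.name}")
    }
  }

  val cache: LoadingCache[TableKey, Plan] =
    CacheBuilder.newBuilder()
      .maximumSize(1000) // assumption: illustrative sizing only
      .build(loader)

  def main(args: Array[String]): Unit = {
    // The first get() invokes load(); the second returns the cached plan.
    println(cache.get(TableKey("default", "events")))
    println(cache.get(TableKey("default", "events")))
  }
}
```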
@@ -179,14 +177,14 @@ private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: Hive
         serdeProperties = options))
   }
 
-  def hiveDefaultTableFilePath(tableName: String): String = synchronized {
+  def hiveDefaultTableFilePath(tableName: String): String = {
     // Code based on: hiveWarehouse.getTablePath(currentDatabase, tableName)
     new Path(
       new Path(client.getDatabase(client.currentDatabase).location),
       tableName.toLowerCase).toString
   }
 
-  def tableExists(tableIdentifier: Seq[String]): Boolean = synchronized {
+  def tableExists(tableIdentifier: Seq[String]): Boolean = {
     val tableIdent = processTableIdentifier(tableIdentifier)
     val databaseName =
       tableIdent
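The `hiveDefaultTableFilePath` body above composes the table location with Hadoop's `Path(parent, child)` constructor. A small sketch of that composition, using a made-up database location in place of `client.getDatabase(...).location`:

```scala
import org.apache.hadoop.fs.Path

object DefaultTablePathSketch {
  def main(args: Array[String]): Unit = {
    // Assumption: a database location as the metastore client might return it.
    val dbLocation = "hdfs://namenode:8020/user/hive/warehouse/sales.db"

    // Same shape as hiveDefaultTableFilePath: Path(parent, child) joins the
    // segments and normalizes separators; the table name is lower-cased to
    // match Hive's directory naming.
    val tablePath = new Path(new Path(dbLocation), "Orders".toLowerCase).toString

    println(tablePath)
    // hdfs://namenode:8020/user/hive/warehouse/sales.db/orders
  }
}
```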
@@ -312,7 +310,7 @@ private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: Hive
     result.newInstance()
   }
 
-  override def getTables(databaseName: Option[String]): Seq[(String, Boolean)] = synchronized {
+  override def getTables(databaseName: Option[String]): Seq[(String, Boolean)] = {
     val db = databaseName.getOrElse(client.currentDatabase)
 
     client.listTables(db).map(tableName => (tableName, false))
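A hedged sketch of the locking pattern these hunks rely on, assuming (as the diff implies) that thread safety lives inside the client rather than in the catalog; the trait and class names here are hypothetical, not Spark's actual `ClientInterface`:

```scala
// If every client method serializes its own metastore access, callers such
// as getTables no longer need an external synchronized wrapper.
trait MetastoreClient {
  def listTables(db: String): Seq[String]
}

class SynchronizedClient extends MetastoreClient {
  private val lock = new Object
  private val tables = scala.collection.mutable.Map(
    "default" -> Seq("events", "users"))

  // Each call takes the client's internal lock, so concurrent callers
  // are safe without any caller-side synchronization.
  override def listTables(db: String): Seq[String] = lock.synchronized {
    tables.getOrElse(db, Seq.empty)
  }
}
```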