
Commit

updates from feedback
zeroshade committed Oct 17, 2024
1 parent e6e6278 commit 9674fd7
Showing 2 changed files with 17 additions and 8 deletions.
13 changes: 7 additions & 6 deletions c/validation/adbc_validation_connection.cc
@@ -701,7 +701,9 @@ void ConnectionTest::TestMetadataGetObjectsTablesTypes() {
db_schemas_index <
ArrowArrayViewListChildOffset(catalog_db_schemas_list, row + 1);
db_schemas_index++) {
// db_schema_tables should be non-null (though it may be an empty list)
ASSERT_FALSE(ArrowArrayViewIsNull(db_schema_tables_list, db_schemas_index))
<< "Row " << row << " should have non-null db_schema_tables";

for (int64_t tables_index =
ArrowArrayViewListChildOffset(db_schema_tables_list, db_schemas_index);
tables_index <
@@ -742,6 +744,7 @@ void ConnectionTest::TestMetadataGetObjectsColumns() {

struct TestCase {
std::optional<std::string> filter;
// each pair is the column name and the ordinal position of the column
std::vector<std::pair<std::string, int32_t>> columns;
};

@@ -846,11 +849,9 @@ void ConnectionTest::TestMetadataGetObjectsColumns() {
} while (reader.array->release);

ASSERT_TRUE(found_expected_table) << "Did not find expected table in metadata";
// metadata columns do not guarantee the order they are returned in; we can
// avoid the test being flaky by sorting the column names we found
std::sort(columns.begin(), columns.end(),
[](const auto& a, const auto& b) -> bool { return a.first < b.first; });
ASSERT_EQ(test_case.columns, columns);
// metadata columns do not guarantee the order they are returned in, so just
// validate that all the expected elements are there.
ASSERT_THAT(columns, testing::UnorderedElementsAreArray(test_case.columns));
}
}

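For readers unfamiliar with the matcher used above, here is a minimal, self-contained sketch (not part of this commit; the test name, column names, and ordinals are made up) showing that testing::UnorderedElementsAreArray accepts the same elements in any order, which is why the explicit std::sort is no longer needed.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Link against gtest_main to run; the values below are illustrative only,
// not the validation suite's actual fixtures.
TEST(UnorderedColumnsSketch, OrderDoesNotMatter) {
  std::vector<std::pair<std::string, int32_t>> columns = {
      {"strings", 2}, {"int64s", 1}};  // as a driver might return them
  std::vector<std::pair<std::string, int32_t>> expected = {
      {"int64s", 1}, {"strings", 2}};  // same elements, different order
  ASSERT_THAT(columns, testing::UnorderedElementsAreArray(expected));
}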
12 changes: 10 additions & 2 deletions go/adbc/driver/snowflake/connection.go
@@ -190,11 +190,17 @@ func (c *connectionImpl) GetObjects(ctx context.Context, depth adbc.ObjectDepth,
}
}

// Force an empty result from SHOW TABLES if the tableType list is not empty
// and contains neither TABLE nor VIEW.
// We need this because db_schema_tables must be non-null when
// depth is Tables, Columns or All.
var badTableType = "tabletypedoesnotexist"
if len(tableType) > 0 && depth >= adbc.ObjectDepthTables && !hasViews && !hasTables {
depth = adbc.ObjectDepthDBSchemas
tableName = &badTableType
tableType = []string{"TABLE"}
}
gQueryIDs, gQueryIDsCtx := errgroup.WithContext(ctx)

gQueryIDs, gQueryIDsCtx := errgroup.WithContext(ctx)
queryFile := queryTemplateGetObjectsAll
switch depth {
case adbc.ObjectDepthCatalogs:
@@ -213,6 +219,7 @@
catalog, dbSchema, tableName, &showSchemaQueryID)
goGetQueryID(gQueryIDsCtx, conn, gQueryIDs, objDatabases,
catalog, dbSchema, tableName, &terseDbQueryID)

objType := objObjects
if len(tableType) == 1 {
if strings.EqualFold("VIEW", tableType[0]) {
@@ -221,6 +228,7 @@
objType = objTables
}
}

goGetQueryID(gQueryIDsCtx, conn, gQueryIDs, objType,
catalog, dbSchema, tableName, &tableQueryID)
default:
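The key decision in the hunk above, pulled out as a standalone sketch so the control flow is easier to see. The helper name shouldForceEmptyTables and the example table types are illustrative assumptions, not part of the driver's API: when the caller's tableType filter is non-empty but contains neither TABLE nor VIEW, the driver searches for a table name that cannot exist, so SHOW TABLES returns zero rows and db_schema_tables is reported as an empty, non-null list. (In the driver the check also requires depth to be Tables, Columns or All; that condition is omitted here for brevity.)

package main

import (
	"fmt"
	"strings"
)

// shouldForceEmptyTables reports whether a non-empty tableType filter rules
// out everything SHOW TABLES / SHOW VIEWS could return. In that case the
// driver above substitutes a table name that cannot exist
// ("tabletypedoesnotexist") so the per-schema table list comes back empty
// rather than null.
func shouldForceEmptyTables(tableType []string) bool {
	if len(tableType) == 0 {
		return false // no filter requested: return all objects
	}
	for _, t := range tableType {
		if strings.EqualFold(t, "TABLE") || strings.EqualFold(t, "VIEW") {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(shouldForceEmptyTables(nil))                        // false
	fmt.Println(shouldForceEmptyTables([]string{"view"}))           // false (case-insensitive)
	fmt.Println(shouldForceEmptyTables([]string{"EXTERNAL TABLE"})) // true: force an empty result
}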

