From a17757c5152bef125a039a5fdac6f4bba069a066 Mon Sep 17 00:00:00 2001
From: Yuya Ebihara
Date: Tue, 19 Nov 2024 19:07:16 +0900
Subject: [PATCH] Add $entries metadata table to Iceberg

---
 .../io/trino/plugin/iceberg/EntriesTable.java | 266 ++++++++++++++++++
 .../trino/plugin/iceberg/IcebergMetadata.java |   1 +
 .../io/trino/plugin/iceberg/TableType.java    |   1 +
 .../iceberg/BaseIcebergSystemTables.java      | 100 ++++++-
 .../TestIcebergMetastoreAccessOperations.java |   9 +-
 5 files changed, 375 insertions(+), 2 deletions(-)
 create mode 100644 plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java

diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java
new file mode 100644
index 000000000000..0c8d54384192
--- /dev/null
+++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.iceberg;
+
+import com.google.common.collect.ImmutableList;
+import io.trino.plugin.iceberg.util.PageListBuilder;
+import io.trino.spi.block.ArrayBlockBuilder;
+import io.trino.spi.block.MapBlockBuilder;
+import io.trino.spi.block.RowBlockBuilder;
+import io.trino.spi.connector.ColumnMetadata;
+import io.trino.spi.connector.ConnectorTableMetadata;
+import io.trino.spi.connector.SchemaTableName;
+import io.trino.spi.type.ArrayType;
+import io.trino.spi.type.RowType;
+import io.trino.spi.type.TimeZoneKey;
+import io.trino.spi.type.TypeManager;
+import io.trino.spi.type.TypeSignature;
+import jakarta.annotation.Nullable;
+import org.apache.iceberg.MetricsUtil.ReadableMetricsStruct;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.transforms.Transforms;
+import org.apache.iceberg.types.Conversions;
+import org.apache.iceberg.types.Type;
+import org.apache.iceberg.types.Types;
+import org.apache.iceberg.util.StructProjection;
+
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+
+import static com.google.common.collect.ImmutableList.toImmutableList;
+import static io.airlift.slice.Slices.wrappedHeapBuffer;
+import static io.trino.plugin.iceberg.FilesTable.getIcebergIdToTypeMapping;
+import static io.trino.plugin.iceberg.IcebergTypes.convertIcebergValueToTrino;
+import static io.trino.plugin.iceberg.IcebergUtil.getPartitionColumnType;
+import static io.trino.plugin.iceberg.IcebergUtil.partitionTypes;
+import static io.trino.plugin.iceberg.IcebergUtil.primitiveFieldTypes;
+import static io.trino.plugin.iceberg.PartitionsTable.getAllPartitionFields;
+import static io.trino.spi.type.BigintType.BIGINT;
+import static io.trino.spi.type.IntegerType.INTEGER;
+import static io.trino.spi.type.StandardTypes.JSON;
+import static io.trino.spi.type.TypeSignature.mapType;
+import static io.trino.spi.type.TypeUtils.writeNativeValue;
+import static io.trino.spi.type.VarbinaryType.VARBINARY;
+import static io.trino.spi.type.VarcharType.VARCHAR;
+import static java.util.Objects.requireNonNull;
+import static org.apache.iceberg.MetadataTableType.ENTRIES;
+
+// https://iceberg.apache.org/docs/latest/spark-queries/#entries
+public class EntriesTable
+        extends BaseSystemTable
+{
+    private final Map<Integer, Type> idToTypeMapping;
+    private final List<Types.NestedField> primitiveFields;
+    private final Optional<IcebergPartitionColumn> partitionColumn;
+    private final List<Type> partitionTypes;
+
+    public EntriesTable(TypeManager typeManager, SchemaTableName tableName, Table icebergTable, ExecutorService executor)
+    {
+        super(
+                requireNonNull(icebergTable, "icebergTable is null"),
+                new ConnectorTableMetadata(
+                        requireNonNull(tableName, "tableName is null"),
+                        columns(requireNonNull(typeManager, "typeManager is null"), icebergTable)),
+                ENTRIES,
+                executor);
+        idToTypeMapping = getIcebergIdToTypeMapping(icebergTable.schema());
+        primitiveFields = IcebergUtil.primitiveFields(icebergTable.schema()).stream()
+                .sorted(Comparator.comparing(Types.NestedField::name))
+                .collect(toImmutableList());
+        List<PartitionField> partitionFields = getAllPartitionFields(icebergTable);
+        partitionColumn = getPartitionColumnType(partitionFields, icebergTable.schema(), typeManager);
+        partitionTypes = partitionTypes(partitionFields, primitiveFieldTypes(icebergTable.schema()));
+    }
+
+    private static List<ColumnMetadata> columns(TypeManager typeManager, Table icebergTable)
+    {
+        return ImmutableList.<ColumnMetadata>builder()
+                .add(new ColumnMetadata("status", INTEGER))
+                .add(new ColumnMetadata("snapshot_id", BIGINT))
+                .add(new ColumnMetadata("sequence_number", BIGINT))
+                .add(new ColumnMetadata("file_sequence_number", BIGINT))
+                .add(new ColumnMetadata("data_file", RowType.from(dataFileFieldMetadata(typeManager, icebergTable))))
+                .add(new ColumnMetadata("readable_metrics", typeManager.getType(new TypeSignature(JSON))))
+                .build();
+    }
+
+    private static List<RowType.Field> dataFileFieldMetadata(TypeManager typeManager, Table icebergTable)
+    {
+        List<PartitionField> partitionFields = getAllPartitionFields(icebergTable);
+        Optional<IcebergPartitionColumn> partitionColumnType = getPartitionColumnType(partitionFields, icebergTable.schema(), typeManager);
+
+        ImmutableList.Builder<RowType.Field> fields = ImmutableList.builder();
+        fields.add(new RowType.Field(Optional.of("content"), INTEGER));
+        fields.add(new RowType.Field(Optional.of("file_path"), VARCHAR));
+        fields.add(new RowType.Field(Optional.of("file_format"), VARCHAR));
+        fields.add(new RowType.Field(Optional.of("spec_id"), INTEGER));
+        partitionColumnType.ifPresent(type -> fields.add(new RowType.Field(Optional.of("partition"), type.rowType())));
+        fields.add(new RowType.Field(Optional.of("record_count"), BIGINT));
+        fields.add(new RowType.Field(Optional.of("file_size_in_bytes"), BIGINT));
+        fields.add(new RowType.Field(Optional.of("column_sizes"), typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))));
+        fields.add(new RowType.Field(Optional.of("value_counts"), typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))));
+        fields.add(new RowType.Field(Optional.of("null_value_counts"), typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))));
+        fields.add(new RowType.Field(Optional.of("nan_value_counts"), typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))));
+        fields.add(new RowType.Field(Optional.of("lower_bounds"), typeManager.getType(mapType(INTEGER.getTypeSignature(), VARCHAR.getTypeSignature()))));
+        fields.add(new RowType.Field(Optional.of("upper_bounds"), typeManager.getType(mapType(INTEGER.getTypeSignature(), VARCHAR.getTypeSignature()))));
+        fields.add(new RowType.Field(Optional.of("key_metadata"), VARBINARY));
+        fields.add(new RowType.Field(Optional.of("split_offsets"), new ArrayType(BIGINT)));
+        fields.add(new RowType.Field(Optional.of("equality_ids"), new ArrayType(INTEGER)));
+        fields.add(new RowType.Field(Optional.of("sort_order_id"), INTEGER));
+        return fields.build();
+    }
+
+    @Override
+    protected void addRow(PageListBuilder pagesBuilder, Row row, TimeZoneKey timeZoneKey)
+    {
+        pagesBuilder.beginRow();
+        pagesBuilder.appendInteger(row.get("status", Integer.class));
+        pagesBuilder.appendBigint(row.get("snapshot_id", Long.class));
+        pagesBuilder.appendBigint(row.get("sequence_number", Long.class));
+        pagesBuilder.appendBigint(row.get("file_sequence_number", Long.class));
+        StructProjection dataFile = row.get("data_file", StructProjection.class);
+        appendDataFile((RowBlockBuilder) pagesBuilder.nextColumn(), dataFile);
+        ReadableMetricsStruct readableMetrics = row.get("readable_metrics", ReadableMetricsStruct.class);
+        String readableMetricsJson = FilesTable.toJson(readableMetrics, primitiveFields);
+        pagesBuilder.appendVarchar(readableMetricsJson);
+        pagesBuilder.endRow();
+    }
+
+    private void appendDataFile(RowBlockBuilder blockBuilder, StructProjection dataFile)
+    {
+        blockBuilder.buildEntry(fieldBuilders -> {
+            Integer content = dataFile.get(0, Integer.class);
+            INTEGER.writeLong(fieldBuilders.get(0), content);
+
+            String filePath = dataFile.get(1, String.class);
+            VARCHAR.writeString(fieldBuilders.get(1), filePath);
+
+            String fileFormat = dataFile.get(2, String.class);
+            VARCHAR.writeString(fieldBuilders.get(2), fileFormat);
+
+            Integer specId = dataFile.get(3, Integer.class);
+            INTEGER.writeLong(fieldBuilders.get(3), Long.valueOf(specId));
+
+            partitionColumn.ifPresent(type -> {
+                StructProjection partition = dataFile.get(4, StructProjection.class);
+                RowBlockBuilder partitionBlockBuilder = (RowBlockBuilder) fieldBuilders.get(4);
+                partitionBlockBuilder.buildEntry(partitionBuilder -> {
+                    for (int i = 0; i < type.rowType().getFields().size(); i++) {
+                        Type icebergType = partitionTypes.get(i);
+                        io.trino.spi.type.Type trinoType = type.rowType().getFields().get(i).getType();
+                        Object value = null;
+                        Integer fieldId = type.fieldIds().get(i);
+                        if (fieldId != null) {
+                            value = convertIcebergValueToTrino(icebergType, partition.get(i, icebergType.typeId().javaClass()));
+                        }
+                        writeNativeValue(trinoType, partitionBuilder.get(i), value);
+                    }
+                });
+            });
+
+            int position = partitionColumn.isEmpty() ? 4 : 5;
+            Long recordCount = dataFile.get(position, Long.class);
+            BIGINT.writeLong(fieldBuilders.get(position), recordCount);
+
+            Long fileSizeInBytes = dataFile.get(++position, Long.class);
+            BIGINT.writeLong(fieldBuilders.get(position), fileSizeInBytes);
+
+            //noinspection unchecked
+            Map<Integer, Long> columnSizes = dataFile.get(++position, Map.class);
+            appendIntegerBigintMap((MapBlockBuilder) fieldBuilders.get(position), columnSizes);
+
+            //noinspection unchecked
+            Map<Integer, Long> valueCounts = dataFile.get(++position, Map.class);
+            appendIntegerBigintMap((MapBlockBuilder) fieldBuilders.get(position), valueCounts);
+
+            //noinspection unchecked
+            Map<Integer, Long> nullValueCounts = dataFile.get(++position, Map.class);
+            appendIntegerBigintMap((MapBlockBuilder) fieldBuilders.get(position), nullValueCounts);
+
+            //noinspection unchecked
+            Map<Integer, Long> nanValueCounts = dataFile.get(++position, Map.class);
+            appendIntegerBigintMap((MapBlockBuilder) fieldBuilders.get(position), nanValueCounts);
+
+            //noinspection unchecked
+            Map<Integer, ByteBuffer> lowerBounds = dataFile.get(++position, Map.class);
+            appendIntegerVarcharMap((MapBlockBuilder) fieldBuilders.get(position), lowerBounds);
+
+            //noinspection unchecked
+            Map<Integer, ByteBuffer> upperBounds = dataFile.get(++position, Map.class);
+            appendIntegerVarcharMap((MapBlockBuilder) fieldBuilders.get(position), upperBounds);
+
+            ByteBuffer keyMetadata = dataFile.get(++position, ByteBuffer.class);
+            if (keyMetadata == null) {
+                fieldBuilders.get(position).appendNull();
+            }
+            else {
+                VARBINARY.writeSlice(fieldBuilders.get(position), wrappedHeapBuffer(keyMetadata));
+            }
+
+            //noinspection unchecked
+            List<Long> splitOffsets = dataFile.get(++position, List.class);
+            appendBigintArray((ArrayBlockBuilder) fieldBuilders.get(position), splitOffsets);
+
+            //noinspection unchecked
+            List<Long> equalityIds = dataFile.get(++position, List.class);
+            appendBigintArray((ArrayBlockBuilder) fieldBuilders.get(position), equalityIds);
+
+            Integer sortOrderId = dataFile.get(++position, Integer.class);
+            INTEGER.writeLong(fieldBuilders.get(position), Long.valueOf(sortOrderId));
+        });
+    }
+
+    public static void appendBigintArray(ArrayBlockBuilder blockBuilder, @Nullable List<Long> values)
+    {
+        if (values == null) {
+            blockBuilder.appendNull();
+            return;
+        }
+        blockBuilder.buildEntry(elementBuilder -> {
+            for (Long value : values) {
+                BIGINT.writeLong(elementBuilder, value);
+            }
+        });
+    }
+
+    private static void appendIntegerBigintMap(MapBlockBuilder blockBuilder, @Nullable Map<Integer, Long> values)
+    {
+        if (values == null) {
+            blockBuilder.appendNull();
+            return;
+        }
+        blockBuilder.buildEntry((keyBuilder, valueBuilder) -> values.forEach((key, value) -> {
+            INTEGER.writeLong(keyBuilder, key);
+            BIGINT.writeLong(valueBuilder, value);
+        }));
+    }
+
+    private void appendIntegerVarcharMap(MapBlockBuilder blockBuilder, @Nullable Map<Integer, ByteBuffer> values)
+    {
+        if (values == null) {
+            blockBuilder.appendNull();
+            return;
+        }
+        blockBuilder.buildEntry((keyBuilder, valueBuilder) -> values.forEach((key, value) -> {
+            Type type = idToTypeMapping.get(key);
+            INTEGER.writeLong(keyBuilder, key);
+            VARCHAR.writeString(valueBuilder, Transforms.identity().toHumanString(type, Conversions.fromByteBuffer(type, value)));
+        }));
+    }
+}
diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java
index 2a6af35d5c1d..7c298ee19822 100644
--- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java
+++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java
@@ -700,6 +700,7 @@ private Optional<SystemTable> getRawSystemTable(ConnectorSession session, Schema
             case ALL_MANIFESTS -> Optional.of(new AllManifestsTable(tableName, table, executor));
             case MANIFESTS -> Optional.of(new ManifestsTable(tableName, table, getCurrentSnapshotId(table)));
             case FILES -> Optional.of(new FilesTable(tableName, typeManager, table, getCurrentSnapshotId(table), executor));
+            case ENTRIES -> Optional.of(new EntriesTable(typeManager, tableName, table, executor));
             case PROPERTIES -> Optional.of(new PropertiesTable(tableName, table));
             case REFS -> Optional.of(new RefsTable(tableName, table, executor));
         };
diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TableType.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TableType.java
index 2ba845acc379..ab39c3504b6f 100644
--- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TableType.java
+++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TableType.java
@@ -23,6 +23,7 @@ public enum TableType
     MANIFESTS,
     PARTITIONS,
     FILES,
+    ENTRIES,
     PROPERTIES,
     REFS,
     MATERIALIZED_VIEW_STORAGE,
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java
index 7212f3a3aa1d..e6fbf00ca54e 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java
@@ -15,13 +15,19 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+import io.trino.filesystem.TrinoFileSystemFactory;
+import io.trino.metastore.HiveMetastore;
 import io.trino.spi.type.ArrayType;
 import io.trino.testing.AbstractTestQueryFramework;
+import io.trino.testing.DistributedQueryRunner;
 import io.trino.testing.MaterializedResult;
 import io.trino.testing.MaterializedRow;
 import io.trino.testing.QueryRunner;
 import io.trino.testing.sql.TestTable;
+import org.apache.iceberg.BaseTable;
 import org.apache.iceberg.FileContent;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
 import org.intellij.lang.annotations.Language;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
@@ -37,9 +43,12 @@
 import static com.google.common.collect.ImmutableMap.toImmutableMap;
 import static io.trino.plugin.iceberg.IcebergFileFormat.ORC;
 import static io.trino.plugin.iceberg.IcebergFileFormat.PARQUET;
+import static io.trino.plugin.iceberg.IcebergTestUtils.getFileSystemFactory;
+import static io.trino.plugin.iceberg.IcebergTestUtils.getHiveMetastore;
 import static io.trino.spi.type.BigintType.BIGINT;
 import static io.trino.testing.MaterializedResult.DEFAULT_PRECISION;
 import static io.trino.testing.MaterializedResult.resultBuilder;
+import static java.util.Locale.ENGLISH;
 import static java.util.Objects.requireNonNull;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS;
@@ -49,6 +58,8 @@ public abstract class BaseIcebergSystemTables
         extends AbstractTestQueryFramework
 {
     private final IcebergFileFormat format;
+    private HiveMetastore metastore;
+    private TrinoFileSystemFactory fileSystemFactory;
 
     protected BaseIcebergSystemTables(IcebergFileFormat format)
     {
@@ -59,9 +70,12 @@ protected BaseIcebergSystemTables(IcebergFileFormat format)
     protected QueryRunner createQueryRunner()
             throws Exception
     {
-        return IcebergQueryRunner.builder()
+        DistributedQueryRunner queryRunner = IcebergQueryRunner.builder()
                 .setIcebergProperties(ImmutableMap.of("iceberg.file-format", format.name()))
                 .build();
+        metastore = getHiveMetastore(queryRunner);
+        fileSystemFactory = getFileSystemFactory(queryRunner);
+        return queryRunner;
     }
 
     @BeforeAll
@@ -549,6 +563,85 @@ public void testFilesTableWithDelete()
         assertUpdate("DROP TABLE IF EXISTS test_schema.test_table_with_delete");
     }
 
+    @Test
+    void testEntriesTable()
+    {
+        try (TestTable table = new TestTable(getQueryRunner()::execute, "test_entries", "AS SELECT 1 id, DATE '2014-01-01' dt")) {
+            assertQuery("SHOW COLUMNS FROM \"" + table.getName() + "$entries\"",
+                    "VALUES ('status', 'integer', '', '')," +
+                            "('snapshot_id', 'bigint', '', '')," +
+                            "('sequence_number', 'bigint', '', '')," +
+                            "('file_sequence_number', 'bigint', '', '')," +
+                            "('data_file', 'row(content integer, file_path varchar, file_format varchar, spec_id integer, record_count bigint, file_size_in_bytes bigint, " +
+                            "column_sizes map(integer, bigint), value_counts map(integer, bigint), null_value_counts map(integer, bigint), nan_value_counts map(integer, bigint), " +
+                            "lower_bounds map(integer, varchar), upper_bounds map(integer, varchar), key_metadata varbinary, split_offsets array(bigint), " +
+                            "equality_ids array(integer), sort_order_id integer)', '', '')," +
+                            "('readable_metrics', 'json', '', '')");
+
+            Table icebergTable = loadTable(table.getName());
+            Snapshot snapshot = icebergTable.currentSnapshot();
+            long snapshotId = snapshot.snapshotId();
+            long sequenceNumber = snapshot.sequenceNumber();
+
+            assertThat(computeScalar("SELECT status FROM \"" + table.getName() + "$entries\""))
+                    .isEqualTo(1);
+            assertThat(computeScalar("SELECT snapshot_id FROM \"" + table.getName() + "$entries\""))
+                    .isEqualTo(snapshotId);
+            assertThat(computeScalar("SELECT sequence_number FROM \"" + table.getName() + "$entries\""))
+                    .isEqualTo(sequenceNumber);
+            assertThat(computeScalar("SELECT file_sequence_number FROM \"" + table.getName() + "$entries\""))
+                    .isEqualTo(1L);
+
+            MaterializedRow dataFile = (MaterializedRow) computeScalar("SELECT data_file FROM \"" + table.getName() + "$entries\"");
+            assertThat(dataFile.getFieldCount()).isEqualTo(16);
+            assertThat(dataFile.getField(0)).isEqualTo(0); // content
+            assertThat((String) dataFile.getField(1)).endsWith(format.toString().toLowerCase(ENGLISH)); // file_path
+            assertThat(dataFile.getField(2)).isEqualTo(format.toString()); // file_format
+            assertThat(dataFile.getField(3)).isEqualTo(0); // spec_id
+            assertThat(dataFile.getField(4)).isEqualTo(1L); // record_count
+            assertThat((long) dataFile.getField(5)).isPositive(); // file_size_in_bytes
+            assertThat(dataFile.getField(6)).isEqualTo(value(Map.of(1, 36L, 2, 36L), null)); // column_sizes
+            assertThat(dataFile.getField(7)).isEqualTo(Map.of(1, 1L, 2, 1L)); // value_counts
+            assertThat(dataFile.getField(8)).isEqualTo(Map.of(1, 0L, 2, 0L)); // null_value_counts
+            assertThat(dataFile.getField(9)).isEqualTo(value(Map.of(), null)); // nan_value_counts
+            assertThat(dataFile.getField(10)).isEqualTo(Map.of(1, "1", 2, "2014-01-01")); // lower_bounds
+            assertThat(dataFile.getField(11)).isEqualTo(Map.of(1, "1", 2, "2014-01-01")); // upper_bounds
+            assertThat(dataFile.getField(12)).isNull(); // key_metadata
+            assertThat(dataFile.getField(13)).isEqualTo(List.of(value(4L, 3L))); // split_offsets
+            assertThat(dataFile.getField(14)).isNull(); // equality_ids
+            assertThat(dataFile.getField(15)).isEqualTo(0); // sort_order_id
+
+            assertThat(computeScalar("SELECT readable_metrics FROM \"" + table.getName() + "$entries\""))
+                    .isEqualTo("{" +
+                            "\"dt\":{\"column_size\":" + value(36, null) + ",\"value_count\":1,\"null_value_count\":0,\"nan_value_count\":null,\"lower_bound\":\"2014-01-01\",\"upper_bound\":\"2014-01-01\"}," +
+                            "\"id\":{\"column_size\":" + value(36, null) + ",\"value_count\":1,\"null_value_count\":0,\"nan_value_count\":null,\"lower_bound\":1,\"upper_bound\":1}" +
+                            "}");
+        }
+    }
+
+    @Test
+    void testEntriesPartitionTable()
+    {
+        try (TestTable table = new TestTable(
+                getQueryRunner()::execute,
+                "test_entries_partition",
+                "WITH (partitioning = ARRAY['dt']) AS SELECT 1 id, DATE '2014-01-01' dt")) {
+            assertQuery("SHOW COLUMNS FROM \"" + table.getName() + "$entries\"",
+                    "VALUES ('status', 'integer', '', '')," +
+                            "('snapshot_id', 'bigint', '', '')," +
+                            "('sequence_number', 'bigint', '', '')," +
+                            "('file_sequence_number', 'bigint', '', '')," +
+                            "('data_file', 'row(content integer, file_path varchar, file_format varchar, spec_id integer, partition row(dt date), record_count bigint, file_size_in_bytes bigint, " +
+                            "column_sizes map(integer, bigint), value_counts map(integer, bigint), null_value_counts map(integer, bigint), nan_value_counts map(integer, bigint), " +
+                            "lower_bounds map(integer, varchar), upper_bounds map(integer, varchar), key_metadata varbinary, split_offsets array(bigint), " +
+                            "equality_ids array(integer), sort_order_id integer)', '', '')," +
+                            "('readable_metrics', 'json', '', '')");
+
+            assertThat(query("SELECT data_file.partition FROM \"" + table.getName() + "$entries\""))
+                    .matches("SELECT CAST(ROW(DATE '2014-01-01') AS ROW(dt date))");
+        }
+    }
+
     private Long nanCount(long value)
     {
         // Parquet does not have nan count metrics
@@ -565,4 +658,9 @@ private Object value(Object parquet, Object orc)
     {
         return format == PARQUET ? parquet : orc;
     }
+
+    private BaseTable loadTable(String tableName)
+    {
+        return IcebergTestUtils.loadTable(tableName, metastore, fileSystemFactory, "hive", "tpch");
+    }
 }
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMetastoreAccessOperations.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMetastoreAccessOperations.java
index 23a3b809d513..401e68e38c90 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMetastoreAccessOperations.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMetastoreAccessOperations.java
@@ -38,6 +38,7 @@
 import static io.trino.plugin.iceberg.IcebergSessionProperties.COLLECT_EXTENDED_STATISTICS_ON_WRITE;
 import static io.trino.plugin.iceberg.TableType.ALL_MANIFESTS;
 import static io.trino.plugin.iceberg.TableType.DATA;
+import static io.trino.plugin.iceberg.TableType.ENTRIES;
 import static io.trino.plugin.iceberg.TableType.FILES;
 import static io.trino.plugin.iceberg.TableType.HISTORY;
 import static io.trino.plugin.iceberg.TableType.MANIFESTS;
@@ -338,6 +339,12 @@ public void testSelectSystemTable()
                         .addCopies(GET_TABLE, 1)
                         .build());
 
+        // select from $entries
+        assertMetastoreInvocations("SELECT * FROM \"test_select_snapshots$entries\"",
+                ImmutableMultiset.builder()
+                        .addCopies(GET_TABLE, 1)
+                        .build());
+
         // select from $properties
         assertMetastoreInvocations("SELECT * FROM \"test_select_snapshots$properties\"",
                 ImmutableMultiset.builder()
                         .addCopies(GET_TABLE, 1)
                         .build());
@@ -349,7 +356,7 @@
 
         // This test should get updated if a new system table is added.
         assertThat(TableType.values())
-                .containsExactly(DATA, HISTORY, METADATA_LOG_ENTRIES, SNAPSHOTS, ALL_MANIFESTS, MANIFESTS, PARTITIONS, FILES, PROPERTIES, REFS, MATERIALIZED_VIEW_STORAGE);
+                .containsExactly(DATA, HISTORY, METADATA_LOG_ENTRIES, SNAPSHOTS, ALL_MANIFESTS, MANIFESTS, PARTITIONS, FILES, ENTRIES, PROPERTIES, REFS, MATERIALIZED_VIEW_STORAGE);
     }
 
     @Test
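Note for reviewers trying the change locally: the new table is addressed like the existing $files and $manifests metadata tables, by quoting the table name with a $entries suffix, as the tests above do. A minimal SQL sketch (the orders table name is only illustrative; per the Iceberg entries table referenced from EntriesTable.java, status 0, 1 and 2 correspond to EXISTING, ADDED and DELETED manifest entries):

    -- one row per manifest entry of the current snapshot
    SELECT status, snapshot_id, sequence_number, data_file.file_path, data_file.record_count
    FROM "orders$entries";

    -- readable_metrics is returned as JSON, as asserted in testEntriesTable
    SELECT readable_metrics
    FROM "orders$entries";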