Yes. We are using the Tabular Iceberg Sink Connector to write a Kafka changelog topic to an Iceberg table, with the following connector config:
{
  "connector.class": "io.tabular.iceberg.connect.IcebergSinkConnector",
  "topics": "redacted-changelog",
  "name": "redacted-iceberg-sink",
  "tasks.max": "1",
  "iceberg.control.commit.interval-ms": "30000",
  "iceberg.tables": "redacted_ns.redacted_tablename",
  "iceberg.tables.default-id-columns": "redacted, redacted, redacted",
  "iceberg.tables.default-partition-by": "bucket(redacted, 5)",
  "iceberg.tables.upsert-mode-enabled": "true",
  "iceberg.tables.schema-case-insensitive": "true",
  "iceberg.tables.auto-create-enabled": "true",
  "iceberg.tables.auto-create-props.write.metadata.compression-codec": "gzip",
  "iceberg.tables.auto-create-props.write.distribution-mode": "range",
  "iceberg.tables.auto-create-props.write.metadata.previous-versions-max": "500",
  "iceberg.tables.auto-create-props.write.metadata.delete-after-commit.enabled": "true",
  "iceberg.tables.auto-create-props.write.target-file-size-bytes": "268435456",
  "iceberg.catalog.default-namespace": "redacted_ns",
  "iceberg.catalog.io-impl": "org.apache.iceberg.aws.s3.S3FileIO",
  "iceberg.catalog.s3.path-style-access": "true",
  "iceberg.catalog.s3.delete-enabled": "false",
  "iceberg.catalog.type": "hadoop",
  "iceberg.catalog": "iceberg",
  "iceberg.catalog.s3.endpoint": "http://redacted",
  "iceberg.catalog.zookeeper.connectionString": "redacted:2181",
  "iceberg.catalog.lock-impl": "our.custom.ZookeeperLocker",
  "iceberg.catalog.warehouse": "s3a://redacted/",
  "iceberg.catalog.s3.access-key-id": "redacted",
  "iceberg.catalog.s3.secret-access-key": "redacted",
  "iceberg.hadoop.fs.s3a.secret.key": "redacted",
  "iceberg.hadoop.fs.s3a.access.key": "redacted",
  "iceberg.hadoop.fs.s3a.connection.ssl.enabled": "false",
  "iceberg.hadoop.fs.s3a.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem",
  "iceberg.hadoop.fs.s3a.path.style.access": "true",
  "iceberg.hadoop.fs.s3a.endpoint": "http://redacted",
  "iceberg.hadoop.fs.s3a.multipart.size": "100M",
  "iceberg.hadoop.fs.s3a.multipart.threshold": "2G",
  "key.converter": "io.confluent.connect.avro.AvroConverter",
  "key.converter.schema.registry.url": "http://redacted:8081",
  "value.converter": "io.confluent.connect.avro.AvroConverter",
  "value.converter.schema.registry.url": "http://redacted:8081"
}
The table is readable in Spark, but accessing it from Dremio produces the reported error.
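For reference, this is roughly how we verify the table on the Spark side. It is a minimal PySpark sketch, assuming a Spark session with an Iceberg Hadoop catalog named "iceberg" pointing at the same S3 warehouse; the warehouse path, endpoint, namespace, and table name below are placeholders for the redacted values in the connector config.

# iceberg_read_check.py -- minimal sketch; names/endpoints are placeholders
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder
    .appName("iceberg-read-check")
    # Register an Iceberg Hadoop catalog backed by the same S3 warehouse
    .config("spark.sql.extensions",
            "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
    .config("spark.sql.catalog.iceberg", "org.apache.iceberg.spark.SparkCatalog")
    .config("spark.sql.catalog.iceberg.type", "hadoop")
    .config("spark.sql.catalog.iceberg.warehouse", "s3a://redacted/")
    # S3A settings mirroring the iceberg.hadoop.fs.s3a.* connector properties
    .config("spark.hadoop.fs.s3a.endpoint", "http://redacted")
    .config("spark.hadoop.fs.s3a.path.style.access", "true")
    .config("spark.hadoop.fs.s3a.connection.ssl.enabled", "false")
    .getOrCreate()
)

# Reading the upsert table written by the connector succeeds here
spark.sql("SELECT * FROM iceberg.redacted_ns.redacted_tablename LIMIT 10").show()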