@Internal public class FileSystemTableSource extends Object implements org.apache.flink.table.connector.source.ScanTableSource, org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown, org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown, org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown, org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown, org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata, org.apache.flink.table.connector.source.abilities.SupportsStatisticReport
Nested classes/interfaces inherited from interface org.apache.flink.table.connector.source.ScanTableSource: org.apache.flink.table.connector.source.ScanTableSource.ScanContext, org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider

| Constructor and Description |
|---|
FileSystemTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier,
org.apache.flink.table.types.DataType physicalRowDataType,
List<String> partitionKeys,
org.apache.flink.configuration.ReadableConfig tableOptions,
org.apache.flink.table.connector.format.DecodingFormat<BulkFormat<org.apache.flink.table.data.RowData,FileSourceSplit>> bulkReaderFormat,
org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> deserializationFormat) |
| Modifier and Type | Method and Description |
|---|---|
org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown.Result |
applyFilters(List<org.apache.flink.table.expressions.ResolvedExpression> filters) |
void |
applyLimit(long limit) |
void |
applyPartitions(List<Map<String,String>> remainingPartitions) |
void |
applyProjection(int[][] projectedFields,
org.apache.flink.table.types.DataType producedDataType) |
void |
applyReadableMetadata(List<String> metadataKeys,
org.apache.flink.table.types.DataType producedDataType) |
String |
asSummaryString() |
FileSystemTableSource |
copy() |
org.apache.flink.table.connector.ChangelogMode |
getChangelogMode() |
org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider |
getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext) |
Optional<List<Map<String,String>>> |
listPartitions() |
Map<String,org.apache.flink.table.types.DataType> |
listReadableMetadata() |
org.apache.flink.table.plan.stats.TableStats |
reportStatistics() |
boolean |
supportsNestedProjection() |
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

public FileSystemTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier,
org.apache.flink.table.types.DataType physicalRowDataType,
List<String> partitionKeys,
org.apache.flink.configuration.ReadableConfig tableOptions,
@Nullable
org.apache.flink.table.connector.format.DecodingFormat<BulkFormat<org.apache.flink.table.data.RowData,FileSourceSplit>> bulkReaderFormat,
@Nullable
org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> deserializationFormat)
public org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext)
getScanRuntimeProvider in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.ChangelogMode getChangelogMode()
getChangelogMode in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown.Result applyFilters(List<org.apache.flink.table.expressions.ResolvedExpression> filters)
applyFilters in interface org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown

public void applyLimit(long limit)
applyLimit in interface org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown

public Optional<List<Map<String,String>>> listPartitions()
listPartitions in interface org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown

public void applyPartitions(List<Map<String,String>> remainingPartitions)
applyPartitions in interface org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown

public boolean supportsNestedProjection()
supportsNestedProjection in interface org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown

public org.apache.flink.table.plan.stats.TableStats reportStatistics()
reportStatistics in interface org.apache.flink.table.connector.source.abilities.SupportsStatisticReport

public FileSystemTableSource copy()
copy in interface org.apache.flink.table.connector.source.DynamicTableSource

public String asSummaryString()
asSummaryString in interface org.apache.flink.table.connector.source.DynamicTableSource

public void applyProjection(int[][] projectedFields,
org.apache.flink.table.types.DataType producedDataType)
applyProjection in interface org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown

public void applyReadableMetadata(List<String> metadataKeys, org.apache.flink.table.types.DataType producedDataType)
applyReadableMetadata in interface org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata

Copyright © 2014–2025 The Apache Software Foundation. All rights reserved.