This is an automated email from the ASF dual-hosted git repository.

snlee pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git
The following commit(s) were added to refs/heads/master by this push:
     new 3ae95ba  Cleaning up getTableName() for segment metadata (#4289)
3ae95ba is described below

commit 3ae95badf7e38a0074d36c2f00c6de0a2da69807
Author: Seunghyun Lee <sn...@linkedin.com>
AuthorDate: Mon Jun 10 17:46:23 2019 -0700

    Cleaning up getTableName() for segment metadata (#4289)

    * Cleaning up getTableName() for segment metadata

    Removing unnecessary calls to segmentMetadata.getTableName()

    * Remove segmentMetadata.getTableName from memory estimator and segment query processor
---
 .../broker/broker/HelixBrokerStarterTest.java       |  8 +++--
 .../PinotSegmentUploadRestletResource.java          | 35 +++++++++++-----------
 .../controller/api/upload/SegmentValidator.java     |  8 ++---
 .../pinot/controller/api/upload/ZKOperator.java     | 12 ++++----
 .../helix/core/PinotHelixResourceManager.java       | 35 ++++++++++++----------
 .../BalanceNumSegmentAssignmentStrategy.java        | 11 ++++---
 .../core/sharding/BucketizedSegmentStrategy.java    |  9 +++---
 .../core/sharding/RandomAssignmentStrategy.java     |  8 ++---
 .../ReplicaGroupSegmentAssignmentStrategy.java      | 13 ++++----
 .../core/sharding/SegmentAssignmentStrategy.java    |  4 +--
 .../resources/PinotSegmentRestletResourceTest.java  |  4 +--
 .../controller/api/resources/TableViewsTest.java    |  5 ++--
 .../helix/ControllerInstanceToggleTest.java         |  3 +-
 .../controller/helix/ControllerSentinelTestV2.java  |  3 +-
 .../controller/helix/PinotResourceManagerTest.java  |  5 ++--
 .../helix/core/retention/RetentionManagerTest.java  |  1 -
 .../sharding/SegmentAssignmentStrategyTest.java     |  6 ++--
 .../controller/utils/ReplicaGroupTestUtils.java     |  4 +--
 .../validation/ValidationManagerTest.java           |  4 +--
 .../immutable/ImmutableSegmentLoader.java           |  5 ++--
 .../indexsegment/mutable/MutableSegmentImpl.java    |  3 +-
 .../pinot/core/minion/BackfillDateTimeColumn.java   |  9 +++---
 .../pinot/core/minion/RawIndexConverter.java        | 14 +++++----
 .../apache/pinot/core/minion/SegmentPurger.java     | 15 +++++-----
 .../virtualcolumn/VirtualColumnContext.java         |  8 +----
 .../readers/BackfillDateTimeRecordReaderTest.java   |  5 ++--
 .../pinot/core/minion/SegmentPurgerTest.java        |  2 +-
 .../executor/ConvertToRawIndexTaskExecutor.java     |  5 +++-
 .../pinot/minion/executor/PurgeTaskExecutor.java    |  2 +-
 .../apache/pinot/perf/BenchmarkQueryEngine.java     |  8 ++---
 .../command/BackfillDateTimeColumnCommand.java      |  4 ++-
 .../pinot/tools/perf/PerfBenchmarkDriver.java       |  5 ++--
 .../pinot/tools/perf/PerfBenchmarkRunner.java       |  4 +--
 .../query/comparison/StarTreeQueryGenerator.java    | 10 +++---
 .../realtime/provisioning/MemoryEstimator.java      | 10 +++---
 .../tools/scan/query/SegmentQueryProcessor.java     |  8 -----
 36 files changed, 147 insertions(+), 148 deletions(-)

diff --git a/pinot-broker/src/test/java/org/apache/pinot/broker/broker/HelixBrokerStarterTest.java b/pinot-broker/src/test/java/org/apache/pinot/broker/broker/HelixBrokerStarterTest.java
index 4ac704f..49f5557 100644
--- a/pinot-broker/src/test/java/org/apache/pinot/broker/broker/HelixBrokerStarterTest.java
+++ b/pinot-broker/src/test/java/org/apache/pinot/broker/broker/HelixBrokerStarterTest.java
@@ -96,7 +96,8 @@ public class HelixBrokerStarterTest extends ControllerTest {

     for (int i = 0; i < 5; i++) {
       _helixResourceManager
-          .addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(RAW_DINING_TABLE_NAME), "downloadUrl");
+          .addNewSegment(DINING_TABLE_NAME, SegmentMetadataMockUtils.mockSegmentMetadata(RAW_DINING_TABLE_NAME),
+              "downloadUrl");
     }

     Thread.sleep(1000);
@@ -216,7 +217,8 @@ public class HelixBrokerStarterTest extends
ControllerTest { 5); _helixResourceManager - .addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(RAW_DINING_TABLE_NAME), "downloadUrl"); + .addNewSegment(DINING_TABLE_NAME, SegmentMetadataMockUtils.mockSegmentMetadata(RAW_DINING_TABLE_NAME), + "downloadUrl"); // Wait up to 30s for external view to reach the expected size waitForPredicate(new Callable<Boolean>() { @@ -268,7 +270,7 @@ public class HelixBrokerStarterTest extends ControllerTest { OfflineSegmentZKMetadata offlineSegmentZKMetadata = _helixResourceManager.getOfflineSegmentZKMetadata(RAW_DINING_TABLE_NAME, segment); Assert.assertNotNull(offlineSegmentZKMetadata); - _helixResourceManager.refreshSegment( + _helixResourceManager.refreshSegment(DINING_TABLE_NAME, SegmentMetadataMockUtils.mockSegmentMetadataWithEndTimeInfo(RAW_DINING_TABLE_NAME, segment, endTime++), offlineSegmentZKMetadata); } diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotSegmentUploadRestletResource.java b/pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotSegmentUploadRestletResource.java index 4581d9d..5259d9f 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotSegmentUploadRestletResource.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotSegmentUploadRestletResource.java @@ -303,19 +303,21 @@ public class PinotSegmentUploadRestletResource { throw new UnsupportedOperationException("Unsupported upload type: " + uploadType); } + String rawTableName = segmentMetadata.getTableName(); + // This boolean is here for V1 segment upload, where we keep the segment in the downloadURI sent in the header. // We will deprecate this behavior eventually. if (!moveSegmentToFinalLocation) { LOGGER.info("Setting zkDownloadUri to {} for segment {} of table {}, skipping move", currentSegmentLocationURI, - segmentMetadata.getName(), segmentMetadata.getTableName()); + segmentMetadata.getName(), rawTableName); zkDownloadUri = currentSegmentLocationURI; } else { - zkDownloadUri = getZkDownloadURIForSegmentUpload(segmentMetadata, provider); + zkDownloadUri = getZkDownloadURIForSegmentUpload(rawTableName, segmentMetadata, provider); } String clientAddress = InetAddress.getByName(request.getRemoteAddr()).getHostName(); String segmentName = segmentMetadata.getName(); - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); + String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName); LOGGER .info("Processing upload request for segment: {} of table: {} from client: {}", segmentName, offlineTableName, clientAddress); @@ -323,15 +325,15 @@ public class PinotSegmentUploadRestletResource { // Validate segment SegmentValidatorResponse segmentValidatorResponse = new SegmentValidator(_pinotHelixResourceManager, _controllerConf, _executor, _connectionManager, - _controllerMetrics, _controllerLeadershipManager).validateSegment(segmentMetadata, tempSegmentDir); + _controllerMetrics, _controllerLeadershipManager) + .validateSegment(rawTableName, segmentMetadata, tempSegmentDir); // Zk operations - completeZkOperations(enableParallelPushProtection, headers, tempEncryptedFile, provider, segmentMetadata, - segmentName, zkDownloadUri, moveSegmentToFinalLocation, segmentValidatorResponse); + completeZkOperations(enableParallelPushProtection, headers, tempEncryptedFile, provider, rawTableName, + segmentMetadata, segmentName, zkDownloadUri, moveSegmentToFinalLocation, 
segmentValidatorResponse); return new SuccessResponse( - "Successfully uploaded segment: " + segmentMetadata.getName() + " of table: " + segmentMetadata - .getTableName()); + "Successfully uploaded segment: " + segmentMetadata.getName() + " of table: " + rawTableName); } catch (WebApplicationException e) { throw e; } catch (Exception e) { @@ -345,16 +347,16 @@ public class PinotSegmentUploadRestletResource { } } - private String getZkDownloadURIForSegmentUpload(SegmentMetadata segmentMetadata, FileUploadPathProvider provider) + private String getZkDownloadURIForSegmentUpload(String rawTableName, SegmentMetadata segmentMetadata, + FileUploadPathProvider provider) throws UnsupportedEncodingException { if (provider.getBaseDataDirURI().getScheme().equalsIgnoreCase(CommonConstants.Segment.LOCAL_SEGMENT_SCHEME)) { - return ControllerConf - .constructDownloadUrl(segmentMetadata.getTableName(), segmentMetadata.getName(), provider.getVip()); + return ControllerConf.constructDownloadUrl(rawTableName, segmentMetadata.getName(), provider.getVip()); } else { // Receiving .tar.gz segment upload for pluggable storage LOGGER.info("Using configured data dir {} for segment {} of table {}", _controllerConf.getDataDir(), - segmentMetadata.getName(), segmentMetadata.getTableName()); - return StringUtil.join("/", provider.getBaseDataDirURI().toString(), segmentMetadata.getTableName(), + segmentMetadata.getName(), rawTableName); + return StringUtil.join("/", provider.getBaseDataDirURI().toString(), rawTableName, URLEncoder.encode(segmentMetadata.getName(), "UTF-8")); } } @@ -386,15 +388,14 @@ public class PinotSegmentUploadRestletResource { } private void completeZkOperations(boolean enableParallelPushProtection, HttpHeaders headers, File tempDecryptedFile, - FileUploadPathProvider provider, SegmentMetadata segmentMetadata, String segmentName, String zkDownloadURI, + FileUploadPathProvider provider, String rawTableName, SegmentMetadata segmentMetadata, String segmentName, String zkDownloadURI, boolean moveSegmentToFinalLocation, SegmentValidatorResponse segmentValidatorResponse) throws Exception { String finalSegmentPath = StringUtil - .join("/", provider.getBaseDataDirURI().toString(), segmentMetadata.getTableName(), - URLEncoder.encode(segmentName, "UTF-8")); + .join("/", provider.getBaseDataDirURI().toString(), rawTableName, URLEncoder.encode(segmentName, "UTF-8")); URI finalSegmentLocationURI = new URI(finalSegmentPath); ZKOperator zkOperator = new ZKOperator(_pinotHelixResourceManager, _controllerConf, _controllerMetrics); - zkOperator.completeSegmentOperations(segmentMetadata, finalSegmentLocationURI, tempDecryptedFile, + zkOperator.completeSegmentOperations(rawTableName, segmentMetadata, finalSegmentLocationURI, tempDecryptedFile, enableParallelPushProtection, headers, zkDownloadURI, moveSegmentToFinalLocation, segmentValidatorResponse); } diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/SegmentValidator.java b/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/SegmentValidator.java index 740cd22..6af7af7 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/SegmentValidator.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/SegmentValidator.java @@ -68,8 +68,8 @@ public class SegmentValidator { _controllerLeadershipManager = controllerLeadershipManager; } - public SegmentValidatorResponse validateSegment(SegmentMetadata segmentMetadata, File tempSegmentDir) { - String rawTableName = 
segmentMetadata.getTableName(); + public SegmentValidatorResponse validateSegment(String rawTableName, SegmentMetadata segmentMetadata, + File tempSegmentDir) { String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName); String segmentName = segmentMetadata.getName(); TableConfig offlineTableConfig = @@ -86,7 +86,7 @@ public class SegmentValidator { _pinotHelixResourceManager.getSegmentMetadataZnRecord(offlineTableName, segmentName); // Checks whether it's a new segment or an existing one. if (segmentMetadataZnRecord == null) { - assignedInstances = _pinotHelixResourceManager.getAssignedInstancesForSegment(segmentMetadata); + assignedInstances = _pinotHelixResourceManager.getAssignedInstancesForSegment(rawTableName, segmentMetadata); if (assignedInstances.isEmpty()) { throw new ControllerApplicationException(LOGGER, "No assigned Instances for Segment: " + segmentName + ". Please check whether the table config is misconfigured.", Response.Status.INTERNAL_SERVER_ERROR); @@ -136,7 +136,7 @@ public class SegmentValidator { StorageQuotaChecker quotaChecker = new StorageQuotaChecker(offlineTableConfig, tableSizeReader, _controllerMetrics, _pinotHelixResourceManager, _controllerLeadershipManager); - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(metadata.getTableName()); + String offlineTableName = offlineTableConfig.getTableName(); return quotaChecker.isSegmentStorageWithinQuota(segmentFile, offlineTableName, metadata.getName(), _controllerConf.getServerAdminRequestTimeoutSeconds() * 1000); } diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/ZKOperator.java b/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/ZKOperator.java index f836aae..52d2caa 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/ZKOperator.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/api/upload/ZKOperator.java @@ -57,11 +57,11 @@ public class ZKOperator { _controllerMetrics = controllerMetrics; } - public void completeSegmentOperations(SegmentMetadata segmentMetadata, URI finalSegmentLocationURI, - File currentSegmentLocation, boolean enableParallelPushProtection, HttpHeaders headers, String zkDownloadURI, - boolean moveSegmentToFinalLocation, SegmentValidatorResponse segmentValidatorResponse) + public void completeSegmentOperations(String rawTableName, SegmentMetadata segmentMetadata, + URI finalSegmentLocationURI, File currentSegmentLocation, boolean enableParallelPushProtection, + HttpHeaders headers, String zkDownloadURI, boolean moveSegmentToFinalLocation, + SegmentValidatorResponse segmentValidatorResponse) throws Exception { - String rawTableName = segmentMetadata.getTableName(); String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName); String segmentName = segmentMetadata.getName(); @@ -176,7 +176,7 @@ public class ZKOperator { zkDownloadURI); } - _pinotHelixResourceManager.refreshSegment(segmentMetadata, existingSegmentZKMetadata); + _pinotHelixResourceManager.refreshSegment(offlineTableName, segmentMetadata, existingSegmentZKMetadata); } } catch (Exception e) { if (!_pinotHelixResourceManager.updateZkMetadata(existingSegmentZKMetadata)) { @@ -223,7 +223,7 @@ public class ZKOperator { LOGGER.info("Skipping segment move, keeping segment {} from table {} at {}", segmentName, rawTableName, zkDownloadURI); } - _pinotHelixResourceManager.addNewSegment(segmentMetadata, zkDownloadURI, crypter, assignedInstances); + 
_pinotHelixResourceManager.addNewSegment(rawTableName, segmentMetadata, zkDownloadURI, crypter, assignedInstances); } private void moveSegmentToPermanentDirectory(File currentSegmentLocation, URI finalSegmentLocationURI) diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java index cb57eb4..a4d4d0b 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java @@ -1576,16 +1576,17 @@ public class PinotHelixResourceManager { return instanceSet; } - public void addNewSegment(@Nonnull SegmentMetadata segmentMetadata, @Nonnull String downloadUrl) { - List<String> assignedInstances = getAssignedInstancesForSegment(segmentMetadata); - addNewSegment(segmentMetadata, downloadUrl, null, assignedInstances); + public void addNewSegment(@Nonnull String rawTableName, @Nonnull SegmentMetadata segmentMetadata, + @Nonnull String downloadUrl) { + List<String> assignedInstances = getAssignedInstancesForSegment(rawTableName, segmentMetadata); + addNewSegment(rawTableName, segmentMetadata, downloadUrl, null, assignedInstances); } - public void addNewSegment(@Nonnull SegmentMetadata segmentMetadata, @Nonnull String downloadUrl, String crypter, - @Nonnull List<String> assignedInstances) { + public void addNewSegment(@Nonnull String rawTableName, @Nonnull SegmentMetadata segmentMetadata, + @Nonnull String downloadUrl, String crypter, @Nonnull List<String> assignedInstances) { Preconditions.checkNotNull(assignedInstances, "Assigned Instances should not be null!"); String segmentName = segmentMetadata.getName(); - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); + String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName); // NOTE: must first set the segment ZK metadata before trying to update ideal state because server will need the // segment ZK metadata to download and load the segment @@ -1600,7 +1601,7 @@ public class PinotHelixResourceManager { } LOGGER.info("Added segment: {} of table: {} to property store", segmentName, offlineTableName); - addNewOfflineSegment(segmentMetadata, assignedInstances); + addNewOfflineSegment(rawTableName, segmentMetadata, assignedInstances); LOGGER.info("Added segment: {} of table: {} to ideal state", segmentName, offlineTableName); } @@ -1617,9 +1618,8 @@ public class PinotHelixResourceManager { return ZKMetadataProvider.setOfflineSegmentZKMetadata(_propertyStore, segmentMetadata); } - public void refreshSegment(@Nonnull SegmentMetadata segmentMetadata, + public void refreshSegment(@Nonnull String offlineTableName, @Nonnull SegmentMetadata segmentMetadata, @Nonnull OfflineSegmentZKMetadata offlineSegmentZKMetadata) { - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); String segmentName = segmentMetadata.getName(); // NOTE: must first set the segment ZK metadata before trying to refresh because server will pick up the @@ -1808,11 +1808,12 @@ public class PinotHelixResourceManager { /** * Gets assigned instances for uploading new segment. + * @param rawTableName Raw table name without type * @param segmentMetadata segment metadata * @return a list of assigned instances. 
*/ - public List<String> getAssignedInstancesForSegment(SegmentMetadata segmentMetadata) { - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); + public List<String> getAssignedInstancesForSegment(String rawTableName, SegmentMetadata segmentMetadata) { + String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName); TableConfig offlineTableConfig = ZKMetadataProvider.getOfflineTableConfig(_propertyStore, offlineTableName); Preconditions.checkNotNull(offlineTableConfig); int numReplicas = Integer.parseInt(offlineTableConfig.getValidationConfig().getReplication()); @@ -1820,8 +1821,8 @@ public class PinotHelixResourceManager { SegmentAssignmentStrategy segmentAssignmentStrategy = SegmentAssignmentStrategyFactory .getSegmentAssignmentStrategy(offlineTableConfig.getValidationConfig().getSegmentAssignmentStrategy()); return segmentAssignmentStrategy - .getAssignedInstances(_helixZkManager, _helixAdmin, _propertyStore, _helixClusterName, segmentMetadata, - numReplicas, serverTenant); + .getAssignedInstances(_helixZkManager, _helixAdmin, _propertyStore, _helixClusterName, offlineTableName, + segmentMetadata, numReplicas, serverTenant); } /** @@ -1831,12 +1832,14 @@ public class PinotHelixResourceManager { * segment assignment strategy and replicas in the table config in the property-store. * - Updates ideal state such that the new segment is assigned to required set of instances as per * the segment assignment strategy and replicas. - * + * @param rawTableName Raw table name without type * @param segmentMetadata Meta-data for the segment, used to access segmentName and tableName. + * @param assignedInstances Instances that are assigned to the segment */ // NOTE: method should be thread-safe - private void addNewOfflineSegment(SegmentMetadata segmentMetadata, List<String> assignedInstances) { - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); + private void addNewOfflineSegment(String rawTableName, SegmentMetadata segmentMetadata, + List<String> assignedInstances) { + String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName); String segmentName = segmentMetadata.getName(); // Assign new segment to instances diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BalanceNumSegmentAssignmentStrategy.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BalanceNumSegmentAssignmentStrategy.java index 48e9884..6f1bd8d 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BalanceNumSegmentAssignmentStrategy.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BalanceNumSegmentAssignmentStrategy.java @@ -47,9 +47,8 @@ public class BalanceNumSegmentAssignmentStrategy implements SegmentAssignmentStr @Override public List<String> getAssignedInstances(HelixManager helixManager, HelixAdmin helixAdmin, - ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, SegmentMetadata segmentMetadata, - int numReplicas, String tenantName) { - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); + ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, String tableNameWithType, + SegmentMetadata segmentMetadata, int numReplicas, String tenantName) { String serverTenantName = TagNameUtils.getOfflineTagForTenant(tenantName); List<String> selectedInstances = 
new ArrayList<>(); @@ -61,7 +60,7 @@ public class BalanceNumSegmentAssignmentStrategy implements SegmentAssignmentStr } // Count number of segments assigned to each instance - IdealState idealState = helixAdmin.getResourceIdealState(helixClusterName, offlineTableName); + IdealState idealState = helixAdmin.getResourceIdealState(helixClusterName, tableNameWithType); if (idealState != null) { for (String partitionName : idealState.getPartitionSet()) { Map<String, String> instanceToStateMap = idealState.getInstanceStateMap(partitionName); @@ -92,8 +91,8 @@ public class BalanceNumSegmentAssignmentStrategy implements SegmentAssignmentStr selectedInstances.add(priorityQueue.poll().getB()); } - LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + segmentMetadata - .getTableName() + ", selected instances: " + Arrays.toString(selectedInstances.toArray())); + LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + tableNameWithType + + ", selected instances: " + Arrays.toString(selectedInstances.toArray())); return selectedInstances; } } diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BucketizedSegmentStrategy.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BucketizedSegmentStrategy.java index 193bf16..213b751 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BucketizedSegmentStrategy.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/BucketizedSegmentStrategy.java @@ -42,8 +42,8 @@ public class BucketizedSegmentStrategy implements SegmentAssignmentStrategy { @Override public List<String> getAssignedInstances(HelixManager helixManager, HelixAdmin helixAdmin, - ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, SegmentMetadata segmentMetadata, - int numReplicas, String tenantName) { + ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, String tableNameWithType, + SegmentMetadata segmentMetadata, int numReplicas, String tenantName) { String serverTenantName = TagNameUtils.getOfflineTagForTenant(tenantName); List<String> allInstances = HelixHelper.getEnabledInstancesWithTag(helixManager, serverTenantName); @@ -55,8 +55,9 @@ public class BucketizedSegmentStrategy implements SegmentAssignmentStrategy { selectedInstanceList.add(instance); } } - LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + segmentMetadata - .getTableName() + ", selected instances: " + Arrays.toString(selectedInstanceList.toArray())); + LOGGER.info( + "Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + tableNameWithType + + ", selected instances: " + Arrays.toString(selectedInstanceList.toArray())); return selectedInstanceList; } else { throw new RuntimeException("Segment missing sharding key!"); diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/RandomAssignmentStrategy.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/RandomAssignmentStrategy.java index 818d895..cd7ff57 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/RandomAssignmentStrategy.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/RandomAssignmentStrategy.java @@ -44,8 +44,8 @@ public class RandomAssignmentStrategy implements SegmentAssignmentStrategy { 
@Override public List<String> getAssignedInstances(HelixManager helixManager, HelixAdmin helixAdmin, - ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, SegmentMetadata segmentMetadata, - int numReplicas, String tenantName) { + ZkHelixPropertyStore<ZNRecord> propertyStore, String tableNameWithType, String helixClusterName, + SegmentMetadata segmentMetadata, int numReplicas, String tenantName) { String serverTenantName = TagNameUtils.getOfflineTagForTenant(tenantName); final Random random = new Random(System.currentTimeMillis()); @@ -56,8 +56,8 @@ public class RandomAssignmentStrategy implements SegmentAssignmentStrategy { selectedInstanceList.add(allInstanceList.get(idx)); allInstanceList.remove(idx); } - LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + segmentMetadata - .getTableName() + ", selected instances: " + Arrays.toString(selectedInstanceList.toArray())); + LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + tableNameWithType + + ", selected instances: " + Arrays.toString(selectedInstanceList.toArray())); return selectedInstanceList; } diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/ReplicaGroupSegmentAssignmentStrategy.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/ReplicaGroupSegmentAssignmentStrategy.java index cd604f5..7e1bd57 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/ReplicaGroupSegmentAssignmentStrategy.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/ReplicaGroupSegmentAssignmentStrategy.java @@ -52,18 +52,17 @@ public class ReplicaGroupSegmentAssignmentStrategy implements SegmentAssignmentS @Override public List<String> getAssignedInstances(HelixManager helixManager, HelixAdmin helixAdmin, - ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, SegmentMetadata segmentMetadata, - int numReplicas, String tenantName) { - String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(segmentMetadata.getTableName()); + ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, String tableNameWithType, + SegmentMetadata segmentMetadata, int numReplicas, String tenantName) { // Fetch the partition mapping table from the property store. ReplicaGroupPartitionAssignmentGenerator partitionAssignmentGenerator = new ReplicaGroupPartitionAssignmentGenerator(propertyStore); ReplicaGroupPartitionAssignment replicaGroupPartitionAssignment = - partitionAssignmentGenerator.getReplicaGroupPartitionAssignment(offlineTableName); + partitionAssignmentGenerator.getReplicaGroupPartitionAssignment(tableNameWithType); // Fetch the segment assignment related configurations. 
- TableConfig tableConfig = ZKMetadataProvider.getTableConfig(propertyStore, offlineTableName); + TableConfig tableConfig = ZKMetadataProvider.getTableConfig(propertyStore, tableNameWithType); ReplicaGroupStrategyConfig replicaGroupStrategyConfig = tableConfig.getValidationConfig().getReplicaGroupStrategyConfig(); boolean mirrorAssignmentAcrossReplicaGroups = replicaGroupStrategyConfig.getMirrorAssignmentAcrossReplicaGroups(); @@ -98,8 +97,8 @@ public class ReplicaGroupSegmentAssignmentStrategy implements SegmentAssignmentS selectedInstanceList.add(instancesInReplicaGroup.get(index)); } - LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + segmentMetadata - .getTableName() + ", selected instances: " + Arrays.toString(selectedInstanceList.toArray())); + LOGGER.info("Segment assignment result for : " + segmentMetadata.getName() + ", in resource : " + tableNameWithType + + ", selected instances: " + Arrays.toString(selectedInstanceList.toArray())); return selectedInstanceList; } diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategy.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategy.java index 9b0cca3..b9ef632 100644 --- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategy.java +++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategy.java @@ -34,8 +34,8 @@ import org.apache.pinot.common.segment.SegmentMetadata; public interface SegmentAssignmentStrategy { List<String> getAssignedInstances(HelixManager helixManager, HelixAdmin helixAdmin, - ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, SegmentMetadata segmentMetadata, - int numReplicas, String tenantName); + ZkHelixPropertyStore<ZNRecord> propertyStore, String helixClusterName, String tableNameWithType, + SegmentMetadata segmentMetadata, int numReplicas, String tenantName); } diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/PinotSegmentRestletResourceTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/PinotSegmentRestletResourceTest.java index 3a45019..aaf36a2 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/PinotSegmentRestletResourceTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/PinotSegmentRestletResourceTest.java @@ -87,7 +87,7 @@ public class PinotSegmentRestletResourceTest extends ControllerTest { // Upload Segments for (int i = 0; i < 5; ++i) { SegmentMetadata segmentMetadata = SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME); - _helixResourceManager.addNewSegment(segmentMetadata, "downloadUrl"); + _helixResourceManager.addNewSegment(TABLE_NAME, segmentMetadata, "downloadUrl"); segmentMetadataTable.put(segmentMetadata.getName(), segmentMetadata); } @@ -97,7 +97,7 @@ public class PinotSegmentRestletResourceTest extends ControllerTest { // Add more segments for (int i = 0; i < 5; ++i) { SegmentMetadata segmentMetadata = SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME); - _helixResourceManager.addNewSegment(segmentMetadata, "downloadUrl"); + _helixResourceManager.addNewSegment(TABLE_NAME, segmentMetadata, "downloadUrl"); segmentMetadataTable.put(segmentMetadata.getName(), segmentMetadata); } diff --git 
a/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/TableViewsTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/TableViewsTest.java index ee14650..12103cf 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/TableViewsTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/api/resources/TableViewsTest.java @@ -65,9 +65,8 @@ public class TableViewsTest extends ControllerTest { .setNumReplicas(2).build(); Assert.assertEquals(_helixManager.getInstanceType(), InstanceType.CONTROLLER); _helixResourceManager.addTable(tableConfig); - _helixResourceManager - .addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(OFFLINE_TABLE_NAME, OFFLINE_SEGMENT_NAME), - "downloadUrl"); + _helixResourceManager.addNewSegment(OFFLINE_TABLE_NAME, + SegmentMetadataMockUtils.mockSegmentMetadata(OFFLINE_TABLE_NAME, OFFLINE_SEGMENT_NAME), "downloadUrl"); // Create the hybrid table tableConfig = new TableConfig.Builder(CommonConstants.Helix.TableType.OFFLINE).setTableName(HYBRID_TABLE_NAME) diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerInstanceToggleTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerInstanceToggleTest.java index 25855a8..731fa3f 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerInstanceToggleTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerInstanceToggleTest.java @@ -70,7 +70,8 @@ public class ControllerInstanceToggleTest extends ControllerTest { // Add segments for (int i = 0; i < NUM_INSTANCES; i++) { - _helixResourceManager.addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(RAW_TABLE_NAME), "downloadUrl"); + _helixResourceManager + .addNewSegment(RAW_TABLE_NAME, SegmentMetadataMockUtils.mockSegmentMetadata(RAW_TABLE_NAME), "downloadUrl"); Assert.assertEquals(_helixAdmin.getResourceIdealState(_helixClusterName, OFFLINE_TABLE_NAME).getNumPartitions(), i + 1); } diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerSentinelTestV2.java b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerSentinelTestV2.java index 1411f14..97bf934 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerSentinelTestV2.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/ControllerSentinelTestV2.java @@ -71,7 +71,8 @@ public class ControllerSentinelTestV2 extends ControllerTest { Assert .assertEquals(_helixAdmin.getResourceIdealState(_helixClusterName, tableName + "_OFFLINE").getNumPartitions(), i); - _helixResourceManager.addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(tableName), "downloadUrl"); + _helixResourceManager + .addNewSegment(tableName, SegmentMetadataMockUtils.mockSegmentMetadata(tableName), "downloadUrl"); Assert .assertEquals(_helixAdmin.getResourceIdealState(_helixClusterName, tableName + "_OFFLINE").getNumPartitions(), i + 1); diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/PinotResourceManagerTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/PinotResourceManagerTest.java index e6a29c6..eb57c7f 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/PinotResourceManagerTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/PinotResourceManagerTest.java @@ -102,7 +102,8 @@ public class 
PinotResourceManagerTest extends ControllerTest { // Basic add/delete case for (int i = 1; i <= 2; i++) { - _helixResourceManager.addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME), "downloadUrl"); + _helixResourceManager + .addNewSegment(TABLE_NAME, SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME), "downloadUrl"); } IdealState idealState = _helixAdmin.getResourceIdealState(getHelixClusterName(), offlineTableName); Set<String> segments = idealState.getPartitionSet(); @@ -122,7 +123,7 @@ public class PinotResourceManagerTest extends ControllerTest { public void run() { for (int i = 0; i < 10; i++) { _helixResourceManager - .addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME), "downloadUrl"); + .addNewSegment(TABLE_NAME, SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME), "downloadUrl"); } } }); diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/retention/RetentionManagerTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/retention/RetentionManagerTest.java index 4f34525..d4adfe1 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/retention/RetentionManagerTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/retention/RetentionManagerTest.java @@ -306,7 +306,6 @@ public class RetentionManagerTest { private SegmentMetadata mockSegmentMetadata(long startTime, long endTime, TimeUnit timeUnit) { long creationTime = System.currentTimeMillis(); SegmentMetadata segmentMetadata = mock(SegmentMetadata.class); - when(segmentMetadata.getTableName()).thenReturn(TEST_TABLE_NAME); when(segmentMetadata.getName()).thenReturn(TEST_TABLE_NAME + creationTime); when(segmentMetadata.getIndexCreationTime()).thenReturn(creationTime); when(segmentMetadata.getCrc()).thenReturn(Long.toString(creationTime)); diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategyTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategyTest.java index 3c96a20..836824c 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategyTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/helix/core/sharding/SegmentAssignmentStrategyTest.java @@ -118,7 +118,8 @@ public class SegmentAssignmentStrategyTest extends ControllerTest { for (int i = 0; i < 10; ++i) { _helixResourceManager - .addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME_RANDOM), "downloadUrl"); + .addNewSegment(TABLE_NAME_RANDOM, SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME_RANDOM), + "downloadUrl"); // Wait for all segments appear in the external view while (!allSegmentsPushedToIdealState(TABLE_NAME_RANDOM, i + 1)) { @@ -153,7 +154,8 @@ public class SegmentAssignmentStrategyTest extends ControllerTest { int numSegments = 20; for (int i = 0; i < numSegments; ++i) { _helixResourceManager - .addNewSegment(SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME_BALANCED), "downloadUrl"); + .addNewSegment(TABLE_NAME_BALANCED, SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME_BALANCED), + "downloadUrl"); } // Wait for all segments appear in the external view diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/utils/ReplicaGroupTestUtils.java b/pinot-controller/src/test/java/org/apache/pinot/controller/utils/ReplicaGroupTestUtils.java index 31466d6..603726f 
100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/utils/ReplicaGroupTestUtils.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/utils/ReplicaGroupTestUtils.java @@ -45,7 +45,7 @@ public class ReplicaGroupTestUtils { String segmentName = SEGMENT_PREFIX + i; SegmentMetadata segmentMetadata = SegmentMetadataMockUtils .mockSegmentMetadataWithPartitionInfo(tableName, segmentName, partitionColumn, partition); - resourceManager.addNewSegment(segmentMetadata, "downloadUrl"); + resourceManager.addNewSegment(tableName, segmentMetadata, "downloadUrl"); if (!segmentsPerPartition.containsKey(partition)) { segmentsPerPartition.put(partition, new HashSet<String>()); } @@ -58,7 +58,7 @@ public class ReplicaGroupTestUtils { String partitionColumn, PinotHelixResourceManager resourceManager) { SegmentMetadata segmentMetadata = SegmentMetadataMockUtils.mockSegmentMetadataWithPartitionInfo(tableName, segmentName, partitionColumn, 0); - resourceManager.addNewSegment(segmentMetadata, "downloadUrl"); + resourceManager.addNewSegment(tableName, segmentMetadata, "downloadUrl"); } /** diff --git a/pinot-controller/src/test/java/org/apache/pinot/controller/validation/ValidationManagerTest.java b/pinot-controller/src/test/java/org/apache/pinot/controller/validation/ValidationManagerTest.java index e039d73..55c13a5 100644 --- a/pinot-controller/src/test/java/org/apache/pinot/controller/validation/ValidationManagerTest.java +++ b/pinot-controller/src/test/java/org/apache/pinot/controller/validation/ValidationManagerTest.java @@ -128,7 +128,7 @@ public class ValidationManagerTest extends ControllerTest { throws Exception { SegmentMetadata segmentMetadata = SegmentMetadataMockUtils.mockSegmentMetadata(TEST_TABLE_NAME, TEST_SEGMENT_NAME); - _helixResourceManager.addNewSegment(segmentMetadata, "http://dummy/"); + _helixResourceManager.addNewSegment(TEST_TABLE_NAME, segmentMetadata, "http://dummy/"); OfflineSegmentZKMetadata offlineSegmentZKMetadata = _helixResourceManager.getOfflineSegmentZKMetadata(TEST_TABLE_NAME, TEST_SEGMENT_NAME); long pushTime = offlineSegmentZKMetadata.getPushTime(); @@ -139,7 +139,7 @@ public class ValidationManagerTest extends ControllerTest { // Refresh the segment Mockito.when(segmentMetadata.getCrc()).thenReturn(Long.toString(System.nanoTime())); - _helixResourceManager.refreshSegment(segmentMetadata, offlineSegmentZKMetadata); + _helixResourceManager.refreshSegment(TEST_TABLE_NAME, segmentMetadata, offlineSegmentZKMetadata); offlineSegmentZKMetadata = _helixResourceManager.getOfflineSegmentZKMetadata(TEST_TABLE_NAME, TEST_SEGMENT_NAME); diff --git a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java index 2ddb80c..e78ae37 100644 --- a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java +++ b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java @@ -135,9 +135,8 @@ public class ImmutableSegmentLoader { FieldSpec fieldSpec = schema.getFieldSpecFor(columnName); VirtualColumnProvider provider = VirtualColumnProviderFactory.buildProvider(fieldSpec.getVirtualColumnProvider()); - VirtualColumnContext context = - new VirtualColumnContext(NetUtil.getHostnameOrAddress(), segmentMetadata.getTableName(), segmentName, - columnName, segmentMetadata.getTotalDocs()); + VirtualColumnContext context = new 
VirtualColumnContext(NetUtil.getHostnameOrAddress(), segmentName, columnName, + segmentMetadata.getTotalDocs()); indexContainerMap.put(columnName, provider.buildColumnIndexContainer(context)); segmentMetadata.getColumnMetadataMap().put(columnName, provider.buildMetadata(context)); } diff --git a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java index 0c1112c..3a09bbb 100644 --- a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java +++ b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java @@ -431,8 +431,7 @@ public class MutableSegmentImpl implements MutableSegment { private ColumnDataSource getVirtualDataSource(String column) { VirtualColumnContext virtualColumnContext = - new VirtualColumnContext(NetUtil.getHostnameOrAddress(), _segmentMetadata.getTableName(), getSegmentName(), - column, _numDocsIndexed + 1); + new VirtualColumnContext(NetUtil.getHostnameOrAddress(), getSegmentName(), column, _numDocsIndexed + 1); VirtualColumnProvider provider = VirtualColumnProviderFactory.buildProvider(_schema.getFieldSpecFor(column).getVirtualColumnProvider()); return new ColumnDataSource(provider.buildColumnIndexContainer(virtualColumnContext), diff --git a/pinot-core/src/main/java/org/apache/pinot/core/minion/BackfillDateTimeColumn.java b/pinot-core/src/main/java/org/apache/pinot/core/minion/BackfillDateTimeColumn.java index 9d786b0..9ae4782 100644 --- a/pinot-core/src/main/java/org/apache/pinot/core/minion/BackfillDateTimeColumn.java +++ b/pinot-core/src/main/java/org/apache/pinot/core/minion/BackfillDateTimeColumn.java @@ -56,14 +56,16 @@ import org.slf4j.LoggerFactory; public class BackfillDateTimeColumn { private static final Logger LOGGER = LoggerFactory.getLogger(BackfillDateTimeColumn.class); + private final String _rawTableName; private final File _originalIndexDir; private final File _backfilledIndexDir; private final TimeFieldSpec _srcTimeFieldSpec; private final DateTimeFieldSpec _destDateTimeFieldSpec; - public BackfillDateTimeColumn(@Nonnull File originalIndexDir, @Nonnull File backfilledIndexDir, + public BackfillDateTimeColumn(@Nonnull String rawTableName, @Nonnull File originalIndexDir, @Nonnull File backfilledIndexDir, @Nonnull TimeFieldSpec srcTimeSpec, @Nonnull DateTimeFieldSpec destDateTimeSpec) throws Exception { + _rawTableName = rawTableName; _originalIndexDir = originalIndexDir; _backfilledIndexDir = backfilledIndexDir; Preconditions.checkArgument(!_originalIndexDir.getAbsolutePath().equals(_backfilledIndexDir.getAbsolutePath()), @@ -76,8 +78,7 @@ public class BackfillDateTimeColumn { throws Exception { SegmentMetadataImpl originalSegmentMetadata = new SegmentMetadataImpl(_originalIndexDir); String segmentName = originalSegmentMetadata.getName(); - String tableName = originalSegmentMetadata.getTableName(); - LOGGER.info("Start backfilling segment: {} in table: {}", segmentName, tableName); + LOGGER.info("Start backfilling segment: {} in table: {}", segmentName, _rawTableName); PinotSegmentRecordReader segmentRecordReader = new PinotSegmentRecordReader(_originalIndexDir); BackfillDateTimeRecordReader wrapperReader = @@ -90,7 +91,7 @@ public class BackfillDateTimeColumn { config.setFormat(FileFormat.PINOT); config.setOutDir(_backfilledIndexDir.getAbsolutePath()); config.setOverwrite(true); - config.setTableName(tableName); + config.setTableName(_rawTableName); 
config.setSegmentName(segmentName); config.setSchema(wrapperReader.getSchema()); diff --git a/pinot-core/src/main/java/org/apache/pinot/core/minion/RawIndexConverter.java b/pinot-core/src/main/java/org/apache/pinot/core/minion/RawIndexConverter.java index d12aaf5..96fc637 100644 --- a/pinot-core/src/main/java/org/apache/pinot/core/minion/RawIndexConverter.java +++ b/pinot-core/src/main/java/org/apache/pinot/core/minion/RawIndexConverter.java @@ -73,6 +73,7 @@ public class RawIndexConverter { // BITS_PER_ELEMENT is not applicable for raw index private static final int BITS_PER_ELEMENT_FOR_RAW_INDEX = -1; + private final String _rawTableName; private final ImmutableSegment _originalImmutableSegment; private final SegmentMetadataImpl _originalSegmentMetadata; private final File _convertedIndexDir; @@ -83,13 +84,14 @@ public class RawIndexConverter { * NOTE: original segment should be in V1 format. * TODO: support V3 format */ - public RawIndexConverter(@Nonnull File originalIndexDir, @Nonnull File convertedIndexDir, - @Nullable String columnsToConvert) + public RawIndexConverter(@Nonnull String rawTableName, @Nonnull File originalIndexDir, + @Nonnull File convertedIndexDir, @Nullable String columnsToConvert) throws Exception { FileUtils.copyDirectory(originalIndexDir, convertedIndexDir); IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(); indexLoadingConfig.setSegmentVersion(SegmentVersion.v1); indexLoadingConfig.setReadMode(ReadMode.mmap); + _rawTableName = rawTableName; _originalImmutableSegment = ImmutableSegmentLoader.load(originalIndexDir, indexLoadingConfig); _originalSegmentMetadata = (SegmentMetadataImpl) _originalImmutableSegment.getSegmentMetadata(); _convertedIndexDir = convertedIndexDir; @@ -101,8 +103,7 @@ public class RawIndexConverter { public boolean convert() throws Exception { String segmentName = _originalSegmentMetadata.getName(); - String tableName = _originalSegmentMetadata.getTableName(); - LOGGER.info("Start converting segment: {} in table: {}", segmentName, tableName); + LOGGER.info("Start converting segment: {} in table: {}", segmentName, _rawTableName); List<FieldSpec> columnsToConvert = new ArrayList<>(); Schema schema = _originalSegmentMetadata.getSchema(); @@ -134,7 +135,7 @@ public class RawIndexConverter { } if (columnsToConvert.isEmpty()) { - LOGGER.info("No column converted for segment: {} in table: {}", segmentName, tableName); + LOGGER.info("No column converted for segment: {} in table: {}", segmentName, _rawTableName); return false; } else { // Convert columns @@ -148,7 +149,8 @@ public class RawIndexConverter { .persistCreationMeta(_convertedIndexDir, CrcUtils.forAllFilesInFolder(_convertedIndexDir).computeCrc(), _originalSegmentMetadata.getIndexCreationTime()); - LOGGER.info("{} columns converted for segment: {} in table: {}", columnsToConvert.size(), segmentName, tableName); + LOGGER.info("{} columns converted for segment: {} in table: {}", columnsToConvert.size(), segmentName, + _rawTableName); return true; } } diff --git a/pinot-core/src/main/java/org/apache/pinot/core/minion/SegmentPurger.java b/pinot-core/src/main/java/org/apache/pinot/core/minion/SegmentPurger.java index 3e95b7e..deaf00f 100644 --- a/pinot-core/src/main/java/org/apache/pinot/core/minion/SegmentPurger.java +++ b/pinot-core/src/main/java/org/apache/pinot/core/minion/SegmentPurger.java @@ -49,6 +49,7 @@ import org.slf4j.LoggerFactory; public class SegmentPurger { private static final Logger LOGGER = LoggerFactory.getLogger(SegmentPurger.class); + private final String 
_rawTableName; private final File _originalIndexDir; private final File _workingDir; private final RecordPurger _recordPurger; @@ -57,10 +58,11 @@ public class SegmentPurger { private int _numRecordsPurged; private int _numRecordsModified; - public SegmentPurger(@Nonnull File originalIndexDir, @Nonnull File workingDir, @Nullable RecordPurger recordPurger, - @Nullable RecordModifier recordModifier) { + public SegmentPurger(@Nonnull String rawTableName, @Nonnull File originalIndexDir, @Nonnull File workingDir, + @Nullable RecordPurger recordPurger, @Nullable RecordModifier recordModifier) { Preconditions.checkArgument(recordPurger != null || recordModifier != null, "At least one of record purger and modifier should be non-null"); + _rawTableName = rawTableName; _originalIndexDir = originalIndexDir; _workingDir = workingDir; _recordPurger = recordPurger; @@ -70,9 +72,8 @@ public class SegmentPurger { public File purgeSegment() throws Exception { SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_originalIndexDir); - String tableName = segmentMetadata.getTableName(); String segmentName = segmentMetadata.getName(); - LOGGER.info("Start purging table: {}, segment: {}", tableName, segmentName); + LOGGER.info("Start purging table: {}, segment: {}", _rawTableName, segmentName); try (PurgeRecordReader purgeRecordReader = new PurgeRecordReader()) { // Make a first pass through the data to see if records need to be purged or modified @@ -88,7 +89,7 @@ public class SegmentPurger { Schema schema = purgeRecordReader.getSchema(); SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema); config.setOutDir(_workingDir.getPath()); - config.setTableName(tableName); + config.setTableName(_rawTableName); config.setSegmentName(segmentName); // Keep index creation time the same as original segment because both segments use the same raw data. 
@@ -133,8 +134,8 @@ public class SegmentPurger { driver.build(); } - LOGGER.info("Finish purging table: {}, segment: {}, purged {} records, modified {} records", tableName, segmentName, - _numRecordsPurged, _numRecordsModified); + LOGGER.info("Finish purging table: {}, segment: {}, purged {} records, modified {} records", _rawTableName, + segmentName, _numRecordsPurged, _numRecordsModified); return new File(_workingDir, segmentName); } diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/VirtualColumnContext.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/VirtualColumnContext.java index aa61d18..0b291d4 100644 --- a/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/VirtualColumnContext.java +++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/VirtualColumnContext.java @@ -23,15 +23,13 @@ package org.apache.pinot.core.segment.virtualcolumn; */ public class VirtualColumnContext { private String _hostname; - private String _tableName; private String _segmentName; private String _columnName; private int _totalDocCount; - public VirtualColumnContext(String hostname, String tableName, String segmentName, String columnName, + public VirtualColumnContext(String hostname, String segmentName, String columnName, int totalDocCount) { _hostname = hostname; - _tableName = tableName; _segmentName = segmentName; _columnName = columnName; _totalDocCount = totalDocCount; @@ -41,10 +39,6 @@ public class VirtualColumnContext { return _hostname; } - public String getTableName() { - return _tableName; - } - public String getSegmentName() { return _segmentName; } diff --git a/pinot-core/src/test/java/org/apache/pinot/core/data/readers/BackfillDateTimeRecordReaderTest.java b/pinot-core/src/test/java/org/apache/pinot/core/data/readers/BackfillDateTimeRecordReaderTest.java index 7f01c5c..7beba50 100644 --- a/pinot-core/src/test/java/org/apache/pinot/core/data/readers/BackfillDateTimeRecordReaderTest.java +++ b/pinot-core/src/test/java/org/apache/pinot/core/data/readers/BackfillDateTimeRecordReaderTest.java @@ -49,7 +49,7 @@ import org.testng.annotations.Test; */ public class BackfillDateTimeRecordReaderTest { private static final int NUM_ROWS = 10000; - + private static final String TABLE_NAME = "myTable"; private static String D1 = "d1"; private static String D2 = "d2"; private static String M1 = "m1"; @@ -138,7 +138,8 @@ public class BackfillDateTimeRecordReaderTest { DateTimeFieldSpec dateTimeFieldSpec, Schema schemaExpected) throws Exception { BackfillDateTimeColumn backfillDateTimeColumn = - new BackfillDateTimeColumn(new File("original"), new File("backup"), timeFieldSpec, dateTimeFieldSpec); + new BackfillDateTimeColumn(TABLE_NAME, new File("original"), new File("backup"), timeFieldSpec, + dateTimeFieldSpec); try (BackfillDateTimeRecordReader wrapperReader = backfillDateTimeColumn .getBackfillDateTimeRecordReader(baseRecordReader)) { diff --git a/pinot-core/src/test/java/org/apache/pinot/core/minion/SegmentPurgerTest.java b/pinot-core/src/test/java/org/apache/pinot/core/minion/SegmentPurgerTest.java index 81e41ac..93feea6 100644 --- a/pinot-core/src/test/java/org/apache/pinot/core/minion/SegmentPurgerTest.java +++ b/pinot-core/src/test/java/org/apache/pinot/core/minion/SegmentPurgerTest.java @@ -116,7 +116,7 @@ public class SegmentPurgerTest { }; SegmentPurger segmentPurger = - new SegmentPurger(_originalIndexDir, PURGED_SEGMENT_DIR, recordPurger, recordModifier); + new SegmentPurger(TABLE_NAME, 
_originalIndexDir, PURGED_SEGMENT_DIR, recordPurger, recordModifier); File purgedIndexDir = segmentPurger.purgeSegment(); // Check the purge/modify counter in segment purger diff --git a/pinot-minion/src/main/java/org/apache/pinot/minion/executor/ConvertToRawIndexTaskExecutor.java b/pinot-minion/src/main/java/org/apache/pinot/minion/executor/ConvertToRawIndexTaskExecutor.java index 8930bb1..6e1b4b8 100644 --- a/pinot-minion/src/main/java/org/apache/pinot/minion/executor/ConvertToRawIndexTaskExecutor.java +++ b/pinot-minion/src/main/java/org/apache/pinot/minion/executor/ConvertToRawIndexTaskExecutor.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.Map; import javax.annotation.Nonnull; import org.apache.pinot.common.config.PinotTaskConfig; +import org.apache.pinot.common.config.TableNameBuilder; import org.apache.pinot.common.metadata.segment.SegmentZKMetadataCustomMapModifier; import org.apache.pinot.core.common.MinionConstants; import org.apache.pinot.core.minion.RawIndexConverter; @@ -35,7 +36,9 @@ public class ConvertToRawIndexTaskExecutor extends BaseSingleSegmentConversionEx @Nonnull File workingDir) throws Exception { Map<String, String> configs = pinotTaskConfig.getConfigs(); - new RawIndexConverter(originalIndexDir, workingDir, + String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY); + String rawTableName = TableNameBuilder.extractRawTableName(tableNameWithType); + new RawIndexConverter(rawTableName, originalIndexDir, workingDir, configs.get(MinionConstants.ConvertToRawIndexTask.COLUMNS_TO_CONVERT_KEY)).convert(); return new SegmentConversionResult.Builder().setFile(workingDir) .setTableNameWithType(configs.get(MinionConstants.TABLE_NAME_KEY)) diff --git a/pinot-minion/src/main/java/org/apache/pinot/minion/executor/PurgeTaskExecutor.java b/pinot-minion/src/main/java/org/apache/pinot/minion/executor/PurgeTaskExecutor.java index 9c775a7..3f39a14 100644 --- a/pinot-minion/src/main/java/org/apache/pinot/minion/executor/PurgeTaskExecutor.java +++ b/pinot-minion/src/main/java/org/apache/pinot/minion/executor/PurgeTaskExecutor.java @@ -49,7 +49,7 @@ public class PurgeTaskExecutor extends BaseSingleSegmentConversionExecutor { SegmentPurger.RecordModifierFactory recordModifierFactory = MINION_CONTEXT.getRecordModifierFactory(); SegmentPurger.RecordModifier recordModifier = recordModifierFactory != null ? 
recordModifierFactory.getRecordModifier(rawTableName) : null; - SegmentPurger segmentPurger = new SegmentPurger(originalIndexDir, workingDir, recordPurger, recordModifier); + SegmentPurger segmentPurger = new SegmentPurger(rawTableName, originalIndexDir, workingDir, recordPurger, recordModifier); File purgedSegmentFile = segmentPurger.purgeSegment(); if (purgedSegmentFile == null) { diff --git a/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java b/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java index 2997237..6524df5 100644 --- a/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java +++ b/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java @@ -102,16 +102,12 @@ public class BenchmarkQueryEngine { _perfBenchmarkDriver = new PerfBenchmarkDriver(conf); _perfBenchmarkDriver.run(); - Set<String> tables = new HashSet<String>(); File[] segments = new File(DATA_DIRECTORY, TABLE_NAME).listFiles(); for (File segmentDir : segments) { SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segmentDir); - if (!tables.contains(segmentMetadata.getTableName())) { - _perfBenchmarkDriver.configureTable(segmentMetadata.getTableName()); - tables.add(segmentMetadata.getTableName()); - } + _perfBenchmarkDriver.configureTable(TABLE_NAME); System.out.println("Adding segment " + segmentDir.getAbsolutePath()); - _perfBenchmarkDriver.addSegment(segmentMetadata); + _perfBenchmarkDriver.addSegment(TABLE_NAME, segmentMetadata); } ZkClient client = new ZkClient("localhost:2191", 10000, 10000, new ZNRecordSerializer()); diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java b/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java index 2809a50..7aa5cd7 100644 --- a/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java +++ b/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java @@ -21,6 +21,7 @@ package org.apache.pinot.tools.admin.command; import java.io.File; import java.util.ArrayList; import java.util.List; +import org.apache.pinot.common.config.TableNameBuilder; import org.apache.pinot.common.data.DateTimeFieldSpec; import org.apache.pinot.common.data.TimeFieldSpec; import org.apache.pinot.common.utils.CommonConstants.Segment.SegmentType; @@ -196,10 +197,11 @@ public class BackfillDateTimeColumnCommand extends AbstractBaseAdminCommand impl } // create new segment + String rawTableName = TableNameBuilder.extractRawTableName(_tableName); File segmentDir = new File(downloadSegmentDir, segmentName); File outputDir = new File(downloadSegmentDir, OUTPUT_FOLDER); BackfillDateTimeColumn backfillDateTimeColumn = - new BackfillDateTimeColumn(segmentDir, outputDir, timeFieldSpec, dateTimeFieldSpec); + new BackfillDateTimeColumn(rawTableName, segmentDir, outputDir, timeFieldSpec, dateTimeFieldSpec); boolean backfillStatus = backfillDateTimeColumn.backfill(); LOGGER .info("Backfill status for segment {} in {} to {} is {}", segmentName, segmentDir, outputDir, backfillStatus); diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java index 6d7928d..967f006 100644 --- a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java +++ b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java @@ -319,9 
diff --git a/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java b/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java
index 2997237..6524df5 100644
--- a/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java
+++ b/pinot-perf/src/main/java/org/apache/pinot/perf/BenchmarkQueryEngine.java
@@ -102,16 +102,12 @@ public class BenchmarkQueryEngine {
     _perfBenchmarkDriver = new PerfBenchmarkDriver(conf);
     _perfBenchmarkDriver.run();
 
-    Set<String> tables = new HashSet<String>();
     File[] segments = new File(DATA_DIRECTORY, TABLE_NAME).listFiles();
     for (File segmentDir : segments) {
       SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segmentDir);
-      if (!tables.contains(segmentMetadata.getTableName())) {
-        _perfBenchmarkDriver.configureTable(segmentMetadata.getTableName());
-        tables.add(segmentMetadata.getTableName());
-      }
+      _perfBenchmarkDriver.configureTable(TABLE_NAME);
       System.out.println("Adding segment " + segmentDir.getAbsolutePath());
-      _perfBenchmarkDriver.addSegment(segmentMetadata);
+      _perfBenchmarkDriver.addSegment(TABLE_NAME, segmentMetadata);
     }
 
     ZkClient client = new ZkClient("localhost:2191", 10000, 10000, new ZNRecordSerializer());
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java b/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java
index 2809a50..7aa5cd7 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/admin/command/BackfillDateTimeColumnCommand.java
@@ -21,6 +21,7 @@ package org.apache.pinot.tools.admin.command;
 import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.pinot.common.config.TableNameBuilder;
 import org.apache.pinot.common.data.DateTimeFieldSpec;
 import org.apache.pinot.common.data.TimeFieldSpec;
 import org.apache.pinot.common.utils.CommonConstants.Segment.SegmentType;
@@ -196,10 +197,11 @@ public class BackfillDateTimeColumnCommand extends AbstractBaseAdminCommand impl
       }
 
       // create new segment
+      String rawTableName = TableNameBuilder.extractRawTableName(_tableName);
       File segmentDir = new File(downloadSegmentDir, segmentName);
       File outputDir = new File(downloadSegmentDir, OUTPUT_FOLDER);
       BackfillDateTimeColumn backfillDateTimeColumn =
-          new BackfillDateTimeColumn(segmentDir, outputDir, timeFieldSpec, dateTimeFieldSpec);
+          new BackfillDateTimeColumn(rawTableName, segmentDir, outputDir, timeFieldSpec, dateTimeFieldSpec);
       boolean backfillStatus = backfillDateTimeColumn.backfill();
       LOGGER
           .info("Backfill status for segment {} in {} to {} is {}", segmentName, segmentDir, outputDir, backfillStatus);
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java
index 6d7928d..967f006 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkDriver.java
@@ -319,9 +319,10 @@ public class PerfBenchmarkDriver {
    *
    * @param segmentMetadata segment metadata.
    */
-  public void addSegment(SegmentMetadata segmentMetadata) {
+  public void addSegment(String tableName, SegmentMetadata segmentMetadata) {
+    String rawTableName = TableNameBuilder.extractRawTableName(tableName);
     _helixResourceManager
-        .addNewSegment(segmentMetadata, "http://" + _controllerAddress + "/" + segmentMetadata.getName());
+        .addNewSegment(rawTableName, segmentMetadata, "http://" + _controllerAddress + "/" + segmentMetadata.getName());
   }
 
   public static void waitForExternalViewUpdate(String zkAddress, final String clusterName, long timeoutInMilliseconds) {
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkRunner.java b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkRunner.java
index 0ee0f56..22902db 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkRunner.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/PerfBenchmarkRunner.java
@@ -163,10 +163,10 @@ public class PerfBenchmarkRunner extends AbstractBaseCommand implements Command
       for (File segment : segments) {
         SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segment);
         if (!tableConfigured) {
-          driver.configureTable(segmentMetadata.getTableName(), invertedIndexColumns, bloomFilterColumns);
+          driver.configureTable(tableName, invertedIndexColumns, bloomFilterColumns);
           tableConfigured = true;
         }
-        driver.addSegment(segmentMetadata);
+        driver.addSegment(tableName, segmentMetadata);
       }
     }
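
Note on the benchmark changes above: both entry points now pass the table name to the driver explicitly rather than reading segmentMetadata.getTableName(). A minimal sketch of the resulting call pattern (not part of this commit; the helper name, table name, and directory are hypothetical, and the driver is assumed to be already constructed and running):

    // Illustrative sketch: register a table once, then add each segment under a
    // directory, passing the table name explicitly on every call.
    import java.io.File;
    import org.apache.pinot.core.segment.index.SegmentMetadataImpl;
    import org.apache.pinot.tools.perf.PerfBenchmarkDriver;

    public class AddSegmentsSketch {
      static void addSegments(PerfBenchmarkDriver driver, String tableName, File tableDataDir) throws Exception {
        driver.configureTable(tableName);                   // configure the table once, by name
        // Assumes tableDataDir exists and contains only segment directories.
        for (File segmentDir : tableDataDir.listFiles()) {
          SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segmentDir);
          driver.addSegment(tableName, segmentMetadata);    // table name now passed explicitly
        }
      }
    }

This removes the per-segment getTableName() lookups and the bookkeeping set that BenchmarkQueryEngine previously needed.
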
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/query/comparison/StarTreeQueryGenerator.java b/pinot-tools/src/main/java/org/apache/pinot/tools/query/comparison/StarTreeQueryGenerator.java
index e471428..44be668 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/query/comparison/StarTreeQueryGenerator.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/query/comparison/StarTreeQueryGenerator.java
@@ -324,12 +324,13 @@ public class StarTreeQueryGenerator {
   public static void main(String[] args)
       throws Exception {
     if (args.length != 2) {
-      System.err.println("Usage: StarTreeQueryGenerator starTreeSegmentsDirectory numQueries");
+      System.err.println("Usage: StarTreeQueryGenerator tableName starTreeSegmentsDirectory numQueries");
       return;
     }
 
     // Get segment metadata for the first segment to get table name and verify query is fit for star tree.
-    File segmentsDir = new File(args[0]);
+    String tableName = args[0];
+    File segmentsDir = new File(args[1]);
     Preconditions.checkState(segmentsDir.exists());
     Preconditions.checkState(segmentsDir.isDirectory());
     File[] segments = segmentsDir.listFiles();
@@ -337,11 +338,10 @@ public class StarTreeQueryGenerator {
     File segment = segments[0];
     IndexSegment indexSegment = ImmutableSegmentLoader.load(segment, ReadMode.heap);
     SegmentMetadata segmentMetadata = indexSegment.getSegmentMetadata();
-    String tableName = segmentMetadata.getTableName();
 
     // Set up star tree query generator.
-    int numQueries = Integer.parseInt(args[1]);
-    SegmentInfoProvider infoProvider = new SegmentInfoProvider(args[0]);
+    int numQueries = Integer.parseInt(args[2]);
+    SegmentInfoProvider infoProvider = new SegmentInfoProvider(args[1]);
     StarTreeQueryGenerator generator =
         new StarTreeQueryGenerator(tableName, infoProvider.getSingleValueDimensionColumns(),
             infoProvider.getMetricColumns(), infoProvider.getSingleValueDimensionValuesMap());
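
Note on the change above: the generator can no longer infer the table name from segment metadata, so per the updated usage message the CLI now expects the table name as its first argument (tableName starTreeSegmentsDirectory numQueries). A hypothetical invocation matching that usage (table name, path, and query count are made up):

    // Illustrative sketch: call the generator's main() with the new argument order.
    import org.apache.pinot.tools.query.comparison.StarTreeQueryGenerator;

    public class GenerateStarTreeQueriesSketch {
      public static void main(String[] args) throws Exception {
        StarTreeQueryGenerator.main(new String[]{"myTable", "/tmp/starTreeSegments", "100"});
      }
    }
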
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/realtime/provisioning/MemoryEstimator.java b/pinot-tools/src/main/java/org/apache/pinot/tools/realtime/provisioning/MemoryEstimator.java
index 7ec4af2..52ba2b4 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/realtime/provisioning/MemoryEstimator.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/realtime/provisioning/MemoryEstimator.java
@@ -36,7 +36,6 @@ import org.apache.pinot.core.io.readerwriter.RealtimeIndexOffHeapMemoryManager;
 import org.apache.pinot.core.io.writer.impl.DirectMemoryManager;
 import org.apache.pinot.core.realtime.impl.RealtimeSegmentConfig;
 import org.apache.pinot.core.realtime.impl.RealtimeSegmentStatsHistory;
-import org.apache.pinot.core.realtime.stream.StreamMessageMetadata;
 import org.apache.pinot.core.segment.index.SegmentMetadataImpl;
 
 
@@ -51,6 +50,7 @@ public class MemoryEstimator {
   private static final String STATS_FILE_COPY_NAME = "stats.copy.ser";
 
   private final TableConfig _tableConfig;
+  private final String _tableNameWithType;
   private final File _sampleCompletedSegment;
   private final long _sampleSegmentConsumedSeconds;
   private final long _maxUsableHostMemory;
@@ -69,6 +69,7 @@ public class MemoryEstimator {
   public MemoryEstimator(TableConfig tableConfig, File sampleCompletedSegment, long sampleSegmentConsumedSeconds,
       long maxUsableHostMemory) {
     _maxUsableHostMemory = maxUsableHostMemory;
     _tableConfig = tableConfig;
+    _tableNameWithType = tableConfig.getTableName();
     _sampleCompletedSegment = sampleCompletedSegment;
     _sampleSegmentConsumedSeconds = sampleSegmentConsumedSeconds;
@@ -87,7 +88,7 @@ public class MemoryEstimator {
     }
     _avgMultiValues = getAvgMultiValues();
 
-    _tableDataDir = new File(TMP_DIR, _segmentMetadata.getTableName());
+    _tableDataDir = new File(TMP_DIR, _tableNameWithType);
     try {
       FileUtils.deleteDirectory(_tableDataDir);
     } catch (IOException e) {
@@ -120,7 +121,7 @@ public class MemoryEstimator {
     // create a config
     RealtimeSegmentConfig.Builder realtimeSegmentConfigBuilder =
         new RealtimeSegmentConfig.Builder().setSegmentName(_segmentMetadata.getName())
-            .setStreamName(_segmentMetadata.getTableName()).setSchema(_segmentMetadata.getSchema())
+            .setStreamName(_tableNameWithType).setSchema(_segmentMetadata.getSchema())
             .setCapacity(_segmentMetadata.getTotalDocs()).setAvgNumMultiValues(_avgMultiValues)
             .setNoDictionaryColumns(_noDictionaryColumns).setInvertedIndexColumns(_invertedIndexColumns)
             .setRealtimeSegmentZKMetadata(segmentZKMetadata).setOffHeap(true).setMemoryManager(memoryManager)
@@ -218,7 +219,7 @@ public class MemoryEstimator {
 
     RealtimeSegmentConfig.Builder realtimeSegmentConfigBuilder =
         new RealtimeSegmentConfig.Builder().setSegmentName(_segmentMetadata.getName())
-            .setStreamName(_segmentMetadata.getTableName()).setSchema(_segmentMetadata.getSchema())
+            .setStreamName(_tableNameWithType).setSchema(_segmentMetadata.getSchema())
             .setCapacity(totalDocs).setAvgNumMultiValues(_avgMultiValues).setNoDictionaryColumns(_noDictionaryColumns)
             .setInvertedIndexColumns(_invertedIndexColumns).setRealtimeSegmentZKMetadata(segmentZKMetadata)
             .setOffHeap(true).setMemoryManager(memoryManager).setStatsHistory(statsHistory);
@@ -317,7 +318,6 @@ public class MemoryEstimator {
     realtimeSegmentZKMetadata.setStartTime(segmentMetadata.getStartTime());
     realtimeSegmentZKMetadata.setEndTime(segmentMetadata.getEndTime());
     realtimeSegmentZKMetadata.setCreationTime(segmentMetadata.getIndexCreationTime());
-    realtimeSegmentZKMetadata.setTableName(segmentMetadata.getTableName());
     realtimeSegmentZKMetadata.setSegmentName(segmentMetadata.getName());
     realtimeSegmentZKMetadata.setTimeUnit(segmentMetadata.getTimeUnit());
     realtimeSegmentZKMetadata.setTotalRawDocs(totalDocs);
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/scan/query/SegmentQueryProcessor.java b/pinot-tools/src/main/java/org/apache/pinot/tools/scan/query/SegmentQueryProcessor.java
index 62d684e..a218a13 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/scan/query/SegmentQueryProcessor.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/scan/query/SegmentQueryProcessor.java
@@ -54,7 +54,6 @@ class SegmentQueryProcessor {
 
   private final SegmentMetadataImpl _metadata;
   private final ImmutableSegment _immutableSegment;
-  private final String _tableName;
   private final String _segmentName;
   private final int _totalDocs;
 
@@ -64,7 +63,6 @@ class SegmentQueryProcessor {
     _immutableSegment = ImmutableSegmentLoader.load(_segmentDir, ReadMode.mmap);
     _metadata = new SegmentMetadataImpl(_segmentDir);
 
-    _tableName = _metadata.getTableName();
     _segmentName = _metadata.getName();
     _totalDocs = _metadata.getTotalDocs();
 
@@ -139,12 +137,6 @@ class SegmentQueryProcessor {
   }
 
   private boolean pruneSegment(BrokerRequest brokerRequest) {
-    // Check if segment belongs to the table being queried.
-    if (!_tableName.equals(brokerRequest.getQuerySource().getTableName())) {
-      LOGGER.debug("Skipping segment {} from different table {}", _segmentName, _tableName);
-      return true;
-    }
-
     // Check if any column in the query does not exist in the segment.
     Set<String> allColumns = _metadata.getAllColumns();
     if (brokerRequest.isSetAggregationsInfo()) {

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org