From eb54468a303ee33bc4e79499f3b7325a2adca15b Mon Sep 17 00:00:00 2001 From: zdeng Date: Sun, 1 Feb 2026 10:03:42 +0800 Subject: [PATCH 1/9] HIVE-29430: Extract get partitions from HMSHandler --- .../hadoop/hive/metastore/HMSHandler.java | 739 +++++------------- .../hadoop/hive/metastore/IHMSHandler.java | 2 + .../client/builder/GetPartitionsArgs.java | 13 + .../handler/AbstractRequestHandler.java | 1 + .../handler/GetPartitionsHandler.java | 477 +++++++++++ .../hive/metastore/handler/TAbstractBase.java | 71 ++ 6 files changed, 761 insertions(+), 542 deletions(-) create mode 100644 standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java create mode 100644 standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 6065574d9690..98908ef2403b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.handler.AbstractRequestHandler; import org.apache.hadoop.hive.metastore.handler.AddPartitionsHandler; import org.apache.hadoop.hive.metastore.handler.DropPartitionsHandler; +import org.apache.hadoop.hive.metastore.handler.GetPartitionsHandler; import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; @@ -122,9 +123,6 @@ public class HMSHandler extends FacebookBase implements IHMSHandler { public static final String ADMIN = "admin"; public static final String PUBLIC = "public"; - static 
final String NO_FILTER_STRING = ""; - static final int UNLIMITED_MAX_PARTITIONS = -1; - static final int LOG_SAMPLE_PARTITIONS_MAX_SIZE = 4; static final int LOG_SAMPLE_PARTITIONS_HALF_SIZE = 2; @@ -286,6 +284,11 @@ public IMetaStoreMetadataTransformer getMetadataTransformer() { return transformer; } + @Override + public MetaStoreFilterHook getMetaFilterHook() { + return filterHook; + } + @Override public void init() throws MetaException { init(new Warehouse(conf)); @@ -411,29 +414,6 @@ private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException { } } - /** - * Check if user can access the table associated with the partition. If not, then throw exception - * so user cannot access partitions associated with this table - * We are not calling Pre event listener for authorization because it requires getting the - * table object from DB, more overhead. Instead ,we call filter hook to filter out table if user - * has no access. Filter hook only requires table name, not table object. 
That saves DB access for - * table object, and still achieve the same purpose: checking if user can access the specified - * table - * - * @param catName catalog name of the table - * @param dbName database name of the table - * @param tblName table name - * @throws NoSuchObjectException - * @throws MetaException - */ - private void authorizeTableForPartitionMetadata( - final String catName, final String dbName, final String tblName) - throws NoSuchObjectException, MetaException { - - FilterUtils.checkDbAndTableFilters( - isServerFilterEnabled, filterHook, catName, dbName, tblName); - } - @Override public void setConf(Configuration conf) { HMSHandlerContext.setConfiguration(conf); @@ -3313,37 +3293,19 @@ public Partition get_partition(final String db_name, final String tbl_name, return ret; } - private Partition get_partition_core(final String db_name, final String tbl_name, - final List part_vals) throws MetaException, NoSuchObjectException { - String[] parsedDbName = parseDbName(db_name, conf); - startPartitionFunction("get_partition_core", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, part_vals); - - Partition ret = null; - Exception ex = null; - try { - authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); - ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - throw handleException(e).throwIfInstance(MetaException.class, NoSuchObjectException.class).defaultMetaException(); - } finally { - endFunction("get_partition_core", ret != null, ex, tbl_name); - } - return ret; - } - @Override public GetPartitionResponse get_partition_req(GetPartitionRequest req) throws MetaException, NoSuchObjectException, TException { - // TODO Move the logic from get_partition to here, as 
that method is getting deprecated - String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), req.getDbName(), conf); - Partition p = get_partition_core(dbName, req.getTblName(), req.getPartVals()); - GetPartitionResponse res = new GetPartitionResponse(); - res.setPartition(p); - return res; + String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); + TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); + List partitions = GetPartitionsHandler.getPartitions( + t -> startTableFunction("get_partition_req", catName, t.getDb(), t.getCat()), + ex -> endFunction("get_partition_req", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build()); + if (partitions == null || partitions.isEmpty()) { + throw new NoSuchObjectException("partition values=" + req.getPartVals()); + } + return new GetPartitionResponse(partitions.getFirst()); } /** @@ -3429,26 +3391,18 @@ public Partition get_partition_with_auth(final String db_name, final String user_name, final List group_names) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - startFunction("get_partition_with_auth", - " : tbl=" + TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) - + samplePartitionValues(part_vals) + getGroupsCountAndUsername(user_name,group_names)); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - Partition ret = null; - Exception ex = null; - try { - authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - - ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, part_vals, user_name, group_names); - ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - handleException(e).convertIfInstance(InvalidObjectException.class, 
NoSuchObjectException.class) - .rethrowException(e); - } finally { - endFunction("get_partition_with_auth", ret != null, ex, tbl_name); + TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + List partitions = GetPartitionsHandler.getPartitionsResult( + t -> startFunction("get_partition_with_auth", + " : tbl=" + t + samplePartitionValues(part_vals) + getGroupsCountAndUsername(user_name,group_names)), + ex -> endFunction("get_partition_with_auth", ex == null, ex, tbl_name), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .part_vals(part_vals).userName(user_name).groupNames(group_names).build()) + .result(); + if (partitions == null || partitions.isEmpty()) { + throw new NoSuchObjectException("partition values=" + part_vals); } - return ret; + return partitions.get(0); } /** @@ -3459,70 +3413,29 @@ public Partition get_partition_with_auth(final String db_name, @Deprecated public List get_partitions(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { - return get_partitions(db_name, tbl_name, - new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build()); - } - - private List get_partitions(final String db_name, final String tbl_name, - GetPartitionsArgs args) throws NoSuchObjectException, MetaException { String[] parsedDbName = parseDbName(db_name, conf); - List ret = null; - Exception ex = null; - try { - PartitionsRequest req = new PartitionsRequest(parsedDbName[DB_NAME], tbl_name); - req.setCatName(parsedDbName[CAT_NAME]); - req.setMaxParts((short)args.getMax()); - req.setSkipColumnSchemaForPartition(false); - req.setIncludeParamKeyPattern(args.getIncludeParamKeyPattern()); - req.setExcludeParamKeyPattern(args.getExcludeParamKeyPattern()); - ret = get_partitions_req(req).getPartitions(); - } catch (Exception e) { - ex = e; - throwMetaException(e); - } - return ret; - - } - - private List get_partitions_core(final String db_name, 
final String tbl_name, - GetPartitionsArgs args) throws NoSuchObjectException, MetaException { - String[] parsedDbName = parseDbName(db_name, conf); - startTableFunction("get_partitions_core", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - List ret = null; - Exception ex = null; - try { - checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, NO_FILTER_STRING, args.getMax()); - - authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - - ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, args); - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - throwMetaException(e); - } finally { - endFunction("get_partitions_core", ret != null, ex, tbl_name); - } - return ret; - + TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + return GetPartitionsHandler.getPartitions( + t -> startTableFunction("get_partitions", tableName.getCat(), tableName.getDb(), tableName.getTable()), + ex -> endFunction("get_partitions", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build()); } @Override public PartitionsResponse get_partitions_req(PartitionsRequest req) throws NoSuchObjectException, MetaException, TException { - String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), req.getDbName(), conf); - List partitions = get_partitions_core(dbName, req.getTblName(), - new GetPartitionsArgs.GetPartitionsArgsBuilder() - .max(req.getMaxParts()) + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); + List partitions = GetPartitionsHandler.getPartitions( + t -> startTableFunction("get_partitions_req", catName, req.getDbName(), req.getTblName()), + ex -> endFunction("get_partitions_req", ex == null, ex, + TableName.getQualified(catName, req.getDbName(), req.getTblName())), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .includeParamKeyPattern(req.getIncludeParamKeyPattern()) .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) - .build()); - PartitionsResponse res = new PartitionsResponse(); - res.setPartitions(partitions); - return res; + .max(req.getMaxParts()).build()); + return new PartitionsResponse(partitions); } @Override @@ -3530,45 +3443,14 @@ public PartitionsResponse get_partitions_req(PartitionsRequest req) public List get_partitions_with_auth(final String dbName, final String tblName, final short maxParts, final String userName, final List groupNames) throws TException { - return get_partitions_ps_with_auth(dbName, tblName, - new GetPartitionsArgs.GetPartitionsArgsBuilder() - .max(maxParts).userName(userName).groupNames(groupNames) - .build()); - } - - private void checkLimitNumberOfPartitionsByFilter(String catName, String dbName, - String tblName, String filterString, - int requestMax) throws TException { - if (exceedsPartitionFetchLimit(requestMax)) { - checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(prependCatalogToDbName( - catName, dbName, conf), tblName, filterString)); - } - } - - private void checkLimitNumberOfPartitionsByPs(String catName, String dbName, String tblName, - List partVals, int requestMax) - throws TException { - if (exceedsPartitionFetchLimit(requestMax)) { - checkLimitNumberOfPartitions(tblName, getNumPartitionsByPs(catName, dbName, tblName, - partVals)); - } - } - - // Check input count 
exceeding partition limit iff: - // 1. partition limit is enabled. - // 2. input count is greater than the limit. - private boolean exceedsPartitionFetchLimit(int count) { - int partitionLimit = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST); - return partitionLimit > -1 && (count < 0 || count > partitionLimit); - } - - private void checkLimitNumberOfPartitions(String tblName, int numPartitions) throws MetaException { - if (exceedsPartitionFetchLimit(numPartitions)) { - int partitionLimit = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST); - String configName = ConfVars.LIMIT_PARTITION_REQUEST.toString(); - throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, numPartitions, - tblName, partitionLimit, configName)); - } + String[] parsedDbName = parseDbName(dbName, conf); + TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); + return GetPartitionsHandler.getPartitionsResult( + t -> startTableFunction("get_partitions_with_auth", t.getCat(), t.getDb(), t.getTable()), + ex -> endFunction("get_partitions_with_auth", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .max(maxParts).userName(userName).groupNames(groupNames).build()) + .result(); } @Override @@ -3589,9 +3471,10 @@ public List get_partitions_pspec(final String db_name, final Stri getTableRequest.setCatName(catName); Table table = get_table_core(getTableRequest); // get_partitions will parse out the catalog and db names itself - List partitions = get_partitions(db_name, tableName, - new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build()); - + GetPartitionsHandler getPartitionsHandler = AbstractRequestHandler.offer(this, + new GetPartitionsHandler.GetPartitionsRequest(new TableName(catName, dbName, tableName), + new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build())); + List partitions = 
getPartitionsHandler.getResult().result(); if (is_partition_spec_grouping_enabled(table)) { partitionSpecs = MetaStoreServerUtils .getPartitionspecsGroupedByStorageDescriptor(table, partitions); @@ -3607,6 +3490,8 @@ public List get_partitions_pspec(final String db_name, final Stri } return partitionSpecs; + } catch (Exception e) { + throw handleException(e).throwIfInstance(NoSuchObjectException.class, MetaException.class).defaultMetaException(); } finally { endFunction("get_partitions_pspec", partitionSpecs != null && !partitionSpecs.isEmpty(), null, tbl_name); @@ -3691,51 +3576,54 @@ public List get_partition_names(final String db_name, final String tbl_n } @Override - public List fetch_partition_names_req(final PartitionsRequest partitionReq) + public List fetch_partition_names_req(final PartitionsRequest req) throws NoSuchObjectException, MetaException { - String catName = partitionReq.getCatName(); - String dbName = partitionReq.getDbName(); - String tbl_name = partitionReq.getTblName(); - startTableFunction("fetch_partition_names_req", catName, dbName, tbl_name); - fireReadTablePreEvent(catName, dbName, tbl_name); - List ret = null; - Exception ex = null; try { - authorizeTableForPartitionMetadata(catName, dbName, tbl_name); - ret = getMS().listPartitionNames(catName, dbName, tbl_name, partitionReq.getMaxParts()); - ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, - filterHook, catName, dbName, tbl_name, ret); - } catch (Exception e) { - ex = e; - throw newMetaException(e); - } finally { - endFunction("fetch_partition_names_req", ret != null, ex, tbl_name); + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + String dbName = req.getDbName(), tblName = req.getTblName(); + TableName tableName = new TableName(catName, dbName, tblName); + return GetPartitionsHandler.getPartitionNames( + t -> startTableFunction("fetch_partition_names_req", catName, dbName, tblName), + ex -> endFunction("fetch_partition_names_req", ex == null, ex, tableName.toString()), this, tableName, + new GetPartitionsArgs.GetPartitionsArgsBuilder().max(req.getMaxParts()).build()).result(); + } catch (TException ex) { + throw handleException(ex).throwIfInstance(NoSuchObjectException.class, MetaException.class).defaultMetaException(); } - return ret; } @Override public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException { String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); - String dbName = request.getDbName(); - String tblName = request.getTblName(); + TableName tableName = new TableName(catName, request.getDbName(), request.getTblName()); long maxParts = request.getMaxParts(); String filter = request.isSetFilter() ? request.getFilter() : ""; - startPartitionFunction("get_partition_values", catName, dbName, tblName, (int) maxParts, filter); + GetPartitionsHandler.GetPartitionsRequest getPartitionsRequest = + new GetPartitionsHandler.GetPartitionsRequest(tableName, + new GetPartitionsArgs.GetPartitionsArgsBuilder().max((int) maxParts).filter(filter).build()); + startPartitionFunction("get_partition_values", catName, tableName.getDb(), tableName.getTable(), + (int) maxParts, filter); + Exception ex = null; try { - authorizeTableForPartitionMetadata(catName, dbName, tblName); - // This is serious black magic, as the following 2 lines do nothing AFAICT but without them // the subsequent call to listPartitionValues fails. 
- List partCols = new ArrayList(); - partCols.add(request.getPartitionKeys().get(0)); - return getMS().listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(), - request.isApplyDistinct(), request.getFilter(), request.isAscending(), - request.getPartitionOrder(), request.getMaxParts()); - } catch (NoSuchObjectException e) { - LOG.error(String.format("Unable to get partition for %s.%s.%s", catName, dbName, tblName), e); - throw new MetaException(e.getMessage()); + getPartitionsRequest.setApplyDistinct(request.isApplyDistinct()); + getPartitionsRequest.setAscending(request.isAscending()); + getPartitionsRequest.setPartitionKeys(request.getPartitionKeys()); + getPartitionsRequest.setPartitionOrders(request.getPartitionOrder()); + getPartitionsRequest.setGetPartitionValues(true); + GetPartitionsHandler getPartitionsHandler = + AbstractRequestHandler.offer(this, getPartitionsRequest); + List resps = getPartitionsHandler.getResult().result(); + if (resps == null || resps.isEmpty()) { + throw new MetaException(String.format("Unable to get partition for %s", tableName.toString())); + } + return resps.getFirst(); + } catch (Exception e) { + ex = e; + throw handleException(e).throwIfInstance(MetaException.class).defaultMetaException(); + } finally { + endFunction("get_partition_values", ex == null, ex, tableName.toString()); } } @@ -4487,48 +4375,22 @@ private List getPartValsFromName(RawStore ms, String catName, String dbN return getPartValsFromName(t, partName); } - private Partition get_partition_by_name_core(final RawStore ms, final String catName, - final String db_name, final String tbl_name, - final String part_name) throws TException { - fireReadTablePreEvent(catName, db_name, tbl_name); - List partVals; - try { - partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name); - } catch (InvalidObjectException e) { - throw new NoSuchObjectException(e.getMessage()); - } - Partition p = ms.getPartition(catName, db_name, tbl_name, partVals); 
- p = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, p); - - if (p == null) { - throw new NoSuchObjectException(TableName.getQualified(catName, db_name, tbl_name) - + " partition (" + part_name + ") not found"); - } - return p; - } - @Override @Deprecated public Partition get_partition_by_name(final String db_name, final String tbl_name, final String part_name) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - startFunction("get_partition_by_name", ": tbl=" + - TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) - + " part=" + part_name); - Partition ret = null; - Exception ex = null; - try { - ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, part_name); - ret = FilterUtils.filterPartitionIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partition_by_name", ret != null, ex, tbl_name); + TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + List partitions = GetPartitionsHandler.getPartitions( + t -> startFunction("get_partition_by_name", ": tbl=" + t + " part=" + part_name), + ex -> endFunction("get_partition_by_name", ex == null, ex, tableName.toString()), + this, tableName, + new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(Arrays.asList(part_name)).build()); + if (partitions == null || partitions.isEmpty()) { + throw new NoSuchObjectException(tableName + " partition (" + part_name + ") not found"); } - return ret; + return partitions.getFirst(); } @Deprecated @@ -4592,27 +4454,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partitions_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { - String[] parsedDbName = parseDbName(db_name, conf); - 
startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, max_parts, - part_vals); - - List ret = null; - Exception ex = null; - try { - authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - // Don't send the parsedDbName, as this method will parse itself. - ret = get_partitions_ps_with_auth(db_name, tbl_name, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .part_vals(part_vals).max(max_parts) - .build()); - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partitions_ps", ret != null, ex, tbl_name); - } - - return ret; + return get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, null, null); } /** @@ -4625,56 +4467,31 @@ public List get_partitions_ps_with_auth(final String db_name, final String tbl_name, final List part_vals, final short max_parts, final String userName, final List groupNames) throws TException { - return get_partitions_ps_with_auth(db_name, tbl_name, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames) - .build()); - } - - private List get_partitions_ps_with_auth(final String db_name, - final String tbl_name, GetPartitionsArgs args) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - startPartitionFunction("get_partitions_ps_with_auth", parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, args.getPart_vals()); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - List ret = null; - Exception ex = null; - try { - if (args.getPart_vals() != null) { - checkLimitNumberOfPartitionsByPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, args.getPart_vals(), args.getMax()); - } else { - checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, 
NO_FILTER_STRING, args.getMax()); - } - authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, args); - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - handleException(e).convertIfInstance(InvalidObjectException.class, MetaException.class).rethrowException(e); - } finally { - endFunction("get_partitions_ps_with_auth", ret != null, ex, tbl_name); - } - return ret; + TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + return GetPartitionsHandler.getPartitions( + t -> startTableFunction("get_partitions_ps_with_auth", t.getCat(), t.getDb(), t.getTable()), + ex -> endFunction("get_partitions_ps_with_auth", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames).build()); } @Override public GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(GetPartitionsPsWithAuthRequest req) throws MetaException, NoSuchObjectException, TException { - String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), req.getDbName(), conf); - List partitions = - get_partitions_ps_with_auth(dbName, req.getTblName(), new GetPartitionsArgs.GetPartitionsArgsBuilder() + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); + List partitions = GetPartitionsHandler.getPartitionsResult( + t -> startTableFunction("get_partition_names_req", catName, t.getDb(), t.getTable()), + ex -> endFunction("get_partitions_ps_with_auth_req", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .part_vals(req.getPartVals()).max(req.getMaxParts()) .userName(req.getUserName()).groupNames(req.getGroupNames()) .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) .includeParamKeyPattern(req.getIncludeParamKeyPattern()) - .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) - .partNames(req.getPartNames()) - .build()); - GetPartitionsPsWithAuthResponse res = new GetPartitionsPsWithAuthResponse(); - res.setPartitions(partitions); - return res; + .excludeParamKeyPattern(req.getExcludeParamKeyPattern()).partNames(req.getPartNames()).build()) + .result(); + return new GetPartitionsPsWithAuthResponse(partitions); } /** @@ -4707,36 +4524,18 @@ public List get_partition_names_ps(final String db_name, return ret; } - private List get_partition_names_ps_core(final String db_name, - final String tbl_name, final List part_vals, final short max_parts) - throws TException { - String[] parsedDbName = parseDbName(db_name, conf); - startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, part_vals); - fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - List ret = null; - Exception ex = null; - try { - authorizeTableForPartitionMetadata(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, - part_vals, max_parts); - ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, - filterHook, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
tbl_name, ret); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partitions_names_ps", ret != null, ex, tbl_name); - } - return ret; - } - @Override public GetPartitionNamesPsResponse get_partition_names_ps_req(GetPartitionNamesPsRequest req) throws MetaException, NoSuchObjectException, TException { - String dbName = MetaStoreUtils.prependCatalogToDbName(req.getCatName(), req.getDbName(), conf); - List names = get_partition_names_ps_core(dbName, req.getTblName(), req.getPartValues(), - req.getMaxParts()); + String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); + String dbName = req.getDbName(), tblName = req.getTblName(); + TableName tableName = new TableName(catName, dbName, tblName); + List names = GetPartitionsHandler.getPartitionNames( + t -> startTableFunction("get_partition_names_ps_req", catName, dbName, tblName), + ex -> endFunction("get_partition_names_ps_req", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .max(req.getMaxParts()).part_vals(req.getPartValues()).build()) + .result(); GetPartitionNamesPsResponse res = new GetPartitionNamesPsResponse(); res.setNames(names); return res; @@ -4747,24 +4546,14 @@ public List get_partition_names_req(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, TException { String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); String dbName = req.getDbName(), tblName = req.getTblName(); - startTableFunction("get_partition_names_req", catName, - dbName, tblName); - fireReadTablePreEvent(catName, dbName, tblName); - List ret = null; - Exception ex = null; - try { - authorizeTableForPartitionMetadata(catName, dbName, tblName); - ret = getMS().listPartitionNames(catName, dbName, tblName, - req.getDefaultPartitionName(), req.getExpr(), req.getOrder(), req.getMaxParts()); - ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, - filterHook, catName, dbName, tblName, ret); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partition_names_req", ret != null, ex, tblName); - } - return ret; + TableName tableName = new TableName(catName, dbName, tblName); + return GetPartitionsHandler.getPartitionNames( + t -> startTableFunction("get_partition_names_req", catName, dbName, tblName), + ex -> endFunction("get_partition_names_req", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .defaultPartName(req.getDefaultPartitionName()).max(req.getMaxParts()) + .expr(req.getExpr()).order(req.getOrder()).build()) + .result(); } @Override @@ -5108,50 +4897,26 @@ public List get_partitions_by_filter(final String dbName, final Strin final String filter, final short maxParts) throws TException { String[] parsedDbName = parseDbName(dbName, conf); - return get_partitions_by_filter_internal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, - new GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max(maxParts).build()); - } - - private List get_partitions_by_filter_internal(final String catName, - final String dbName, final String tblName, GetPartitionsArgs args) throws TException { - startTableFunction("get_partitions_by_filter", catName, dbName, - tblName); - fireReadTablePreEvent(catName, dbName, tblName); - List ret = null; - Exception ex = 
null; - RawStore rs = getMS(); - try { - authorizeTableForPartitionMetadata(catName, dbName, tblName); - if (exceedsPartitionFetchLimit(args.getMax())) { - // Since partition limit is configured, we need fetch at most (limit + 1) partition names - int max = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST) + 1; - args = new GetPartitionsArgs.GetPartitionsArgsBuilder(args).max(max).build(); - List partNames = rs.listPartitionNamesByFilter(catName, dbName, tblName, args); - checkLimitNumberOfPartitions(tblName, partNames.size()); - ret = rs.getPartitionsByNames(catName, dbName, tblName, - new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build()); - } else { - ret = rs.getPartitionsByFilter(catName, dbName, tblName, args); - } - - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partitions_by_filter", ret != null, ex, tblName); - } - return ret; + TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); + return GetPartitionsHandler.getPartitionsResult( + t -> startTableFunction("get_partitions_by_filter", t.getCat(), t.getDb(), t.getTable()), + ex -> endFunction("get_partitions_by_filter", ex == null, ex, tableName.toString()), + this, tableName, + new GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max(maxParts).build()).result(); } + @Override public List get_partitions_by_filter_req(GetPartitionsByFilterRequest req) throws TException { - return get_partitions_by_filter_internal(req.getCatName(), req.getDbName(), req.getTblName(), - new GetPartitionsArgs.GetPartitionsArgsBuilder() + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); + return GetPartitionsHandler.getPartitionsResult( + t -> startTableFunction("get_partitions_by_filter", catName, t.getDb(), t.getTable()), + ex -> endFunction("get_partitions_by_filter", ex == null, ex, tableName.toString()), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .filter(req.getFilter()).max(req.getMaxParts()) .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) - .includeParamKeyPattern(req.getIncludeParamKeyPattern()) - .build()); + .includeParamKeyPattern(req.getIncludeParamKeyPattern()).build()).result(); } @Override @@ -5197,32 +4962,23 @@ public PartitionsSpecByExprResult get_partitions_spec_by_expr( PartitionsByExprRequest req) throws TException { String dbName = req.getDbName(), tblName = req.getTblName(); String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); - startTableFunction("get_partitions_spec_by_expr", catName, dbName, tblName); - fireReadTablePreEvent(catName, dbName, tblName); - PartitionsSpecByExprResult ret = null; - Exception ex = null; - try { - PartitionsByExprResult result = get_partitions_by_expr_internal(catName, dbName, tblName, - new GetPartitionsArgs.GetPartitionsArgsBuilder() - .expr(req.getExpr()).max(req.getMaxParts()).defaultPartName(req.getDefaultPartitionName()) - .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) - .includeParamKeyPattern(req.getIncludeParamKeyPattern()) - .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) - .build()); - - GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName); - getTableRequest.setCatName(catName); - Table table = get_table_core(getTableRequest); - List partitionSpecs = - MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(table, result.getPartitions()); - ret = new 
PartitionsSpecByExprResult(partitionSpecs, result.isHasUnknownPartitions()); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partitions_spec_by_expr", ret != null, ex, tblName); - } - return ret; + TableName tableName = new TableName(catName, dbName, tblName); + GetPartitionsHandler.GetPartitionsResult result = + GetPartitionsHandler.getPartitionsResult( + t -> startTableFunction("get_partitions_spec_by_expr", catName, dbName, req.getTblName()), + e -> endFunction("get_partitions_spec_by_expr", e == null, e, + TableName.getQualified(catName, req.getDbName(), req.getTblName())), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .expr(req.getExpr()).max(req.getMaxParts()).defaultPartName(req.getDefaultPartitionName()) + .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) + .includeParamKeyPattern(req.getIncludeParamKeyPattern()) + .excludeParamKeyPattern(req.getExcludeParamKeyPattern()).build()); + GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName); + getTableRequest.setCatName(catName); + Table table = get_table_core(getTableRequest); + List partitionSpecs = + MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(table, result.result()); + return new PartitionsSpecByExprResult(partitionSpecs, result.success()); } @Override @@ -5230,46 +4986,20 @@ public PartitionsByExprResult get_partitions_by_expr( PartitionsByExprRequest req) throws TException { String dbName = req.getDbName(), tblName = req.getTblName(); String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); - String expr = req.isSetExpr() ? Arrays.toString((req.getExpr())) : ""; + TableName tableName = new TableName(catName, dbName, tblName); String defaultPartitionName = req.isSetDefaultPartitionName() ? 
req.getDefaultPartitionName() : ""; int maxParts = req.getMaxParts(); - startPartitionFunction("get_partitions_by_expr", catName, dbName, tblName, maxParts, expr, defaultPartitionName); - fireReadTablePreEvent(catName, dbName, tblName); - PartitionsByExprResult ret = null; - Exception ex = null; - try { - ret = get_partitions_by_expr_internal(catName, dbName, tblName, - new GetPartitionsArgs.GetPartitionsArgsBuilder() - .expr(req.getExpr()).defaultPartName(req.getDefaultPartitionName()).max(req.getMaxParts()) - .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) - .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) - .includeParamKeyPattern(req.getIncludeParamKeyPattern()) - .build()); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("get_partitions_by_expr", ret != null, ex, tblName); - } - return ret; - } - - private PartitionsByExprResult get_partitions_by_expr_internal( - String catName, String dbName, String tblName, GetPartitionsArgs args) throws TException { - List partitions = new LinkedList<>(); - boolean hasUnknownPartitions = false; - RawStore rs = getMS(); - if (exceedsPartitionFetchLimit(args.getMax())) { - // Since partition limit is configured, we need fetch at most (limit + 1) partition names - int max = MetastoreConf.getIntVar(conf, ConfVars.LIMIT_PARTITION_REQUEST) + 1; - List partNames = rs.listPartitionNames(catName, dbName, tblName, args.getDefaultPartName(), args.getExpr(), null, max); - checkLimitNumberOfPartitions(tblName, partNames.size()); - partitions = rs.getPartitionsByNames(catName, dbName, tblName, - new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build()); - } else { - hasUnknownPartitions = rs.getPartitionsByExpr(catName, dbName, tblName, partitions, args); - } - return new PartitionsByExprResult(partitions, hasUnknownPartitions); + GetPartitionsHandler.GetPartitionsResult result = + GetPartitionsHandler.getPartitionsResult( + t -> 
startTableFunction("get_partitions_by_expr", catName, dbName, req.getTblName()), + ex -> endFunction("get_partitions_by_expr", ex == null, ex, + TableName.getQualified(catName, req.getDbName(), req.getTblName())), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() + .expr(req.getExpr()).defaultPartName(defaultPartitionName).max(maxParts) + .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) + .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) + .includeParamKeyPattern(req.getIncludeParamKeyPattern()).build()); + return new PartitionsByExprResult(result.result(), result.success()); } @Override @@ -5299,116 +5029,46 @@ public int get_num_partitions_by_filter(final String dbName, return ret; } - private int getNumPartitionsByPs(final String catName, final String dbName, - final String tblName, List partVals) - throws TException { - String[] parsedDbName = parseDbName(dbName, conf); - startTableFunction("getNumPartitionsByPs", parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tblName); - - int ret = -1; - Exception ex = null; - try { - ret = getMS().getNumPartitionsByPs(catName, dbName, tblName, partVals); - } catch (Exception e) { - ex = e; - rethrowException(e); - } finally { - endFunction("getNumPartitionsByPs", ret != -1, ex, tblName); - } - return ret; - } - @Override @Deprecated public List get_partitions_by_names(final String dbName, final String tblName, final List partNames) throws TException { - return get_partitions_by_names(dbName, tblName, false, null, null, null, - new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(partNames).build()); + String[] dbNameParts = parseDbName(dbName, conf); + TableName tableName = new TableName(dbNameParts[CAT_NAME], dbNameParts[DB_NAME], tblName); + return GetPartitionsHandler.getPartitionsResult( + t -> startTableFunction("get_partitions_by_names", t.getCat(), t.getDb(), t.getTable()), + ex -> endFunction("get_partitions_by_names", ex == null, ex, tableName.toString()), + this, 
tableName, + new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(partNames).build()).result(); } @Override public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest gpbnr) throws TException { - List partitions = get_partitions_by_names(gpbnr.getDb_name(), - gpbnr.getTbl_name(), - gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats(), gpbnr.getEngine(), - gpbnr.getProcessorCapabilities(), gpbnr.getProcessorIdentifier(), - new GetPartitionsArgs.GetPartitionsArgsBuilder() + String[] dbNameParts = parseDbName(gpbnr.getDb_name(), conf); + TableName tableName = new TableName(dbNameParts[CAT_NAME], dbNameParts[DB_NAME], gpbnr.getTbl_name()); + GetPartitionsHandler.GetPartitionsRequest request = + new GetPartitionsHandler.GetPartitionsRequest(tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .partNames(gpbnr.getNames()).skipColumnSchemaForPartition(gpbnr.isSkipColumnSchemaForPartition()) .excludeParamKeyPattern(gpbnr.getExcludeParamKeyPattern()) - .includeParamKeyPattern(gpbnr.getIncludeParamKeyPattern()) - .build()); - GetPartitionsByNamesResult result = new GetPartitionsByNamesResult(partitions); - return result; - } - - private List get_partitions_by_names(final String dbName, final String tblName, - boolean getColStats, String engine, - List processorCapabilities, String processorId, - GetPartitionsArgs args) throws TException { - - String[] dbNameParts = parseDbName(dbName, conf); - String parsedCatName = dbNameParts[CAT_NAME]; - String parsedDbName = dbNameParts[DB_NAME]; - List ret = null; - Table table = null; + .includeParamKeyPattern(gpbnr.getIncludeParamKeyPattern()).build()); + request.setEngine(gpbnr.getEngine()); + request.setGetColStats(gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats()); + request.setProcessorId(gpbnr.getProcessorIdentifier()); + request.setProcessorCapabilities(gpbnr.getProcessorCapabilities()); + startTableFunction("get_partitions_by_names", tableName.getCat(), tableName.getDb(),
tableName.getTable()); Exception ex = null; - boolean success = false; - startTableFunction("get_partitions_by_names", parsedCatName, parsedDbName, tblName); - try { - getMS().openTransaction(); - authorizeTableForPartitionMetadata(parsedCatName, parsedDbName, tblName); - - fireReadTablePreEvent(parsedCatName, parsedDbName, tblName); - - checkLimitNumberOfPartitions(tblName, args.getPartNames().size()); - ret = getMS().getPartitionsByNames(parsedCatName, parsedDbName, tblName, args); - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); - table = getTable(parsedCatName, parsedDbName, tblName); - - // If requested add column statistics in each of the partition objects - if (getColStats) { - // Since each partition may have stats collected for different set of columns, we - // request them separately. - for (Partition part: ret) { - String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); - List partColStatsList = - getMS().getPartitionColumnStatistics(parsedCatName, parsedDbName, tblName, - Collections.singletonList(partName), - StatsSetupConst.getColumnsHavingStats(part.getParameters()), - engine); - if (partColStatsList != null && !partColStatsList.isEmpty()) { - ColumnStatistics partColStats = partColStatsList.get(0); - if (partColStats != null) { - part.setColStats(partColStats); - } - } - } - } - - if (processorCapabilities == null || processorCapabilities.size() == 0 || - processorCapabilities.contains("MANAGERAWMETADATA")) { - LOG.info("Skipping translation for processor with " + processorId); - } else { - if (transformer != null) { - ret = transformer.transformPartitions(ret, table, processorCapabilities, processorId); - } - } - success = getMS().commitTransaction(); + try { + GetPartitionsHandler getPartitionsHandler = AbstractRequestHandler.offer(this, request); + List partitions = getPartitionsHandler.getResult().result(); + return new GetPartitionsByNamesResult(partitions); } catch (Exception 
e) { ex = e; - throw handleException(e) - .throwIfInstance(MetaException.class, NoSuchObjectException.class, InvalidObjectException.class) - .defaultMetaException(); + throw handleException(e).throwIfInstance(TException.class).defaultTException(); } finally { - if (!success) { - getMS().rollbackTransaction(); - } - endFunction("get_partitions_by_names", ret != null, ex, tblName); + endFunction("get_partitions_by_names", ex == null, ex, tableName.toString()); } - return ret; } /** @@ -7081,11 +6741,6 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest req) throws TExcepti } } - private Table getTable(String catName, String dbName, String tableName) - throws MetaException, InvalidObjectException { - return getTable(catName, dbName, tableName, null); - } - private Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException, InvalidObjectException { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java index 3d8c21f07557..7538ce896bfa 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java @@ -110,4 +110,6 @@ DataConnector get_dataconnector_core(final String name) AbortCompactResponse abort_Compactions(AbortCompactionRequest rqst) throws TException; IMetaStoreMetadataTransformer getMetadataTransformer(); + + MetaStoreFilterHook getMetaFilterHook(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java index 627e10ade3fc..b1e1d498cefd 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java @@ -31,6 +31,7 @@ public class GetPartitionsArgs { private String includeParamKeyPattern; private String excludeParamKeyPattern; private boolean skipColumnSchemaForPartition; + private String order; private GetPartitionsArgs() { @@ -80,6 +81,10 @@ public boolean isSkipColumnSchemaForPartition() { return skipColumnSchemaForPartition; } + public String getOrder() { + return order; + } + public static class GetPartitionsArgsBuilder { private String filter; private byte[] expr; @@ -91,6 +96,7 @@ public static class GetPartitionsArgsBuilder { private List groupNames; private String includeParamKeyPattern; private String excludeParamKeyPattern; + private String order; private boolean skipColumnSchemaForPartition = false; public GetPartitionsArgsBuilder() { @@ -109,6 +115,7 @@ public GetPartitionsArgsBuilder(GetPartitionsArgs args) { this.includeParamKeyPattern = args.includeParamKeyPattern; this.excludeParamKeyPattern = args.excludeParamKeyPattern; this.skipColumnSchemaForPartition = args.skipColumnSchemaForPartition; + this.order = args.order; } public GetPartitionsArgsBuilder filter(String filter) { @@ -146,6 +153,11 @@ public GetPartitionsArgsBuilder userName(String userName) { return this; } + public GetPartitionsArgsBuilder order(String order) { + this.order = order; + return this; + } + public GetPartitionsArgsBuilder groupNames(List groupNames) { this.groupNames = groupNames; return this; @@ -179,6 +191,7 @@ public GetPartitionsArgs build() { additionalArgs.includeParamKeyPattern = includeParamKeyPattern; additionalArgs.excludeParamKeyPattern = excludeParamKeyPattern; additionalArgs.skipColumnSchemaForPartition = skipColumnSchemaForPartition; + additionalArgs.order = order; return additionalArgs; } } diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java index d8d7316e876a..14528799b7ca 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AbstractRequestHandler.java @@ -58,6 +58,7 @@ import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST; import static org.apache.hadoop.hive.metastore.utils.JavaUtils.newInstance; +@SuppressWarnings("rawtypes") public abstract class AbstractRequestHandler { private static final Logger LOG = LoggerFactory.getLogger(AbstractRequestHandler.class); private static final Map ID_TO_HANDLER = new ConcurrentHashMap<>(); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java new file mode 100644 index 000000000000..86bef3ceb563 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -0,0 +1,477 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.handler; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.function.Consumer; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.HMSHandler; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.MetaStoreFilterHook; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.GetTableRequest; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; +import org.apache.hadoop.hive.metastore.utils.FilterUtils; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static 
org.apache.hadoop.hive.metastore.ExceptionHandler.handleException; +import static org.apache.hadoop.hive.metastore.HMSHandler.PARTITION_NUMBER_EXCEED_LIMIT_MSG; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + +// Collect get partitions APIs together +@SuppressWarnings({"unchecked", "rawtypes"}) +@RequestHandler(requestBody = GetPartitionsHandler.GetPartitionsRequest.class) +public class GetPartitionsHandler extends AbstractRequestHandler> { + private static final Logger LOG = LoggerFactory.getLogger(GetPartitionsHandler.class); + private static final String NO_FILTER_STRING = ""; + private RawStore rs; + private String catName; + private String dbName; + private String tblName; + private GetPartitionsArgs args; + private Table table; + private Configuration conf; + private GetMethod getMethod = GetMethod.ALL; + private MetaStoreFilterHook filterHook; + private boolean isServerFilterEnabled; + + enum GetMethod { + EXPR, NAMES, FILTER, PART_VALS, ALL, VALUES + } + + GetPartitionsHandler(IHMSHandler handler, GetPartitionsRequest request) { + super(handler, false, request); + } + + @Override + protected void beforeExecute() throws TException, IOException { + this.args = request.getGetPartitionsArgs(); + if (request.isGetPartitionValues()) { + getMethod = GetMethod.VALUES; + } else if (args.getExpr() != null) { + getMethod = GetMethod.EXPR; + } else if (args.getFilter() != null) { + getMethod = GetMethod.FILTER; + } else if (args.getPartNames() != null) { + getMethod = GetMethod.NAMES; + } else if (args.getPart_vals() != null) { + getMethod = GetMethod.PART_VALS; + } + + this.catName = normalizeIdentifier(request.getTableName().getCat()); + this.dbName = normalizeIdentifier(request.getTableName().getDb()); + this.tblName = normalizeIdentifier(request.getTableName().getTable()); + this.conf = handler.getConf(); + this.rs = handler.getMS(); + this.filterHook = handler.getMetaFilterHook(); + this.isServerFilterEnabled = filterHook 
!= null; + GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName); + getTableRequest.setCatName(catName); + this.table = handler.get_table_core(getTableRequest); + ((HMSHandler) handler).firePreEvent(new PreReadTableEvent(table, handler)); + authorizeTableForPartitionMetadata(); + } + + @Override + protected GetPartitionsResult execute() throws TException, IOException { + return (GetPartitionsResult) switch (getMethod) { + case EXPR -> getPartitionsByExpr(); + case FILTER -> getPartitionsByFilter(); + case NAMES -> getPartitionsByNames(); + case PART_VALS -> getPartitionsByVals(); + case ALL -> getPartitions(); + case VALUES -> getPartitionValues(); + }; + } + + private GetPartitionsResult getPartitionsByVals() throws TException { + if (request.isFetchPartNames()) { + List ret = rs.listPartitionNamesPs(catName, dbName, tblName, + args.getPart_vals(), (short) args.getMax()); + ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, + filterHook, catName, dbName, tblName, ret); + return new GetPartitionsResult<>(ret, true); + } else { + List ret; + if (args.getPart_vals() != null) { + checkLimitNumberOfPartitionsByPs(args.getPart_vals(), args.getMax()); + } else { + checkLimitNumberOfPartitionsByFilter(NO_FILTER_STRING, args.getMax()); + } + ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args); + return new GetPartitionsResult(ret, true); + } + } + + private GetPartitionsResult getPartitionValues() throws MetaException { + PartitionValuesResponse resp = rs.listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(), + request.isApplyDistinct(), args.getFilter(), request.isAscending(), + request.getPartitionOrders(), args.getMax()); + return new GetPartitionsResult<>(Arrays.asList(resp), true); + } + + private void checkLimitNumberOfPartitionsByPs(List partVals, int requestMax) + throws TException { + if (exceedsPartitionFetchLimit(requestMax)) { + checkLimitNumberOfPartitions(tblName, 
rs.getNumPartitionsByPs(catName, dbName, tblName, + partVals)); + } + } + + private GetPartitionsResult getPartitionsByFilter() throws TException { + List ret = null; + if (exceedsPartitionFetchLimit(args.getMax())) { + // Since partition limit is configured, we need fetch at most (limit + 1) partition names + int max = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST) + 1; + args = new GetPartitionsArgs.GetPartitionsArgsBuilder(args).max(max).build(); + List partNames = rs.listPartitionNamesByFilter(catName, dbName, tblName, args); + checkLimitNumberOfPartitions(tblName, partNames.size()); + ret = rs.getPartitionsByNames(catName, dbName, tblName, + new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build()); + } else { + ret = rs.getPartitionsByFilter(catName, dbName, tblName, args); + } + + ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); + return new GetPartitionsResult<>(ret, true); + } + + /** + * Check if user can access the table associated with the partition. If not, then throw exception + * so user cannot access partitions associated with this table + * We are not calling Pre event listener for authorization because it requires getting the + * table object from DB, more overhead. Instead ,we call filter hook to filter out table if user + * has no access. Filter hook only requires table name, not table object. 
That saves DB access for + * table object, and still achieve the same purpose: checking if user can access the specified + * table + * + * @throws NoSuchObjectException + * @throws MetaException + */ + private void authorizeTableForPartitionMetadata() + throws NoSuchObjectException, MetaException { + FilterUtils.checkDbAndTableFilters( + isServerFilterEnabled, filterHook, catName, dbName, tblName); + } + + private GetPartitionsResult getPartitionsByNames() throws TException { + List ret = null; + boolean success = false; + try { + rs.openTransaction(); + checkLimitNumberOfPartitions(tblName, args.getPartNames().size()); + ret = rs.getPartitionsByNames(catName, dbName, tblName, args); + ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); + + // If requested add column statistics in each of the partition objects + if (request.isGetColStats()) { + // Since each partition may have stats collected for different set of columns, we + // request them separately.
+ for (Partition part: ret) { + String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); + List partColStatsList = + rs.getPartitionColumnStatistics(catName, dbName, tblName, + Collections.singletonList(partName), + StatsSetupConst.getColumnsHavingStats(part.getParameters()), + request.getEngine()); + if (partColStatsList != null && !partColStatsList.isEmpty()) { + ColumnStatistics partColStats = partColStatsList.get(0); + if (partColStats != null) { + part.setColStats(partColStats); + } + } + } + } + + List processorCapabilities = request.getProcessorCapabilities(); + if (processorCapabilities == null || processorCapabilities.isEmpty() || + processorCapabilities.contains("MANAGERAWMETADATA")) { + LOG.info("Skipping translation for processor with {}", request.getProcessorId()); + } else { + if (handler.getMetadataTransformer() != null) { + ret = handler.getMetadataTransformer().transformPartitions(ret, table, + processorCapabilities, request.getProcessorId()); + } + } + success = rs.commitTransaction(); + } finally { + if (!success) { + rs.rollbackTransaction(); + } + } + return new GetPartitionsResult<>(ret, true); + } + + private GetPartitionsResult getPartitions() throws TException { + if (request.isFetchPartNames()) { + List ret = rs.listPartitionNames(catName, dbName, tblName, (short) args.getMax()); + ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, + filterHook, catName, dbName, tblName, ret); + return new GetPartitionsResult<>(ret, true); + } else { + List ret; + checkLimitNumberOfPartitionsByFilter(NO_FILTER_STRING, args.getMax()); + if (args.getUserName() == null) { + ret = rs.getPartitions(catName, dbName, tblName, args); + } else { + ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args); + } + ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); + return new GetPartitionsResult<>(ret, true); + } + } + + private void checkLimitNumberOfPartitionsByFilter(String 
filterString, int requestMax) throws TException { + if (exceedsPartitionFetchLimit(requestMax)) { + checkLimitNumberOfPartitions(tblName, rs.getNumPartitionsByFilter(catName, dbName, tblName, filterString)); + } + } + + private GetPartitionsResult getPartitionsByExpr() throws TException { + if (request.isFetchPartNames()) { + List ret = rs.listPartitionNames(catName, dbName, tblName, + args.getDefaultPartName(), args.getExpr(), args.getOrder(), args.getMax()); + ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, + filterHook, catName, dbName, tblName, ret); + return new GetPartitionsResult(ret, true); + } else { + List partitions = new LinkedList<>(); + boolean hasUnknownPartitions = false; + if (exceedsPartitionFetchLimit(args.getMax())) { + // Since partition limit is configured, we need fetch at most (limit + 1) partition names + int max = MetastoreConf.getIntVar(handler.getConf(), MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST) + 1; + List partNames = rs.listPartitionNames(catName, dbName, tblName, args.getDefaultPartName(), + args.getExpr(), null, max); + checkLimitNumberOfPartitions(tblName, partNames.size()); + partitions = rs.getPartitionsByNames(catName, dbName, tblName, + new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build()); + } else { + hasUnknownPartitions = rs.getPartitionsByExpr(catName, dbName, tblName, partitions, args); + } + return new GetPartitionsResult<>(partitions, hasUnknownPartitions); + } + } + + // Check input count exceeding partition limit iff: + // 1. partition limit is enabled. + // 2. input count is greater than the limit. 
+ private boolean exceedsPartitionFetchLimit(int count) { + int partitionLimit = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST); + return partitionLimit > -1 && (count < 0 || count > partitionLimit); + } + + private void checkLimitNumberOfPartitions(String tblName, int numPartitions) throws MetaException { + if (exceedsPartitionFetchLimit(numPartitions)) { + int partitionLimit = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST); + String configName = MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST.toString(); + throw new MetaException(String.format(PARTITION_NUMBER_EXCEED_LIMIT_MSG, numPartitions, + tblName, partitionLimit, configName)); + } + } + + @Override + protected String getMessagePrefix() { + return "GetPartitionsHandler [" + id + "] - Get partitions from " + + TableName.getQualified(catName, dbName, tblName) + ":"; + } + + public record GetPartitionsResult(List result, boolean success) implements Result { + + } + + public static class GetPartitionsRequest extends TAbstractBase { + private final TableName tableName; + private final GetPartitionsArgs getPartitionsArgs; + private final boolean fetchPartNames; + private String engine; + private boolean getColStats; + private List processorCapabilities; + private String processorId; + + private List partitionOrders; + private List partitionKeys; + private boolean applyDistinct; + private boolean isAscending; + private boolean getPartitionValues; + + public GetPartitionsRequest(TableName tableName, + GetPartitionsArgs getPartitionsArgs, + boolean fetchPartNames) { + this.getPartitionsArgs = getPartitionsArgs; + this.tableName = tableName; + this.fetchPartNames = fetchPartNames; + } + + public GetPartitionsRequest(TableName tableName, + GetPartitionsArgs getPartitionsArgs) { + this(tableName, getPartitionsArgs, false); + } + + public String getEngine() { + return engine; + } + + public void setEngine(String engine) { + this.engine = engine; + } + + public 
boolean isGetColStats() { + return getColStats; + } + + public void setGetColStats(boolean getColStats) { + this.getColStats = getColStats; + } + + public List getProcessorCapabilities() { + return processorCapabilities; + } + + public void setProcessorCapabilities(List processorCapabilities) { + this.processorCapabilities = processorCapabilities; + } + + public String getProcessorId() { + return processorId; + } + + public void setProcessorId(String processorId) { + this.processorId = processorId; + } + + public boolean isAscending() { + return isAscending; + } + + public void setAscending(boolean ascending) { + isAscending = ascending; + } + + public boolean isApplyDistinct() { + return applyDistinct; + } + + public void setApplyDistinct(boolean applyDistinct) { + this.applyDistinct = applyDistinct; + } + + public List getPartitionKeys() { + return partitionKeys; + } + + public void setPartitionKeys(List partitionKeys) { + this.partitionKeys = partitionKeys; + } + + public List getPartitionOrders() { + return partitionOrders; + } + + public void setPartitionOrders(List partitionOrders) { + this.partitionOrders = partitionOrders; + } + + public boolean isGetPartitionValues() { + return getPartitionValues; + } + + public void setGetPartitionValues(boolean getPartitionValues) { + this.getPartitionValues = getPartitionValues; + } + + public TableName getTableName() { + return tableName; + } + + public GetPartitionsArgs getGetPartitionsArgs() { + return getPartitionsArgs; + } + + public boolean isFetchPartNames() { + return fetchPartNames; + } + } + + public static List getPartitions(Consumer preExecutor, + Consumer postExecutor, IHMSHandler handler, TableName tableName, + GetPartitionsArgs args) throws NoSuchObjectException, MetaException { + try { + return getPartitionsResult(preExecutor, postExecutor, handler, tableName, args).result(); + } catch (TException e) { + throw handleException(e).throwIfInstance(NoSuchObjectException.class, MetaException.class) + 
.defaultMetaException(); + } + } + + public static GetPartitionsResult getPartitionsResult(Consumer preExecutor, + Consumer postExecutor, IHMSHandler handler, TableName tableName, + GetPartitionsArgs args) throws TException { + GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args); + preExecutor.accept(tableName); + Exception ex = null; + try { + GetPartitionsHandler getPartsHandler = + AbstractRequestHandler.offer(handler, getPartitionsRequest); + return getPartsHandler.getResult(); + } catch (Exception e) { + ex = e; + throw handleException(ex).throwIfInstance(TException.class).defaultTException(); + } finally { + postExecutor.accept(ex); + } + } + + public static GetPartitionsResult getPartitionNames(Consumer preExecutor, + Consumer postConsumer, IHMSHandler handler, TableName tableName, + GetPartitionsArgs args) throws TException { + preExecutor.accept(tableName); + Exception ex = null; + try { + GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args, true); + GetPartitionsHandler getPartNamesHandler = + AbstractRequestHandler.offer(handler, getPartitionsRequest); + return getPartNamesHandler.getResult(); + } catch (Exception e) { + ex = e; + throw handleException(ex).throwIfInstance(TException.class).defaultTException(); + } finally { + postConsumer.accept(ex); + } + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java new file mode 100644 index 000000000000..af799e9d4049 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/TAbstractBase.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.handler; + +import org.apache.thrift.TBase; +import org.apache.thrift.TException; +import org.apache.thrift.TFieldIdEnum; +import org.apache.thrift.protocol.TProtocol; +import org.jetbrains.annotations.NotNull; + +public class TAbstractBase implements TBase { + @Override + public TFieldIdEnum fieldForId(int i) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isSet(TFieldIdEnum tFieldIdEnum) { + throw new UnsupportedOperationException(); + } + + @Override + public Object getFieldValue(TFieldIdEnum tFieldIdEnum) { + throw new UnsupportedOperationException(); + } + + @Override + public void setFieldValue(TFieldIdEnum tFieldIdEnum, Object o) { + throw new UnsupportedOperationException(); + } + @Override + public TBase deepCopy() { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public int compareTo(@NotNull Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public void read(TProtocol tProtocol) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void write(TProtocol tProtocol) throws TException { + throw new 
UnsupportedOperationException(); + } +} From 1047e558723bbcfccf2562d4d7e7ddb490d966cc Mon Sep 17 00:00:00 2001 From: zdeng Date: Wed, 4 Feb 2026 15:42:48 +0800 Subject: [PATCH 2/9] get_partitions --- .../metastore/handler/GetPartitionsHandler.java | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java index 86bef3ceb563..ef34c65e4c61 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -66,11 +66,11 @@ public class GetPartitionsHandler extends AbstractRequestHandler Date: Wed, 4 Feb 2026 15:48:04 +0800 Subject: [PATCH 3/9] fix bugs --- .../hadoop/hive/metastore/HMSHandler.java | 92 ++++++++++--------- .../handler/GetPartitionsHandler.java | 67 ++++++++++---- .../hive/metastore/MetaStoreTestUtils.java | 2 +- .../hive/metastore/TestHiveMetaStore.java | 5 +- .../metastore/TestHiveMetaStoreMethods.java | 3 +- .../TestMetaStoreEndFunctionListener.java | 2 +- .../metastore/client/TestListPartitions.java | 3 +- 7 files changed, 101 insertions(+), 73 deletions(-) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 98908ef2403b..4b6dc3d4ce3c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -28,6 +28,7 @@ import com.google.common.collect.Lists; import 
com.google.common.util.concurrent.Striped; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; @@ -2847,7 +2848,7 @@ public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) } } catch (Exception e) { ex = e; - throw handleException(e).throwIfInstance(TException.class).defaultMetaException(); + throw handleException(e).defaultTException(); } finally { endFunction("add_partitions_req", ex == null, ex, tblName); } @@ -3206,7 +3207,7 @@ public DropPartitionsResult drop_partitions_req( return resp; } catch (Exception e) { ex = e; - throw handleException(e).throwIfInstance(TException.class).defaultMetaException(); + throw handleException(e).defaultTException(); } finally { endFunction("drop_partition_req", ex == null, ex, TableName.getQualified(request.getCatName(), request.getDbName(), request.getTblName())); @@ -3298,13 +3299,12 @@ public GetPartitionResponse get_partition_req(GetPartitionRequest req) throws MetaException, NoSuchObjectException, TException { String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); + GetPartitionsHandler.validatePartVals(this, tableName, req.getPartVals()); List partitions = GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partition_req", catName, t.getDb(), t.getCat()), ex -> endFunction("get_partition_req", ex == null, ex, tableName.toString()), - this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build()); - if (partitions == null || partitions.isEmpty()) { - throw new NoSuchObjectException("partition values=" + req.getPartVals()); - } + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build(), + true); return new GetPartitionResponse(partitions.getFirst()); } @@ -3392,17 +3392,14 @@ public Partition get_partition_with_auth(final String db_name, throws TException { String[] parsedDbName = parseDbName(db_name, conf); TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - List partitions = GetPartitionsHandler.getPartitionsResult( + GetPartitionsHandler.validatePartVals(this, tableName, part_vals); + List partitions = GetPartitionsHandler.getPartitions( t -> startFunction("get_partition_with_auth", " : tbl=" + t + samplePartitionValues(part_vals) + getGroupsCountAndUsername(user_name,group_names)), ex -> endFunction("get_partition_with_auth", ex == null, ex, tbl_name), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .part_vals(part_vals).userName(user_name).groupNames(group_names).build()) - .result(); - if (partitions == null || partitions.isEmpty()) { - throw new NoSuchObjectException("partition values=" + part_vals); - } - return partitions.get(0); + .part_vals(part_vals).userName(user_name).groupNames(group_names).build(), true); + return partitions.getFirst(); } /** @@ -3418,7 +3415,7 @@ public List get_partitions(final String db_name, final String 
tbl_nam return GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partitions", tableName.getCat(), tableName.getDb(), tableName.getTable()), ex -> endFunction("get_partitions", ex == null, ex, tableName.toString()), - this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build()); + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build(), false); } @Override @@ -3434,7 +3431,7 @@ public PartitionsResponse get_partitions_req(PartitionsRequest req) .includeParamKeyPattern(req.getIncludeParamKeyPattern()) .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) - .max(req.getMaxParts()).build()); + .max(req.getMaxParts()).build(), false); return new PartitionsResponse(partitions); } @@ -3578,16 +3575,24 @@ public List get_partition_names(final String db_name, final String tbl_n @Override public List fetch_partition_names_req(final PartitionsRequest req) throws NoSuchObjectException, MetaException { + String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); + String dbName = req.getDbName(), tblName = req.getTblName(); + TableName tableName = new TableName(catName, dbName, tblName); try { - String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); - String dbName = req.getDbName(), tblName = req.getTblName(); - TableName tableName = new TableName(catName, dbName, tblName); return GetPartitionsHandler.getPartitionNames( t -> startTableFunction("fetch_partition_names_req", catName, dbName, tblName), ex -> endFunction("fetch_partition_names_req", ex == null, ex, tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(req.getMaxParts()).build()).result(); } catch (TException ex) { - throw handleException(ex).throwIfInstance(NoSuchObjectException.class, MetaException.class).defaultMetaException(); + if (ex instanceof NoSuchObjectException e) { + // Keep it here just because some tests in TestListPartitions assume NoSuchObjectException + // if the input is invalid. + if (StringUtils.isBlank(dbName) || StringUtils.isBlank(tableName.getTable())) { + throw e; + } + return Collections.emptyList(); + } + throw handleException(ex).defaultMetaException(); } } @@ -4340,7 +4345,7 @@ public String get_config_value(String name, String defaultValue) return toReturn; } catch (Exception e) { ex = e; - throw handleException(e).throwIfInstance(TException.class).defaultMetaException(); + throw handleException(e).defaultTException(); } finally { endFunction("get_config_value", success, ex); } @@ -4379,17 +4384,16 @@ private List getPartValsFromName(RawStore ms, String catName, String dbN @Deprecated public Partition get_partition_by_name(final String db_name, final String tbl_name, final String part_name) throws TException { - + if (StringUtils.isBlank(part_name)) { + throw new MetaException("The part_name in get_partition_by_name cannot be null or empty"); + } String[] parsedDbName = parseDbName(db_name, conf); TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List partitions = GetPartitionsHandler.getPartitions( t -> startFunction("get_partition_by_name", ": tbl=" + t + " part=" + part_name), ex -> 
endFunction("get_partition_by_name", ex == null, ex, tableName.toString()), this, tableName, - new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(Arrays.asList(part_name)).build()); - if (partitions == null || partitions.isEmpty()) { - throw new NoSuchObjectException(tableName + " partition (" + part_name + ") not found"); - } + new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(List.of(part_name)).build(), true); return partitions.getFirst(); } @@ -4473,7 +4477,7 @@ public List get_partitions_ps_with_auth(final String db_name, t -> startTableFunction("get_partitions_ps_with_auth", t.getCat(), t.getDb(), t.getTable()), ex -> endFunction("get_partitions_ps_with_auth", ex == null, ex, tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames).build()); + .part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames).build(), false); } @Override @@ -4482,14 +4486,14 @@ public GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(GetPartit String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); List partitions = GetPartitionsHandler.getPartitionsResult( - t -> startTableFunction("get_partition_names_req", catName, t.getDb(), t.getTable()), + t -> startTableFunction("get_partitions_ps_with_auth_req", catName, t.getDb(), t.getTable()), ex -> endFunction("get_partitions_ps_with_auth_req", ex == null, ex, tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .part_vals(req.getPartVals()).max(req.getMaxParts()) - .userName(req.getUserName()).groupNames(req.getGroupNames()) - .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) - .includeParamKeyPattern(req.getIncludeParamKeyPattern()) - .excludeParamKeyPattern(req.getExcludeParamKeyPattern()).partNames(req.getPartNames()).build()) + .part_vals(req.getPartVals()).max(req.getMaxParts()) + .userName(req.getUserName()).groupNames(req.getGroupNames()) + .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) + .includeParamKeyPattern(req.getIncludeParamKeyPattern()) + .excludeParamKeyPattern(req.getExcludeParamKeyPattern()).partNames(req.getPartNames()).build()) .result(); return new GetPartitionsPsWithAuthResponse(partitions); } @@ -4527,6 +4531,9 @@ public List get_partition_names_ps(final String db_name, @Override public GetPartitionNamesPsResponse get_partition_names_ps_req(GetPartitionNamesPsRequest req) throws MetaException, NoSuchObjectException, TException { + if (req.getPartValues() == null) { + throw new MetaException("The partValues in GetPartitionNamesPsRequest is null"); + } String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); String dbName = req.getDbName(), tblName = req.getTblName(); TableName tableName = new TableName(catName, dbName, tblName); @@ -4534,7 +4541,7 @@ public GetPartitionNamesPsResponse get_partition_names_ps_req(GetPartitionNamesP t -> startTableFunction("get_partition_names_ps_req", catName, dbName, tblName), ex -> endFunction("get_partition_names_ps_req", ex == null, ex, tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .max(req.getMaxParts()).part_vals(req.getPartValues()).build()) + .max(req.getMaxParts()).part_vals(req.getPartValues()).build()) .result(); GetPartitionNamesPsResponse res = new GetPartitionNamesPsResponse(); res.setNames(names); @@ -5034,6 +5041,9 @@ public int get_num_partitions_by_filter(final String dbName, public List get_partitions_by_names(final String dbName, final String tblName, final List partNames) throws TException { + if (partNames == null) { + throw new MetaException("The partNames is null"); + } String[] dbNameParts = parseDbName(dbName, conf); TableName tableName = new TableName(dbNameParts[CAT_NAME], dbNameParts[DB_NAME], tblName); return GetPartitionsHandler.getPartitionsResult( @@ -5046,6 +5056,9 @@ public List get_partitions_by_names(final String dbName, final String @Override public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNamesRequest gpbnr) throws TException { + if (gpbnr.getNames() == null) { + throw new MetaException("The names in GetPartitionsByNamesRequest is null"); + } String[] dbNameParts = parseDbName(gpbnr.getDb_name(), conf); TableName tableName = new TableName(dbNameParts[CAT_NAME], dbNameParts[DB_NAME], gpbnr.getTbl_name()); GetPartitionsHandler.GetPartitionsRequest request = @@ -5065,7 +5078,7 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam return new GetPartitionsByNamesResult(partitions); } catch (Exception e) { ex = e; - throw 
handleException(e).throwIfInstance(TException.class).defaultTException(); + throw handleException(e).defaultTException(); } finally { endFunction("get_partitions_by_names", ex == null, ex, tableName.toString()); } @@ -6741,17 +6754,6 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest req) throws TExcepti } } - private Table getTable(String catName, String dbName, String tableName, - String writeIdList) - throws MetaException, InvalidObjectException { - Table t = getMS().getTable(catName, dbName, tableName, writeIdList); - if (t == null) { - throw new InvalidObjectException(TableName.getQualified(catName, dbName, tableName) - + " table not found"); - } - return t; - } - @Override public NotificationEventResponse get_next_notification(NotificationEventRequest rqst) throws TException { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java index ef34c65e4c61..916f963e4898 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -107,6 +107,9 @@ protected void beforeExecute() throws TException, IOException { this.table = handler.get_table_core(getTableRequest); ((HMSHandler) handler).firePreEvent(new PreReadTableEvent(table, handler)); authorizeTableForPartitionMetadata(); + + LOG.info("Starting to get {} of {} using {}", request.isFetchPartNames() ? 
"partition names" : "partitions", + TableName.getQualified(catName, dbName, tblName), getMethod); } @Override @@ -196,7 +199,6 @@ private GetPartitionsResult getPartitionsByNames() throws TException boolean success = false; rs.openTransaction(); try { - rs.openTransaction(); checkLimitNumberOfPartitions(tblName, args.getPartNames().size()); ret = rs.getPartitionsByNames(catName, dbName, tblName, args); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); @@ -237,7 +239,7 @@ private GetPartitionsResult getPartitionsByNames() throws TException rs.rollbackTransaction(); } } - return new GetPartitionsResult<>(ret, true); + return new GetPartitionsResult<>(ret, success); } private GetPartitionsResult getPartitions() throws TException { @@ -249,11 +251,7 @@ private GetPartitionsResult getPartitions() throws TException { } else { List ret; checkLimitNumberOfPartitionsByFilter(NO_FILTER_STRING, args.getMax()); - if (args.getUserName() == null) { - ret = rs.getPartitions(catName, dbName, tblName, args); - } else { - ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args); - } + ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args); ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); return new GetPartitionsResult<>(ret, true); } @@ -430,50 +428,79 @@ public boolean isFetchPartNames() { } } - public static List getPartitions(Consumer preExecutor, - Consumer postExecutor, IHMSHandler handler, TableName tableName, - GetPartitionsArgs args) throws NoSuchObjectException, MetaException { + public static List getPartitions(Consumer preHook, + Consumer postHook, IHMSHandler handler, TableName tableName, + GetPartitionsArgs args, boolean assumeResult) throws NoSuchObjectException, MetaException { + Exception ex = null; try { - return getPartitionsResult(preExecutor, postExecutor, handler, tableName, args).result(); - } catch (TException e) { + GetPartitionsRequest getPartitionsRequest = new 
GetPartitionsRequest(tableName, args); + preHook.accept(tableName); + GetPartitionsHandler getPartsHandler = + AbstractRequestHandler.offer(handler, getPartitionsRequest); + List partitions = getPartsHandler.getResult().result(); + if (assumeResult && (partitions == null || partitions.isEmpty())) { + throw new NoSuchObjectException(tableName + " partition not found"); + } + return partitions; + } catch (Exception e) { + ex = e; throw handleException(e).throwIfInstance(NoSuchObjectException.class, MetaException.class) .defaultMetaException(); + } finally { + postHook.accept(ex); } } - public static GetPartitionsResult getPartitionsResult(Consumer preExecutor, - Consumer postExecutor, IHMSHandler handler, TableName tableName, + public static GetPartitionsResult getPartitionsResult( + Consumer preHook, + Consumer postHook, + IHMSHandler handler, TableName tableName, GetPartitionsArgs args) throws TException { - GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args); - preExecutor.accept(tableName); Exception ex = null; try { + GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args); + preHook.accept(tableName); GetPartitionsHandler getPartsHandler = AbstractRequestHandler.offer(handler, getPartitionsRequest); return getPartsHandler.getResult(); } catch (Exception e) { ex = e; - throw handleException(ex).throwIfInstance(TException.class).defaultTException(); + throw handleException(ex).defaultTException(); } finally { - postExecutor.accept(ex); + postHook.accept(ex); } } public static GetPartitionsResult getPartitionNames(Consumer preExecutor, Consumer postConsumer, IHMSHandler handler, TableName tableName, GetPartitionsArgs args) throws TException { - preExecutor.accept(tableName); Exception ex = null; try { + preExecutor.accept(tableName); GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args, true); GetPartitionsHandler getPartNamesHandler = 
AbstractRequestHandler.offer(handler, getPartitionsRequest); return getPartNamesHandler.getResult(); } catch (Exception e) { ex = e; - throw handleException(ex).throwIfInstance(TException.class).defaultTException(); + throw handleException(ex).defaultTException(); } finally { postConsumer.accept(ex); } } + + public static void validatePartVals(IHMSHandler handler, + TableName tableName, List partVals) throws MetaException, NoSuchObjectException { + if (partVals == null || partVals.isEmpty()) { + throw new MetaException("The partVals is null or empty"); + } + GetTableRequest request = new GetTableRequest(tableName.getDb(), tableName.getTable()); + request.setCatName(tableName.getCat()); + Table table = handler.get_table_core(request); + int size = table.getPartitionKeysSize(); + if (size != partVals.size()) { + throw new MetaException("Unmatched partition values, partition keys size: " + + size + ", partition values size: " + partVals.size()); + } + } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index 61fa385aaa3d..3c927bb90f06 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -284,7 +284,7 @@ private static void loopUntilZKReady(Configuration conf, String msHost, int port } return; } catch (Exception e) { - if (retries++ > 60) { //give up + if (retries++ > -1) { //give up // Metastore URI is not visible from the ZooKeeper yet. 
LOG.error("Unable to get metastore URI from the ZooKeeper: " + e.getMessage()); throw e; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 9e52ca13b1fa..939f09c3343e 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -1147,7 +1147,7 @@ public void testRenamePartition() throws Throwable { } } - @Test(expected = InvalidObjectException.class) + @Test(expected = NoSuchObjectException.class) public void testDropTableFetchPartitions() throws Throwable { String dbName = "fetchPartitionsDb"; String tblName = "fetchPartitionsTbl"; @@ -2570,8 +2570,7 @@ public void testPartitionFilter() throws Exception { me = e; } assertNotNull(me); - assertTrue("NoSuchObject exception", me.getMessage().contains( - "Specified catalog.database.table does not exist : hive.invdbname.invtablename")); + assertTrue("NoSuchObject exception", me.getMessage().contains("hive.invdbname.invtablename table not found")); client.dropTable(dbName, tblName); client.dropDatabase(dbName); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java index 090007324220..382f8c9fe35a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; 
import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.Before; @@ -55,7 +56,7 @@ protected void initConf() { } } - @Test(expected = InvalidObjectException.class) + @Test(expected = NoSuchObjectException.class) public void test_get_partitions_by_names() throws Exception { hmsHandler.get_partitions_by_names("dbName", "tblName", Arrays.asList("partNames")); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index b919eeffe25a..f81c47d4a156 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -125,7 +125,7 @@ public void testEndFunctionListener() throws Exception { e = context.getException(); assertTrue((e!=null)); assertTrue((e instanceof NoSuchObjectException)); - assertEquals(context.getInputTableName(), tblName); + assertEquals(context.getInputTableName(), "hive.hive3524." 
+ tblName); try { msc.dropTable(dbName, unknownTable); } catch (Exception e4) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java index abaf28217cae..ec602d0b4998 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java @@ -1618,8 +1618,7 @@ public void testListPartitionValuesNullSchema() throws Exception { null); client.listPartitionValues(request); fail("Should have thrown exception"); - } catch (NullPointerException | TProtocolException e) { - //TODO: should not throw different exceptions for different HMS deployment types + } catch (MetaException | TProtocolException e) { } } From 95af6056241c7f995aba562211cae31a88907222 Mon Sep 17 00:00:00 2001 From: zdeng Date: Wed, 4 Feb 2026 11:24:11 +0800 Subject: [PATCH 4/9] append partition --- .../hadoop/hive/metastore/HMSHandler.java | 186 ++--------------- .../handler/AppendPartitionHandler.java | 190 ++++++++++++++++++ 2 files changed, 209 insertions(+), 167 deletions(-) create mode 100644 standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 4b6dc3d4ce3c..4ba06255a75c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -43,6 +43,7 @@ import 
org.apache.hadoop.hive.metastore.events.*; import org.apache.hadoop.hive.metastore.handler.AbstractRequestHandler; import org.apache.hadoop.hive.metastore.handler.AddPartitionsHandler; +import org.apache.hadoop.hive.metastore.handler.AppendPartitionHandler; import org.apache.hadoop.hive.metastore.handler.DropPartitionsHandler; import org.apache.hadoop.hive.metastore.handler.GetPartitionsHandler; import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType; @@ -93,7 +94,6 @@ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.canUpdateStats; import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.isDbReplicationTarget; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME; @@ -2625,104 +2625,6 @@ public List get_table_names_by_filter( return tables; } - private Partition append_partition_common(RawStore ms, String catName, String dbName, - String tableName, List part_vals, - EnvironmentContext envContext) - throws InvalidObjectException, AlreadyExistsException, MetaException, NoSuchObjectException { - - Partition part = new Partition(); - boolean success = false, madeDir = false; - Path partLocation = null; - Table tbl = null; - Map transactionalListenerResponses = Collections.emptyMap(); - Database db = null; - try { - ms.openTransaction(); - part.setCatName(catName); - part.setDbName(dbName); - part.setTableName(tableName); - part.setValues(part_vals); - - MetaStoreServerUtils.validatePartitionNameCharacters(part_vals, getConf()); - - tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), null); - if (tbl == null) { - throw new 
InvalidObjectException( - "Unable to add partition because table or database do not exist"); - } - if (tbl.getSd().getLocation() == null) { - throw new MetaException( - "Cannot append a partition to a view"); - } - - db = get_database_core(catName, dbName); - - firePreEvent(new PreAddPartitionEvent(tbl, part, this)); - - part.setSd(tbl.getSd().deepCopy()); - partLocation = new Path(tbl.getSd().getLocation(), Warehouse - .makePartName(tbl.getPartitionKeys(), part_vals)); - part.getSd().setLocation(partLocation.toString()); - - Partition old_part; - try { - old_part = ms.getPartition(part.getCatName(), part.getDbName(), part - .getTableName(), part.getValues()); - } catch (NoSuchObjectException e) { - // this means there is no existing partition - old_part = null; - } - if (old_part != null) { - throw new AlreadyExistsException("Partition already exists:" + part); - } - - if (!wh.isDir(partLocation)) { - if (!wh.mkdirs(partLocation)) { - throw new MetaException(partLocation - + " is not a directory or unable to create one"); - } - madeDir = true; - } - - // set create time - long time = System.currentTimeMillis() / 1000; - part.setCreateTime((int) time); - part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time)); - - if (canUpdateStats(getConf(), tbl)) { - MetaStoreServerUtils.updatePartitionStatsFast(part, tbl, wh, madeDir, false, envContext, true); - } - - if (ms.addPartition(part)) { - if (!transactionalListeners.isEmpty()) { - transactionalListenerResponses = - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, part, true, this), - envContext); - } - - success = ms.commitTransaction(); - } - } finally { - if (!success) { - ms.rollbackTransaction(); - if (madeDir) { - wh.deleteDir(partLocation, false, ReplChangeManager.shouldEnableCm(db, tbl)); - } - } - - if (!listeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(listeners, - EventType.ADD_PARTITION, - new 
AddPartitionEvent(tbl, part, success, this), - envContext, - transactionalListenerResponses, ms); - } - } - return part; - } - public void firePreEvent(PreEventContext event) throws MetaException { for (MetaStorePreEventListener listener : preListeners) { try { @@ -2755,62 +2657,35 @@ public Partition append_partition_with_environment_context(final String dbName, final String tableName, final List part_vals, final EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException { String[] parsedDbName = parseDbName(dbName, conf); - startPartitionFunction("append_partition_with_environment_context", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals); - Partition ret = null; - Exception ex = null; - try { - AppendPartitionsRequest appendPartitionsReq = new AppendPartitionsRequest(); - appendPartitionsReq.setDbName(parsedDbName[DB_NAME]); - appendPartitionsReq.setTableName(tableName); - appendPartitionsReq.setPartVals(part_vals); - appendPartitionsReq.setCatalogName(parsedDbName[CAT_NAME]); - appendPartitionsReq.setEnvironmentContext(envContext); - ret = append_partition_req(appendPartitionsReq); - } catch (Exception e) { - ex = e; - throw handleException(e).throwIfInstance(MetaException.class, InvalidObjectException.class, AlreadyExistsException.class) - .defaultMetaException(); - } finally { - endFunction("append_partition_with_environment_context", ret != null, ex, tableName); - } - return ret; + AppendPartitionsRequest appendPartitionsReq = new AppendPartitionsRequest(); + appendPartitionsReq.setDbName(parsedDbName[DB_NAME]); + appendPartitionsReq.setTableName(tableName); + appendPartitionsReq.setPartVals(part_vals); + appendPartitionsReq.setCatalogName(parsedDbName[CAT_NAME]); + appendPartitionsReq.setEnvironmentContext(envContext); + return append_partition_req(appendPartitionsReq); } @Override public Partition append_partition_req(final AppendPartitionsRequest appendPartitionsReq) throws 
InvalidObjectException, AlreadyExistsException, MetaException { - List part_vals = appendPartitionsReq.getPartVals(); String dbName = appendPartitionsReq.getDbName(); String catName = appendPartitionsReq.isSetCatalogName() ? appendPartitionsReq.getCatalogName() : getDefaultCatalog(conf); String tableName = appendPartitionsReq.getTableName(); - String partName = appendPartitionsReq.getName(); - if (partName == null && (part_vals == null || part_vals.isEmpty())) { - throw new MetaException("The partition values must not be null or empty."); - } - if (part_vals == null || part_vals.isEmpty()) { - // partition name is set, get partition vals and then append partition - part_vals = getPartValsFromName(getMS(), catName, dbName, tableName, partName); - } - startPartitionFunction("append_partition_req", catName, dbName, tableName, part_vals); - if (LOG.isDebugEnabled()) { - for (String part : part_vals) { - LOG.debug(part); - } - } - Partition ret = null; + startTableFunction("append_partition_req", catName, dbName, tableName); Exception ex = null; try { - ret = append_partition_common(getMS(), catName, dbName, tableName, part_vals, appendPartitionsReq.getEnvironmentContext()); + AppendPartitionHandler appendPartition = AbstractRequestHandler.offer(this, appendPartitionsReq); + return appendPartition.getResult().partition(); } catch (Exception e) { ex = e; throw handleException(e) .throwIfInstance(MetaException.class, InvalidObjectException.class, AlreadyExistsException.class) .defaultMetaException(); } finally { - endFunction("append_partition_req", ret != null, ex, tableName); + endFunction("append_partition_req", ex == null, ex, tableName); } - return ret; } public Lock getTableLockFor(String dbName, String tblName) { @@ -4369,17 +4244,6 @@ public static List getPartValsFromName(Table t, String partName) return partVals; } - private List getPartValsFromName(RawStore ms, String catName, String dbName, - String tblName, String partName) - throws MetaException, 
InvalidObjectException { - Table t = ms.getTable(catName, dbName, tblName, null); - if (t == null) { - throw new InvalidObjectException(dbName + "." + tblName - + " table not found"); - } - return getPartValsFromName(t, partName); - } - @Override @Deprecated public Partition get_partition_by_name(final String db_name, final String tbl_name, @@ -4410,25 +4274,13 @@ public Partition append_partition_by_name_with_environment_context(final String final String tbl_name, final String part_name, final EnvironmentContext env_context) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - Partition ret = null; - Exception ex = null; - try { - AppendPartitionsRequest appendPartitionRequest = new AppendPartitionsRequest(); - appendPartitionRequest.setDbName(parsedDbName[DB_NAME]); - appendPartitionRequest.setTableName(tbl_name); - appendPartitionRequest.setName(part_name); - appendPartitionRequest.setCatalogName(parsedDbName[CAT_NAME]); - appendPartitionRequest.setEnvironmentContext(env_context); - ret = append_partition_req(appendPartitionRequest); - } catch (Exception e) { - ex = e; - throw handleException(e) - .throwIfInstance(InvalidObjectException.class, AlreadyExistsException.class, MetaException.class) - .defaultMetaException(); - } finally { - endFunction("append_partition_by_name", ret != null, ex, tbl_name); - } - return ret; + AppendPartitionsRequest appendPartitionRequest = new AppendPartitionsRequest(); + appendPartitionRequest.setDbName(parsedDbName[DB_NAME]); + appendPartitionRequest.setTableName(tbl_name); + appendPartitionRequest.setName(part_name); + appendPartitionRequest.setCatalogName(parsedDbName[CAT_NAME]); + appendPartitionRequest.setEnvironmentContext(env_context); + return append_partition_req(appendPartitionRequest); } @Deprecated diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java new file mode 100644 index 000000000000..fb94c5bfb297 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/AppendPartitionHandler.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.handler; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.HMSHandler; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.ReplChangeManager; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.AppendPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.hive.metastore.HMSHandler.getPartValsFromName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.canUpdateStats; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.updatePartitionStatsFast; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.validatePartitionNameCharacters; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static 
org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + +@RequestHandler(requestBody = AppendPartitionsRequest.class) +public class AppendPartitionHandler + extends AbstractRequestHandler { + private static final Logger LOG = LoggerFactory.getLogger(AppendPartitionHandler.class); + private RawStore ms; + private String catName; + private String dbName; + private String tableName; + private List partVals; + private Table tbl; + private Warehouse wh; + + AppendPartitionHandler(IHMSHandler handler, AppendPartitionsRequest request) { + super(handler, false, request); + } + + @Override + protected void beforeExecute() throws TException, IOException { + List part_vals = request.getPartVals(); + dbName = normalizeIdentifier(request.getDbName()); + catName = normalizeIdentifier(request.isSetCatalogName() ? + request.getCatalogName() : getDefaultCatalog(handler.getConf())); + tableName = normalizeIdentifier(request.getTableName()); + String partName = request.getName(); + if (partName == null && (part_vals == null || part_vals.isEmpty())) { + throw new MetaException("The partition values must not be null or empty."); + } + + ms = handler.getMS(); + wh = handler.getWh(); + tbl = ms.getTable(catName, dbName, tableName, null); + if (tbl == null) { + throw new InvalidObjectException(dbName + "." 
+ tableName + " table not found"); + } + if (tbl.getSd().getLocation() == null) { + throw new MetaException("Cannot append a partition to a view"); + } + if (part_vals == null || part_vals.isEmpty()) { + // partition name is set, get partition vals and then append partition + part_vals = getPartValsFromName(tbl, partName); + } + this.partVals = part_vals; + Partition old_part; + try { + old_part = ms.getPartition(catName, dbName, tableName, partVals); + } catch (NoSuchObjectException e) { + // this means there is no existing partition + old_part = null; + } + if (old_part != null) { + throw new AlreadyExistsException("Partition already exists:" + part_vals); + } + LOG.debug("Append partition: {}", part_vals); + validatePartitionNameCharacters(partVals, handler.getConf()); + } + + @Override + protected AppendPartitionResult execute() throws TException, IOException { + Partition part = new Partition(); + part.setCatName(catName); + part.setDbName(dbName); + part.setTableName(tableName); + part.setValues(partVals); + + boolean success = false, madeDir = false; + Path partLocation = null; + Map transactionalListenerResponses = Collections.emptyMap(); + Database db = null; + try { + ms.openTransaction(); + db = handler.get_database_core(catName, dbName); + ((HMSHandler) handler).firePreEvent(new PreAddPartitionEvent(tbl, part, handler)); + + part.setSd(tbl.getSd().deepCopy()); + partLocation = new Path(tbl.getSd().getLocation(), Warehouse + .makePartName(tbl.getPartitionKeys(), partVals)); + part.getSd().setLocation(partLocation.toString()); + + if (!wh.isDir(partLocation)) { + if (!wh.mkdirs(partLocation)) { + throw new MetaException(partLocation + + " is not a directory or unable to create one"); + } + madeDir = true; + } + + // set create time + long time = System.currentTimeMillis() / 1000; + part.setCreateTime((int) time); + part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time)); + if (canUpdateStats(handler.getConf(), tbl)) { + 
updatePartitionStatsFast(part, tbl, wh, madeDir, false, request.getEnvironmentContext(), true); + } + + if (ms.addPartition(part)) { + if (!handler.getTransactionalListeners().isEmpty()) { + transactionalListenerResponses = + MetaStoreListenerNotifier.notifyEvent(handler.getTransactionalListeners(), + EventMessage.EventType.ADD_PARTITION, + new AddPartitionEvent(tbl, part, true, handler), + request.getEnvironmentContext()); + } + + success = ms.commitTransaction(); + } + } finally { + if (!success) { + ms.rollbackTransaction(); + if (madeDir) { + wh.deleteDir(partLocation, false, ReplChangeManager.shouldEnableCm(db, tbl)); + } + } + + if (!handler.getListeners().isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(handler.getListeners(), + EventMessage.EventType.ADD_PARTITION, + new AddPartitionEvent(tbl, part, success, handler), + request.getEnvironmentContext(), + transactionalListenerResponses, ms); + } + } + return new AppendPartitionResult(part, success); + } + + @Override + protected String getMessagePrefix() { + return "AppendPartitionHandler [" + id + "] - Append partition for " + + TableName.getQualified(catName, dbName, tableName) + ":"; + } + + public record AppendPartitionResult(Partition partition, boolean success) implements Result { + + } +} From 98c755afd546d944f41feef46789895a8e68f90f Mon Sep 17 00:00:00 2001 From: zdeng Date: Tue, 10 Feb 2026 09:39:10 +0800 Subject: [PATCH 5/9] MetaStoreTestUtils --- .../org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index 3c927bb90f06..61fa385aaa3d 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -284,7 +284,7 @@ private static void loopUntilZKReady(Configuration conf, String msHost, int port } return; } catch (Exception e) { - if (retries++ > -1) { //give up + if (retries++ > 60) { //give up // Metastore URI is not visible from the ZooKeeper yet. LOG.error("Unable to get metastore URI from the ZooKeeper: " + e.getMessage()); throw e; From 26b212e99b6ce2bfae8062a77405f82ca809fcc0 Mon Sep 17 00:00:00 2001 From: zdeng Date: Tue, 10 Feb 2026 10:16:17 +0800 Subject: [PATCH 6/9] review-1 --- .../hadoop/hive/metastore/HMSHandler.java | 8 ++--- .../handler/GetPartitionsHandler.java | 30 +++++++++++++++++-- .../metastore/TestHiveMetaStoreMethods.java | 1 - 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 4ba06255a75c..d6b9039902c9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -3176,7 +3176,7 @@ public GetPartitionResponse get_partition_req(GetPartitionRequest req) TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); GetPartitionsHandler.validatePartVals(this, tableName, req.getPartVals()); List partitions = GetPartitionsHandler.getPartitions( - t -> startTableFunction("get_partition_req", catName, t.getDb(), t.getCat()), + t -> startTableFunction("get_partition_req", catName, t.getDb(), t.getTable()), ex -> endFunction("get_partition_req", ex == null, ex, tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build(), true); @@ -4837,7 +4837,7 @@ public 
PartitionsSpecByExprResult get_partitions_spec_by_expr( Table table = get_table_core(getTableRequest); List partitionSpecs = MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(table, result.result()); - return new PartitionsSpecByExprResult(partitionSpecs, result.success()); + return new PartitionsSpecByExprResult(partitionSpecs, result.hasUnknownPartitions()); } @Override @@ -4858,7 +4858,7 @@ public PartitionsByExprResult get_partitions_by_expr( .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) .includeParamKeyPattern(req.getIncludeParamKeyPattern()).build()); - return new PartitionsByExprResult(result.result(), result.success()); + return new PartitionsByExprResult(result.result(), result.hasUnknownPartitions()); } @Override @@ -4921,7 +4921,7 @@ public GetPartitionsByNamesResult get_partitions_by_names_req(GetPartitionsByNam request.setEngine(gpbnr.getEngine()); request.setGetColStats(gpbnr.isSetGet_col_stats() && gpbnr.isGet_col_stats()); request.setProcessorId(gpbnr.getProcessorIdentifier()); - request.setProcessorCapabilities(request.getProcessorCapabilities()); + request.setProcessorCapabilities(gpbnr.getProcessorCapabilities()); startTableFunction("get_partitions_by_names", tableName.getCat(), tableName.getDb(), tableName.getTable()); Exception ex = null; try { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java index 916f963e4898..737133604da6 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -284,7 +284,9 @@ private GetPartitionsResult getPartitionsByExpr() throws 
TException { } else { hasUnknownPartitions = rs.getPartitionsByExpr(catName, dbName, tblName, partitions, args); } - return new GetPartitionsResult<>(partitions, hasUnknownPartitions); + GetPartitionsResult result = new GetPartitionsResult<>(partitions, true); + result.setHasUnknownPartitions(hasUnknownPartitions); + return result; } } @@ -311,8 +313,32 @@ protected String getMessagePrefix() { TableName.getQualified(catName, dbName, tblName) + ":"; } - public record GetPartitionsResult(List result, boolean success) implements Result { + public static class GetPartitionsResult implements Result { + private final List result; + private final boolean success; + private boolean hasUnknownPartitions; + public GetPartitionsResult(List getPartsResult, boolean success) { + this.result = getPartsResult; + this.success = success; + } + + public void setHasUnknownPartitions(boolean unknownPartitions) { + this.hasUnknownPartitions = unknownPartitions; + } + + public boolean hasUnknownPartitions() { + return hasUnknownPartitions; + } + + @Override + public boolean success() { + return success; + } + + public List result() { + return result; + } } public static class GetPartitionsRequest extends TAbstractBase { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java index 382f8c9fe35a..a083a50ae0bb 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreMethods.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import 
org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; From cbd0098d385b7c319f2474dd447e5547629f2f9a Mon Sep 17 00:00:00 2001 From: zdeng Date: Tue, 10 Feb 2026 10:33:38 +0800 Subject: [PATCH 7/9] review-2 --- .../hadoop/hive/metastore/HMSHandler.java | 2 -- .../handler/GetPartitionsHandler.java | 32 ++++++++++++------- .../TestMetaStoreEndFunctionListener.java | 3 +- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index d6b9039902c9..791739fc7beb 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -3485,8 +3485,6 @@ public PartitionValuesResponse get_partition_values(PartitionValuesRequest reque (int) maxParts, filter); Exception ex = null; try { - // This is serious black magic, as the following 2 lines do nothing AFAICT but without them - // the subsequent call to listPartitionValues fails. 
getPartitionsRequest.setApplyDistinct(request.isApplyDistinct()); getPartitionsRequest.setAscending(request.isAscending()); getPartitionsRequest.setPartitionKeys(request.getPartitionKeys()); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java index 737133604da6..fc89cd765798 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hive.metastore.handler; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.LinkedList; import java.util.List; @@ -128,8 +127,6 @@ private GetPartitionsResult getPartitionsByVals() throws TException { if (request.isFetchPartNames()) { List ret = rs.listPartitionNamesPs(catName, dbName, tblName, args.getPart_vals(), (short) args.getMax()); - ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, - filterHook, catName, dbName, tblName, ret); return new GetPartitionsResult<>(ret, true); } else { List ret; @@ -147,7 +144,7 @@ private GetPartitionsResult getPartitionValues() throws MetaException { PartitionValuesResponse resp = rs.listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(), request.isApplyDistinct(), args.getFilter(), request.isAscending(), request.getPartitionOrders(), args.getMax()); - return new GetPartitionsResult<>(Arrays.asList(resp), true); + return new GetPartitionsResult<>(List.of(resp), true); } private void checkLimitNumberOfPartitionsByPs(List partVals, int requestMax) @@ -172,7 +169,6 @@ private GetPartitionsResult getPartitionsByFilter() throws TException ret = rs.getPartitionsByFilter(catName, 
dbName, tblName, args); } - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); return new GetPartitionsResult<>(ret, true); } @@ -245,14 +241,11 @@ private GetPartitionsResult getPartitionsByNames() throws TException private GetPartitionsResult getPartitions() throws TException { if (request.isFetchPartNames()) { List ret = rs.listPartitionNames(catName, dbName, tblName, (short) args.getMax()); - ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, - filterHook, catName, dbName, tblName, ret); return new GetPartitionsResult<>(ret, true); } else { List ret; checkLimitNumberOfPartitionsByFilter(NO_FILTER_STRING, args.getMax()); ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args); - ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); return new GetPartitionsResult<>(ret, true); } } @@ -267,8 +260,6 @@ private GetPartitionsResult getPartitionsByExpr() throws TException { if (request.isFetchPartNames()) { List ret = rs.listPartitionNames(catName, dbName, tblName, args.getDefaultPartName(), args.getExpr(), args.getOrder(), args.getMax()); - ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, - filterHook, catName, dbName, tblName, ret); return new GetPartitionsResult(ret, true); } else { List partitions = new LinkedList<>(); @@ -307,6 +298,21 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions) thr } } + @Override + protected void afterExecute(GetPartitionsResult result) throws TException, IOException { + if (result != null && result.success()) { + List ret = result.result(); + if (request.isFetchPartNames()) { + ret = FilterUtils.filterPartitionNamesIfEnabled(isServerFilterEnabled, + filterHook, catName, dbName, tblName, ret); + } else if (!request.isGetPartitionValues() && getMethod != GetPartitionsMethod.NAMES) { + // GetPartitionsMethod.NAMES has already selected the result + ret = 
FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); + } + result.setResult(ret); + } + } + @Override protected String getMessagePrefix() { return "GetPartitionsHandler [" + id + "] - Get partitions from " + @@ -314,7 +320,7 @@ protected String getMessagePrefix() { } public static class GetPartitionsResult implements Result { - private final List result; + private List result; private final boolean success; private boolean hasUnknownPartitions; @@ -327,6 +333,10 @@ public void setHasUnknownPartitions(boolean unknownPartitions) { this.hasUnknownPartitions = unknownPartitions; } + public void setResult(List result) { + this.result = result; + } + public boolean hasUnknownPartitions() { return hasUnknownPartitions; } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index f81c47d4a156..fa0c422f4713 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -125,7 +125,8 @@ public void testEndFunctionListener() throws Exception { e = context.getException(); assertTrue((e!=null)); assertTrue((e instanceof NoSuchObjectException)); - assertEquals(context.getInputTableName(), "hive.hive3524." + tblName); + assertEquals(context.getInputTableName(), + Warehouse.DEFAULT_CATALOG_NAME + "." + dbName + "." 
+ tblName); try { msc.dropTable(dbName, unknownTable); } catch (Exception e4) { From 4b27bcbdde09a4c69c29ef38a62e76cf7c201019 Mon Sep 17 00:00:00 2001 From: zdeng Date: Tue, 10 Feb 2026 11:32:38 +0800 Subject: [PATCH 8/9] review-4 --- .../hadoop/hive/metastore/HMSHandler.java | 48 ++++++++++++------- .../handler/GetPartitionsHandler.java | 27 +++++++---- 2 files changed, 50 insertions(+), 25 deletions(-) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 791739fc7beb..e22a47584ac1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -3177,7 +3177,8 @@ public GetPartitionResponse get_partition_req(GetPartitionRequest req) GetPartitionsHandler.validatePartVals(this, tableName, req.getPartVals()); List partitions = GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partition_req", catName, t.getDb(), t.getTable()), - ex -> endFunction("get_partition_req", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partition_req", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build(), true); return new GetPartitionResponse(partitions.getFirst()); @@ -3271,7 +3272,8 @@ public Partition get_partition_with_auth(final String db_name, List partitions = GetPartitionsHandler.getPartitions( t -> startFunction("get_partition_with_auth", " : tbl=" + t + samplePartitionValues(part_vals) + getGroupsCountAndUsername(user_name,group_names)), - ex -> endFunction("get_partition_with_auth", ex == null, ex, tbl_name), + rex -> endFunction("get_partition_with_auth", + rex.getLeft() != null && 
rex.getLeft().success(), rex.getRight(), tbl_name), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .part_vals(part_vals).userName(user_name).groupNames(group_names).build(), true); return partitions.getFirst(); @@ -3289,7 +3291,8 @@ public List get_partitions(final String db_name, final String tbl_nam TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); return GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partitions", tableName.getCat(), tableName.getDb(), tableName.getTable()), - ex -> endFunction("get_partitions", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partitions", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build(), false); } @@ -3300,7 +3303,8 @@ public PartitionsResponse get_partitions_req(PartitionsRequest req) TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); List partitions = GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partitions_req", catName, req.getDbName(), req.getTblName()), - ex -> endFunction("get_partitions_req", ex == null, ex, + rex -> endFunction("get_partitions_req", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), TableName.getQualified(catName, req.getDbName(), req.getTblName())), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .includeParamKeyPattern(req.getIncludeParamKeyPattern()) @@ -3319,7 +3323,8 @@ public List get_partitions_with_auth(final String dbName, TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); return GetPartitionsHandler.getPartitionsResult( t -> startTableFunction("get_partitions_with_auth", t.getCat(), t.getDb(), t.getTable()), - ex -> endFunction("get_partitions_with_auth", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partitions_with_auth", + 
rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .max(maxParts).userName(userName).groupNames(groupNames).build()) .result(); @@ -3456,7 +3461,8 @@ public List fetch_partition_names_req(final PartitionsRequest req) try { return GetPartitionsHandler.getPartitionNames( t -> startTableFunction("fetch_partition_names_req", catName, dbName, tblName), - ex -> endFunction("fetch_partition_names_req", ex == null, ex, tableName.toString()), this, tableName, + rex -> endFunction("fetch_partition_names_req", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(req.getMaxParts()).build()).result(); } catch (TException ex) { if (ex instanceof NoSuchObjectException e) { @@ -4253,7 +4259,8 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List partitions = GetPartitionsHandler.getPartitions( t -> startFunction("get_partition_by_name", ": tbl=" + t + " part=" + part_name), - ex -> endFunction("get_partition_by_name", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partition_by_name", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(List.of(part_name)).build(), true); return partitions.getFirst(); @@ -4325,7 +4332,8 @@ public List get_partitions_ps_with_auth(final String db_name, TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); return GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partitions_ps_with_auth", t.getCat(), t.getDb(), t.getTable()), - ex -> endFunction("get_partitions_ps_with_auth", ex == null, ex, tableName.toString()), + rex -> 
endFunction("get_partitions_ps_with_auth", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames).build(), false); } @@ -4337,7 +4345,8 @@ public GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(GetPartit TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); List partitions = GetPartitionsHandler.getPartitionsResult( t -> startTableFunction("get_partitions_ps_with_auth_req", catName, t.getDb(), t.getTable()), - ex -> endFunction("get_partitions_ps_with_auth_req", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partitions_ps_with_auth_req", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .part_vals(req.getPartVals()).max(req.getMaxParts()) .userName(req.getUserName()).groupNames(req.getGroupNames()) @@ -4389,7 +4398,8 @@ public GetPartitionNamesPsResponse get_partition_names_ps_req(GetPartitionNamesP TableName tableName = new TableName(catName, dbName, tblName); List names = GetPartitionsHandler.getPartitionNames( t -> startTableFunction("get_partition_names_ps_req", catName, dbName, tblName), - ex -> endFunction("get_partition_names_ps_req", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partition_names_ps_req", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .max(req.getMaxParts()).part_vals(req.getPartValues()).build()) .result(); @@ -4406,7 +4416,8 @@ public List get_partition_names_req(PartitionsByExprRequest req) TableName tableName = new TableName(catName, dbName, tblName); return GetPartitionsHandler.getPartitionNames( t -> startTableFunction("get_partition_names_req", catName, dbName, 
tblName), - ex -> endFunction("get_partition_names_req", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partition_names_req", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .defaultPartName(req.getDefaultPartitionName()).max(req.getMaxParts()) .expr(req.getExpr()).order(req.getOrder()).build()) @@ -4757,7 +4768,8 @@ public List get_partitions_by_filter(final String dbName, final Strin TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); return GetPartitionsHandler.getPartitionsResult( t -> startTableFunction("get_partitions_by_filter", t.getCat(), t.getDb(), t.getTable()), - ex -> endFunction("get_partitions_by_filter", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partitions_by_filter", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max(maxParts).build()).result(); } @@ -4768,7 +4780,8 @@ public List get_partitions_by_filter_req(GetPartitionsByFilterRequest TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); return GetPartitionsHandler.getPartitionsResult( t -> startTableFunction("get_partitions_by_filter", catName, t.getDb(), t.getTable()), - ex -> endFunction("get_partitions_by_filter", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partitions_by_filter", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .filter(req.getFilter()).max(req.getMaxParts()) .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) @@ -4823,7 +4836,8 @@ public PartitionsSpecByExprResult get_partitions_spec_by_expr( GetPartitionsHandler.GetPartitionsResult result = GetPartitionsHandler.getPartitionsResult( t -> 
startTableFunction("get_partitions_spec_by_expr", catName, dbName, req.getTblName()), - e -> endFunction("get_partitions_spec_by_expr", e == null, e, + rex -> endFunction("get_partitions_spec_by_expr", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), TableName.getQualified(catName, req.getDbName(), req.getTblName())), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .expr(req.getExpr()).max(req.getMaxParts()).defaultPartName(req.getDefaultPartitionName()) @@ -4849,7 +4863,8 @@ public PartitionsByExprResult get_partitions_by_expr( GetPartitionsHandler.GetPartitionsResult result = GetPartitionsHandler.getPartitionsResult( t -> startTableFunction("get_partitions_by_expr", catName, dbName, req.getTblName()), - ex -> endFunction("get_partitions_by_expr", ex == null, ex, + rex -> endFunction("get_partitions_by_expr", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), TableName.getQualified(catName, req.getDbName(), req.getTblName())), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .expr(req.getExpr()).defaultPartName(defaultPartitionName).max(maxParts) @@ -4898,7 +4913,8 @@ public List get_partitions_by_names(final String dbName, final String TableName tableName = new TableName(dbNameParts[CAT_NAME], dbNameParts[DB_NAME], tblName); return GetPartitionsHandler.getPartitionsResult( t -> startTableFunction("get_partitions_by_names", t.getCat(), t.getDb(), t.getTable()), - ex -> endFunction("get_partitions_by_names", ex == null, ex, tableName.toString()), + rex -> endFunction("get_partitions_by_names", + rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(partNames).build()).result(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java index fc89cd765798..77b4474925d7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; @@ -465,16 +466,20 @@ public boolean isFetchPartNames() { } public static List getPartitions(Consumer preHook, - Consumer postHook, IHMSHandler handler, TableName tableName, + Consumer> postHook, IHMSHandler handler, TableName tableName, GetPartitionsArgs args, boolean assumeResult) throws NoSuchObjectException, MetaException { Exception ex = null; + GetPartitionsResult result = null; try { GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args); preHook.accept(tableName); GetPartitionsHandler getPartsHandler = AbstractRequestHandler.offer(handler, getPartitionsRequest); - List partitions = getPartsHandler.getResult().result(); + result = getPartsHandler.getResult(); + List partitions = result.result(); if (assumeResult && (partitions == null || partitions.isEmpty())) { + // Create a new dummy GetPartitionsResult for postHook to consume + result = new GetPartitionsResult(List.of(), false); throw new NoSuchObjectException(tableName + " partition not found"); } return partitions; @@ -483,45 +488,49 @@ public static List getPartitions(Consumer preHook, throw handleException(e).throwIfInstance(NoSuchObjectException.class, MetaException.class) .defaultMetaException(); } finally { - postHook.accept(ex); + postHook.accept(Pair.of(result, ex)); } } public static 
GetPartitionsResult getPartitionsResult( Consumer preHook, - Consumer postHook, + Consumer> postHook, IHMSHandler handler, TableName tableName, GetPartitionsArgs args) throws TException { + GetPartitionsResult result = null; Exception ex = null; try { GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args); preHook.accept(tableName); GetPartitionsHandler getPartsHandler = AbstractRequestHandler.offer(handler, getPartitionsRequest); - return getPartsHandler.getResult(); + result = getPartsHandler.getResult(); + return result; } catch (Exception e) { ex = e; throw handleException(ex).defaultTException(); } finally { - postHook.accept(ex); + postHook.accept(Pair.of(result, ex)); } } public static GetPartitionsResult getPartitionNames(Consumer preExecutor, - Consumer postConsumer, IHMSHandler handler, TableName tableName, + Consumer> postConsumer, IHMSHandler handler, TableName tableName, GetPartitionsArgs args) throws TException { Exception ex = null; + GetPartitionsResult result = null; try { preExecutor.accept(tableName); GetPartitionsRequest getPartitionsRequest = new GetPartitionsRequest(tableName, args, true); GetPartitionsHandler getPartNamesHandler = AbstractRequestHandler.offer(handler, getPartitionsRequest); - return getPartNamesHandler.getResult(); + result = getPartNamesHandler.getResult(); + return result; } catch (Exception e) { ex = e; throw handleException(ex).defaultTException(); } finally { - postConsumer.accept(ex); + postConsumer.accept(Pair.of(result, ex)); } } From c463b5e305e6ec5914ab6932a1176bfba1abb57b Mon Sep 17 00:00:00 2001 From: zdeng Date: Tue, 10 Feb 2026 12:37:52 +0800 Subject: [PATCH 9/9] fix-ut-2 --- ...etastoreClientListPartitionsTempTable.java | 2 +- .../hadoop/hive/metastore/HMSHandler.java | 8 +-- .../handler/GetPartitionsHandler.java | 69 +++++++++++++------ 3 files changed, 52 insertions(+), 27 deletions(-) diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java index 006ba6a0584a..3dd4bd94d3ee 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java @@ -234,7 +234,7 @@ public void testListPartitionNamesNoDb() throws Exception { super.testListPartitionNamesNoDb(); } - @Test + @Test(expected = NoSuchObjectException.class) @Override public void testListPartitionsAllNoTable() throws Exception { super.testListPartitionsAllNoTable(); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index e22a47584ac1..ee82ff9e5b08 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -3174,12 +3174,12 @@ public GetPartitionResponse get_partition_req(GetPartitionRequest req) throws MetaException, NoSuchObjectException, TException { String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); TableName tableName = new TableName(catName, req.getDbName(), req.getTblName()); - GetPartitionsHandler.validatePartVals(this, tableName, req.getPartVals()); + String partName = GetPartitionsHandler.validatePartVals(this, tableName, req.getPartVals()); List partitions = GetPartitionsHandler.getPartitions( t -> startTableFunction("get_partition_req", catName, t.getDb(), t.getTable()), rex -> endFunction("get_partition_req", rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tableName.toString()), - this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().part_vals(req.getPartVals()).build(), + this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(List.of(partName)).build(), true); return new GetPartitionResponse(partitions.getFirst()); } @@ -3268,14 +3268,14 @@ public Partition get_partition_with_auth(final String db_name, throws TException { String[] parsedDbName = parseDbName(db_name, conf); TableName tableName = new TableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); - GetPartitionsHandler.validatePartVals(this, tableName, part_vals); + String partName = GetPartitionsHandler.validatePartVals(this, tableName, part_vals); List partitions = GetPartitionsHandler.getPartitions( t -> startFunction("get_partition_with_auth", " : tbl=" + t + samplePartitionValues(part_vals) + getGroupsCountAndUsername(user_name,group_names)), rex -> endFunction("get_partition_with_auth", rex.getLeft() != null && rex.getLeft().success(), rex.getRight(), tbl_name), this, tableName, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .part_vals(part_vals).userName(user_name).groupNames(group_names).build(), true); + .partNames(List.of(partName)).userName(user_name).groupNames(group_names).build(), true); return partitions.getFirst(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java index 77b4474925d7..49cb02c8a25a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/GetPartitionsHandler.java @@ -80,7 +80,7 @@ enum GetPartitionsMethod { @Override protected void beforeExecute() throws TException, IOException { - this.args = request.getGetPartitionsArgs(); + args = request.getGetPartitionsArgs(); if (request.isGetPartitionValues()) { getMethod = GetPartitionsMethod.VALUES; } else if (args.getExpr() != null) { @@ -95,16 +95,16 @@ protected void beforeExecute() throws TException, IOException { getMethod = GetPartitionsMethod.ALL; } - this.catName = normalizeIdentifier(request.getTableName().getCat()); - this.dbName = normalizeIdentifier(request.getTableName().getDb()); - this.tblName = normalizeIdentifier(request.getTableName().getTable()); - this.conf = handler.getConf(); - this.rs = handler.getMS(); - this.filterHook = handler.getMetaFilterHook(); - this.isServerFilterEnabled = filterHook != null; + catName = normalizeIdentifier(request.getTableName().getCat()); + dbName = normalizeIdentifier(request.getTableName().getDb()); + tblName = normalizeIdentifier(request.getTableName().getTable()); + conf = handler.getConf(); + rs = handler.getMS(); + filterHook = handler.getMetaFilterHook(); + isServerFilterEnabled = filterHook != null; GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName); getTableRequest.setCatName(catName); - this.table = handler.get_table_core(getTableRequest); + table = handler.get_table_core(getTableRequest); ((HMSHandler) handler).firePreEvent(new PreReadTableEvent(table, handler)); authorizeTableForPartitionMetadata(); @@ -148,11 +148,9 @@ private GetPartitionsResult getPartitionValues() throws MetaException { return 
new GetPartitionsResult<>(List.of(resp), true); } - private void checkLimitNumberOfPartitionsByPs(List partVals, int requestMax) - throws TException { + private void checkLimitNumberOfPartitionsByPs(List partVals, int requestMax) throws TException { if (exceedsPartitionFetchLimit(requestMax)) { - checkLimitNumberOfPartitions(tblName, rs.getNumPartitionsByPs(catName, dbName, tblName, - partVals)); + checkLimitNumberOfPartitions(tblName, rs.getNumPartitionsByPs(catName, dbName, tblName, partVals)); } } @@ -185,8 +183,7 @@ private GetPartitionsResult getPartitionsByFilter() throws TException * @throws NoSuchObjectException * @throws MetaException */ - private void authorizeTableForPartitionMetadata() - throws NoSuchObjectException, MetaException { + private void authorizeTableForPartitionMetadata() throws NoSuchObjectException, MetaException { FilterUtils.checkDbAndTableFilters( isServerFilterEnabled, filterHook, catName, dbName, tblName); } @@ -197,7 +194,11 @@ private GetPartitionsResult getPartitionsByNames() throws TException rs.openTransaction(); try { checkLimitNumberOfPartitions(tblName, args.getPartNames().size()); - ret = rs.getPartitionsByNames(catName, dbName, tblName, args); + if (args.getUserName() != null) { + ret = rs.listPartitionsPsWithAuth(catName, dbName, tblName, args); + } else { + ret = rs.getPartitionsByNames(catName, dbName, tblName, args); + } ret = FilterUtils.filterPartitionsIfEnabled(isServerFilterEnabled, filterHook, ret); // If requested add column statistics in each of the partition objects @@ -467,7 +468,7 @@ public boolean isFetchPartNames() { public static List getPartitions(Consumer preHook, Consumer> postHook, IHMSHandler handler, TableName tableName, - GetPartitionsArgs args, boolean assumeResult) throws NoSuchObjectException, MetaException { + GetPartitionsArgs args, boolean assumeUniqResult) throws NoSuchObjectException, MetaException { Exception ex = null; GetPartitionsResult result = null; try { @@ -477,14 +478,37 @@ public 
static List getPartitions(Consumer preHook, AbstractRequestHandler.offer(handler, getPartitionsRequest); result = getPartsHandler.getResult(); List partitions = result.result(); - if (assumeResult && (partitions == null || partitions.isEmpty())) { - // Create a new dummy GetPartitionsResult for postHook to consume - result = new GetPartitionsResult(List.of(), false); - throw new NoSuchObjectException(tableName + " partition not found"); + if (assumeUniqResult) { + List partitionKeys = getPartsHandler.table.getPartitionKeys(); + String requestPartName = null; + if (args.getPart_vals() != null) { + requestPartName = Warehouse.makePartName(partitionKeys, args.getPart_vals()); + } else if (args.getPartNames() != null) { + requestPartName = args.getPartNames().getFirst(); + } + if (partitions == null || partitions.isEmpty()) { + throw new NoSuchObjectException(tableName + " partition: " + requestPartName + " not found"); + } else if (partitions.size() > 1) { + throw new MetaException( + "Expecting only one partition but more than one partitions are found."); + } else { + // See ObjectStore getPartitionWithAuth + // We need to compare the partition name with the requested name, since some DBs + // (like MySQL, Derby) consider 'a' = 'a ' whereas others (like Postgres, + // Oracle) don't exhibit this problem. 
+ Partition partition = partitions.getFirst(); + String partName = Warehouse.makePartName(partitionKeys, partition.getValues()); + if (!partName.equals(requestPartName)) { + throw new MetaException("Expecting a partition with name " + requestPartName + + ", but metastore is returning a partition with name " + partName + "."); + } + } } return partitions; } catch (Exception e) { ex = e; + // Create a new dummy GetPartitionsResult for postHook to consume + result = new GetPartitionsResult(List.of(), false); throw handleException(e).throwIfInstance(NoSuchObjectException.class, MetaException.class) .defaultMetaException(); } finally { @@ -534,7 +558,7 @@ public static GetPartitionsResult getPartitionNames(Consumer } } - public static void validatePartVals(IHMSHandler handler, + public static String validatePartVals(IHMSHandler handler, TableName tableName, List partVals) throws MetaException, NoSuchObjectException { if (partVals == null || partVals.isEmpty()) { throw new MetaException("The partVals is null or empty"); @@ -547,5 +571,6 @@ public static void validatePartVals(IHMSHandler handler, throw new MetaException("Unmatched partition values, partition keys size: " + size + ", partition values size: " + partVals.size()); } + return Warehouse.makePartName(table.getPartitionKeys(), partVals); } }