diff --git a/dev-support/pmd/pmd-ruleset.xml b/dev-support/pmd/pmd-ruleset.xml
index d03a463def62..63371ac20a1d 100644
--- a/dev-support/pmd/pmd-ruleset.xml
+++ b/dev-support/pmd/pmd-ruleset.xml
@@ -45,7 +45,9 @@
+
+
diff --git a/hadoop-hdds/cli-common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/cli-common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index da46c2600389..6d6463b09406 100644
--- a/hadoop-hdds/cli-common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ b/hadoop-hdds/cli-common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -134,22 +134,23 @@ protected PrintWriter err() {
}
private static String handleFileSystemException(FileSystemException e) {
- String errorMessage = e.getMessage();
+ StringBuilder sb = new StringBuilder();
+ sb.append("Error: ");
// If reason is set, return the exception's message as it is.
// Otherwise, construct a custom message based on the type of exception
if (e.getReason() == null) {
if (e instanceof NoSuchFileException) {
- errorMessage = "File not found: " + errorMessage;
+ sb.append("File not found: ");
} else if (e instanceof AccessDeniedException) {
- errorMessage = "Access denied: " + errorMessage;
+ sb.append("Access denied: ");
} else if (e instanceof FileAlreadyExistsException) {
- errorMessage = "File already exists: " + errorMessage;
+ sb.append("File already exists: ");
} else {
- errorMessage = e.getClass().getSimpleName() + ": " + errorMessage;
+ sb.append(e.getClass().getSimpleName()).append(": ");
}
}
- return "Error: " + errorMessage;
+ return sb.append(e.getMessage()).toString();
}
}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 629932b0d371..4b9dd57004c3 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -163,7 +163,9 @@ protected XceiverClientSpi getClient(Pipeline pipeline, boolean topologyAware)
private String getPipelineCacheKey(Pipeline pipeline,
boolean topologyAware) {
- String key = pipeline.getId().getId().toString() + pipeline.getType();
+ StringBuilder key = new StringBuilder()
+ .append(pipeline.getId().getId())
+ .append(pipeline.getType());
boolean isEC = pipeline.getType() == HddsProtos.ReplicationType.EC;
if (topologyAware || isEC) {
try {
@@ -183,7 +185,8 @@ private String getPipelineCacheKey(Pipeline pipeline,
// Standalone port is chosen since all datanodes should have a
// standalone port regardless of version and this port should not
// have any collisions.
- key += closestNode.getHostName() + closestNode.getStandalonePort();
+ key.append(closestNode.getHostName())
+ .append(closestNode.getStandalonePort());
} catch (IOException e) {
LOG.error("Failed to get closest node to create pipeline cache key:" +
e.getMessage());
@@ -194,13 +197,13 @@ private String getPipelineCacheKey(Pipeline pipeline,
// Append user short name to key to prevent a different user
// from using same instance of xceiverClient.
try {
- key += UserGroupInformation.getCurrentUser().getShortUserName();
+ key.append(UserGroupInformation.getCurrentUser().getShortUserName());
} catch (IOException e) {
LOG.error("Failed to get current user to create pipeline cache key:" +
e.getMessage());
}
}
- return key;
+ return key.toString();
}
/**
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
index 73eaa7b74468..c71db0e41ed4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
@@ -597,13 +597,12 @@ protected void loadDataBuffersFromStream()
} catch (ExecutionException ee) {
boolean added = failedDataIndexes.add(index);
Throwable t = ee.getCause() != null ? ee.getCause() : ee;
-        String msg = "{}: error reading [{}]";
-        if (added) {
-          msg += ", marked as failed";
-        } else {
-          msg += ", already had failed"; // should not really happen
-        }
-        LOG.info(msg, this, index, t);
+        // Build the log pattern in a single concatenation expression: no
+        // String += accumulation (keeps PMD quiet) and no StringBuilder
+        // churn for a one-shot format string.
+        // "already had failed" should not really happen.
+        String msg = "{}: error reading [{}]"
+            + (added ? ", marked as failed" : ", already had failed");
+        LOG.info(msg, this, index, t);
exceptionOccurred = true;
} catch (InterruptedException ie) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
index 618c9f9b4863..ef21de8c6c9b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java
@@ -62,16 +62,20 @@ public RocksDBCheckpoint createCheckpoint(String parentDir, String name) {
try {
long currentTime = System.currentTimeMillis();
- String checkpointDir = StringUtils.EMPTY;
+ StringBuilder checkpointDir = new StringBuilder();
if (StringUtils.isNotEmpty(checkpointNamePrefix)) {
- checkpointDir += checkpointNamePrefix;
+ checkpointDir.append(checkpointNamePrefix);
}
+
if (name == null) {
- name = "_" + RDB_CHECKPOINT_DIR_PREFIX + currentTime;
+ checkpointDir.append('_')
+ .append(RDB_CHECKPOINT_DIR_PREFIX)
+ .append(currentTime);
+ } else {
+ checkpointDir.append(name);
}
- checkpointDir += name;
- Path checkpointPath = Paths.get(parentDir, checkpointDir);
+ Path checkpointPath = Paths.get(parentDir, checkpointDir.toString());
Instant start = Instant.now();
// Flush the DB WAL and mem table.
diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java
index 1c4039be18e1..c3277d038beb 100644
--- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java
+++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java
@@ -162,13 +162,12 @@ public Void call() throws Exception {
if (cnt >= limit) {
break;
}
-        String subPath = subPathDU.path("path").asText("");
// differentiate key from other types
-        if (!subPathDU.path("isKey").asBoolean(false)) {
-          subPath += OM_KEY_PREFIX;
-        }
+        // non-keys (directories/buckets) get a trailing delimiter appended;
+        // single concatenation expression avoids String += reassignment
+        boolean isKey = subPathDU.path("isKey").asBoolean(false);
+        String subPath = subPathDU.path("path").asText("")
+            + (isKey ? "" : OM_KEY_PREFIX);
long size = subPathDU.path("size").asLong(-1);
long sizeWithReplica = subPathDU.path("sizeWithReplica").asLong(-1);
if (subPath.startsWith(seekStr)) {
printDURow(subPath, size, sizeWithReplica);
++cnt;
diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java
index c5477e0cb0ec..f261f2579d1b 100644
--- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java
+++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/ozone/admin/om/ListOpenFilesSubCommand.java
@@ -142,40 +142,42 @@ private void printOpenKeysList(ListOpenFilesResult res) {
for (OpenKeySession e : openFileList) {
long clientId = e.getId();
OmKeyInfo omKeyInfo = e.getKeyInfo();
- String line = clientId + "\t" + Instant.ofEpochMilli(omKeyInfo.getCreationTime()) + "\t";
+ StringBuilder line = new StringBuilder()
+ .append(clientId).append('\t')
+ .append(Instant.ofEpochMilli(omKeyInfo.getCreationTime())).append('\t');
if (omKeyInfo.isHsync()) {
String hsyncClientIdStr =
omKeyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID);
long hsyncClientId = Long.parseLong(hsyncClientIdStr);
if (clientId == hsyncClientId) {
- line += "Yes\t\t";
+ line.append("Yes\t\t");
} else {
// last hsync'ed with a different client ID than the client that
// initially opens the file (!)
- line += "Yes w/ cid " + hsyncClientIdStr + "\t";
+ line.append("Yes w/ cid ").append(hsyncClientIdStr).append('\t');
}
if (showDeleted) {
if (omKeyInfo.getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY)) {
- line += "Yes\t\t";
+ line.append("Yes\t\t");
} else {
- line += "No\t\t";
+ line.append("No\t\t");
}
}
if (showOverwritten) {
if (omKeyInfo.getMetadata().containsKey(OzoneConsts.OVERWRITTEN_HSYNC_KEY)) {
- line += "Yes\t";
+ line.append("Yes\t");
} else {
- line += "No\t";
+ line.append("No\t");
}
}
} else {
- line += showDeleted ? "No\t\tNo\t\t" : "No\t\t";
- line += showOverwritten ? "No\t" : "";
+ line.append(showDeleted ? "No\t\tNo\t\t" : "No\t\t");
+ line.append(showOverwritten ? "No\t" : "");
}
- line += getFullPathFromKeyInfo(omKeyInfo);
+ line.append(getFullPathFromKeyInfo(omKeyInfo));
System.out.println(line);
}
@@ -222,16 +224,16 @@ private String getMessageString(ListOpenFilesResult res, List op
* @return the command to get the next batch of open keys
*/
private String getCmdForNextBatch(String lastElementFullPath) {
- String nextBatchCmd = "ozone admin om lof " + omAddressOptions;
+ StringBuilder nextBatchCmd = new StringBuilder("ozone admin om lof ").append(omAddressOptions);
if (json) {
- nextBatchCmd += " --json";
+ nextBatchCmd.append(" --json");
}
- nextBatchCmd += " --length=" + limit;
+ nextBatchCmd.append(" --length=").append(limit);
if (pathPrefix != null && !pathPrefix.isEmpty()) {
- nextBatchCmd += " --prefix=" + pathPrefix;
+ nextBatchCmd.append(" --prefix=").append(pathPrefix);
}
- nextBatchCmd += " --start=" + lastElementFullPath;
- return nextBatchCmd;
+ nextBatchCmd.append(" --start=").append(lastElementFullPath);
+ return nextBatchCmd.toString();
}
private String getFullPathFromKeyInfo(OmKeyInfo oki) {
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java
index e5a9f2aa2283..32accb32204f 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java
@@ -61,18 +61,18 @@ private void listKeysInsideBucket(OzoneClient client, OzoneAddress address)
String volumeName = address.getVolumeName();
String bucketName = address.getBucketName();
String snapshotNameWithIndicator = address.getSnapshotNameWithIndicator();
- String keyPrefix = "";
+ StringBuilder keyPrefix = new StringBuilder();
if (!Strings.isNullOrEmpty(snapshotNameWithIndicator)) {
- keyPrefix += snapshotNameWithIndicator;
+ keyPrefix.append(snapshotNameWithIndicator);
if (!Strings.isNullOrEmpty(prefixFilter.getPrefix())) {
- keyPrefix += "/";
+ keyPrefix.append('/');
}
}
if (!Strings.isNullOrEmpty(prefixFilter.getPrefix())) {
- keyPrefix += prefixFilter.getPrefix();
+ keyPrefix.append(prefixFilter.getPrefix());
}
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
@@ -82,7 +82,7 @@ private void listKeysInsideBucket(OzoneClient client, OzoneAddress address)
bucket.setListCacheSize(maxKeyLimit);
}
Iterator<? extends OzoneKey> keyIterator = bucket.listKeys(
- keyPrefix, listOptions.getStartItem());
+ keyPrefix.toString(), listOptions.getStartItem());
int counter = printAsJsonArray(keyIterator, maxKeyLimit);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 84722ede6294..80de9df1b30e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -192,7 +192,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath(
List<OzoneAcl> acls = omBucketInfo.getAcls();
long lastKnownParentId = omBucketInfo.getObjectID();
- String dbDirName = ""; // absolute path for trace logs
+ StringBuilder dbDirName = new StringBuilder(); // absolute path for trace logs
// for better logging
StringBuilder fullKeyPath = new StringBuilder(bucketKey);
while (elements.hasNext()) {
@@ -219,7 +219,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath(
OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
get(dbNodeName);
if (omDirInfo != null) {
- dbDirName += omDirInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER;
+ dbDirName.append(omDirInfo.getName()).append(OzoneConsts.OZONE_URI_DELIMITER);
if (elements.hasNext()) {
result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
lastKnownParentId = omDirInfo.getObjectID();
@@ -264,7 +264,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath(
}
String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
- bucketName, dbDirName);
+ bucketName, dbDirName.toString());
LOG.trace("Acls from parent {} are : {}", dbDirKeyName, acls);
return new OMPathInfoWithFSO(leafNodeName, lastKnownParentId, missing,
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
index 38038acb6e17..f260347f31b7 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -100,15 +100,16 @@ public abstract OmDirectoryInfo getDirInfo(String[] names)
* @return subpath
*/
public static String buildSubpath(String path, String nextLevel) {
-    String subpath = path;
-    if (!subpath.startsWith(OM_KEY_PREFIX)) {
-      subpath = OM_KEY_PREFIX + subpath;
-    }
-    subpath = removeTrailingSlashIfNeeded(subpath);
+    // Ensure exactly one leading delimiter, then strip any trailing one.
+    if (!path.startsWith(OM_KEY_PREFIX)) {
+      path = OM_KEY_PREFIX + path;
+    }
+    StringBuilder subpath =
+        new StringBuilder(removeTrailingSlashIfNeeded(path));
if (nextLevel != null) {
-      subpath = subpath + OM_KEY_PREFIX + nextLevel;
+      // keep the delimiter between the base path and the next level,
+      // matching the original "subpath + OM_KEY_PREFIX + nextLevel"
+      subpath.append(OM_KEY_PREFIX).append(nextLevel);
}
-    return subpath;
+    return subpath.toString();
}
/**