Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HDFS-17496. DataNode supports more fine-grained dataset lock based on blockid. #7280

Open
wants to merge 23 commits into
base: trunk
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 17 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ public interface DataNodeLockManager<T extends AutoCloseDataSetLock> {
*/
hfutatzhanghb marked this conversation as resolved.
Show resolved Hide resolved
/**
 * Granularity levels of the DataNode dataset lock, from coarsest to
 * finest: BLOCK_POOl guards an entire block pool, VOLUME a single
 * storage volume within a pool, and DIR one two-level finalized
 * subdirectory ("subdirX/subdirY") within a volume.
 * Note: BLOCK_POOl keeps its historical spelling (lowercase 'l') for
 * compatibility with existing callers.
 */
enum LockLevel {
  BLOCK_POOl,
  VOLUME,
  DIR
}

/**
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdfs.server.datanode;

import java.util.List;

/**
 * Sub-lock naming strategy that reuses the DataNode's on-disk layout:
 * the sub-lock name for a block is the same two-level "subdirX/subdirY"
 * suffix that the layout would place the finalized block under, so lock
 * naming can never drift from the block's actual directory placement.
 */
public class DataNodeLayoutSubLockStrategy implements DataSetSubLockStrategy {
  // Delegate to the shared layout helper so this stays consistent with
  // DatanodeUtil#idToBlockDir.
  @Override
  public String blockIdToSubLock(long blockid) {
    return DatanodeUtil.idToBlockDirSuffixName(blockid);
  }

  // Every possible "subdirX/subdirY" name; used to pre-register locks.
  @Override
  public List<String> getAllSubLockName() {
    return DatanodeUtil.getAllSubDirNameForDataSetLock();
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,13 @@ private String generateLockName(LockLevel level, String... resources) {
+ resources[0] + "volume lock :" + resources[1]);
}
return resources[0] + resources[1];
} else if (resources.length == 3 && level == LockLevel.DIR) {
if (resources[0] == null || resources[1] == null || resources[2] == null) {
throw new IllegalArgumentException("acquire a null dataset lock : "
+ resources[0] + ",volume lock :" + resources[1]
+ ",subdir lock :" + resources[2]);
}
return resources[0] + resources[1] + resources[2];
} else {
throw new IllegalArgumentException("lock level do not match resource");
}
Expand Down Expand Up @@ -156,7 +163,7 @@ public DataSetLockManager(Configuration conf, DataNode dn) {
public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
if (level == LockLevel.BLOCK_POOl) {
return getReadLock(level, resources[0]);
} else {
} else if (level == LockLevel.VOLUME){
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
AutoCloseDataSetLock volLock = getReadLock(level, resources);
volLock.setParentLock(bpLock);
Expand All @@ -165,14 +172,25 @@ public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
resources[0]);
}
return volLock;
} else {
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
volLock.setParentLock(bpLock);
AutoCloseDataSetLock dirLock = getReadLock(level, resources);
dirLock.setParentLock(volLock);
if (openLockTrace) {
LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
resources[0] + resources[1]);
}
return dirLock;
}
}

@Override
public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
if (level == LockLevel.BLOCK_POOl) {
return getWriteLock(level, resources[0]);
} else {
} else if (level == LockLevel.VOLUME) {
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
AutoCloseDataSetLock volLock = getWriteLock(level, resources);
volLock.setParentLock(bpLock);
Expand All @@ -181,6 +199,17 @@ public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
resources[0]);
}
return volLock;
} else {
AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
volLock.setParentLock(bpLock);
AutoCloseDataSetLock dirLock = getWriteLock(level, resources);
dirLock.setParentLock(volLock);
if (openLockTrace) {
LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
resources[0] + resources[1]);
}
return dirLock;
}
}

Expand Down Expand Up @@ -235,8 +264,13 @@ public void addLock(LockLevel level, String... resources) {
String lockName = generateLockName(level, resources);
if (level == LockLevel.BLOCK_POOl) {
lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
} else if (level == LockLevel.VOLUME) {
lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
} else {
lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
lockMap.addLock(generateLockName(LockLevel.VOLUME, resources[0], resources[1]),
new ReentrantReadWriteLock(isFair));
lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdfs.server.datanode;

import java.util.List;

/**
 * This interface is used to generate the sub lock name for a blockid,
 * enabling fine-grained dataset locking below the volume level.
 */
public interface DataSetSubLockStrategy {

  /**
   * Generate sub lock name for the given blockid.
   * @param blockid the block id.
   * @return sub lock name for the input blockid.
   */
  String blockIdToSubLock(long blockid);

  /**
   * Enumerate every sub lock name this strategy can produce; used to
   * pre-register the full set of sub locks up front.
   * @return all possible sub lock names.
   */
  List<String> getAllSubLockName();
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
Expand Down Expand Up @@ -115,6 +117,8 @@ public static boolean dirNoFilesRecursive(
/**
* Get the directory where a finalized block with this ID should be stored.
* Do not attempt to create the directory.
* Note: update {@link DatanodeUtil#idToBlockDirSuffixName(long)} and
* {@link DatanodeUtil#getAllSubDirNameForDataSetLock()} when current method changed.
* @param root the root directory where finalized blocks are stored
* @param blockId
* @return
Expand All @@ -127,6 +131,32 @@ public static File idToBlockDir(File root, long blockId) {
return new File(root, path);
}

/**
* Take an example.
* We hava a block with blockid mapping to:
* "/data1/hadoop/hdfs/datanode/current/BP-xxxx/current/finalized/subdir0/subdir0"
* We return "subdir0/subdir0".
hfutatzhanghb marked this conversation as resolved.
Show resolved Hide resolved
* @param blockId the block id.
* @return two-level subdir string where block will be stored.
*/
public static String idToBlockDirSuffixName(long blockId) {
int d1 = (int) ((blockId >> 16) & 0x1F);
int d2 = (int) ((blockId >> 8) & 0x1F);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How do we explain the operation blockId >> 16 and blockId >> 8 here? Do you mean the same way with idToBlockDir? If that, I think it is better to use the common method to implement it.

+  public static String idToBlockDirSuffix(long blockId) {
+    int d1 = (int) ((blockId >> 16) & 0x1F);
+    int d2 = (int) ((blockId >> 8) & 0x1F);
+    return DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
+        DataStorage.BLOCK_SUBDIR_PREFIX + d2;
+  }
+
   /**
    * Get the directory where a finalized block with this ID should be stored.
    * Do not attempt to create the directory.
@@ -120,10 +127,7 @@ public static boolean dirNoFilesRecursive(
    * @return
    */
   public static File idToBlockDir(File root, long blockId) {
-    int d1 = (int) ((blockId >> 16) & 0x1F);
-    int d2 = (int) ((blockId >> 8) & 0x1F);
-    String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
-        DataStorage.BLOCK_SUBDIR_PREFIX + d2;
+    String path = idToBlockDirSuffix(blockId);
     return new File(root, path);
   }

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Hexiaoqiao yes, blockId >> 16 and blockId >> 8 are the the same way with idToBlockDir.
Have refactor those methods and make literal 0x1F to be a static final field in DatanodeUtil.
Thanks a lot for your suggestions.

return DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
DataStorage.BLOCK_SUBDIR_PREFIX + d2;
}

public static List<String> getAllSubDirNameForDataSetLock() {
hfutatzhanghb marked this conversation as resolved.
Show resolved Hide resolved
List<String> res = new ArrayList<>();
for (int d1 = 0; d1 <= 0x1F; d1++) {
for (int d2 = 0; d2 <= 0x1F; d2++) {
res.add(DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
DataStorage.BLOCK_SUBDIR_PREFIX + d2);
}
}
return res;
}

/**
* @return the FileInputStream for the meta data of the given block.
* @throws FileNotFoundException
Expand Down
Loading
Loading