This is an automated email from the ASF dual-hosted git repository.
szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
     new 77138b884a HDDS-13254. Change table iterator to optionally read key or value. (#8621)
77138b884a is described below
commit 77138b884a8b0ff101dc04c09524b67705e749d1
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Sat Jun 14 13:35:39 2025 -0700
HDDS-13254. Change table iterator to optionally read key or value. (#8621)
---
.../ozone/container/metadata/DatanodeTable.java | 11 +---
.../hdds/utils/db/RDBStoreAbstractIterator.java | 4 +-
.../hdds/utils/db/RDBStoreByteArrayIterator.java | 11 +++-
.../hdds/utils/db/RDBStoreCodecBufferIterator.java | 10 ++--
.../org/apache/hadoop/hdds/utils/db/RDBTable.java | 28 ++++------
.../org/apache/hadoop/hdds/utils/db/Table.java | 61 +++++++++++++++-------
.../apache/hadoop/hdds/utils/db/TypedTable.java | 42 +++++++--------
.../hadoop/hdds/utils/db/InMemoryTestTable.java | 7 +--
.../utils/db/TestRDBStoreByteArrayIterator.java | 24 +++++++--
.../utils/db/TestRDBStoreCodecBufferIterator.java | 6 +--
.../hadoop/hdds/utils/db/TestRDBTableStore.java | 2 +-
.../hdds/utils/db/TestTypedRDBTableStore.java | 2 +-
.../hadoop/hdds/utils/db/TestTypedTable.java | 45 ++++++++++++++++
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 2 +-
.../hadoop/ozone/om/service/QuotaRepairTask.java | 12 +++--
15 files changed, 171 insertions(+), 96 deletions(-)
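
In short: Table#iterator() and Table#iterator(prefix) become default methods that delegate to a new Table#iterator(prefix, KeyValueIterator.Type) overload, where Type is NEITHER, KEY_ONLY, VALUE_ONLY or KEY_AND_VALUE, so callers can skip copying and decoding the side of each entry they do not need (RDBTable#isEmpty below, for example, passes NEITHER). A minimal caller sketch, not part of this patch -- the table variable and String types here are hypothetical:

    // Count keys under a prefix without copying or decoding any values.
    static long countKeys(Table<String, String> table, String prefix) throws IOException {
      long count = 0;
      try (Table.KeyValueIterator<String, String> iter =
          table.iterator(prefix, Table.KeyValueIterator.Type.KEY_ONLY)) {
        while (iter.hasNext()) {
          iter.next(); // with KEY_ONLY the value side stays empty and decodes to null
          count++;
        }
      }
      return count;
    }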
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
index ec9359fb37..7f1aba66aa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
@@ -23,7 +23,6 @@
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
/**
* Wrapper class to represent a table in a datanode RocksDB instance.
@@ -75,15 +74,7 @@ public void deleteWithBatch(BatchOperation batch, KEY key)
}
@Override
-  public final TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator() {
-    throw new UnsupportedOperationException("Iterating tables directly is not" +
-        " supported for datanode containers due to differing schema " +
-        "version.");
-  }
-
-  @Override
-  public final TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator(
-      KEY prefix) {
+  public final KeyValueIterator<KEY, VALUE> iterator(KEY prefix,
+      KeyValueIterator.Type type) {
     throw new UnsupportedOperationException("Iterating tables directly is not" +
         " supported for datanode containers due to differing schema " +
         "version.");
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java
index 75104a55ed..1e36d6bd07 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java
@@ -25,12 +25,12 @@
import org.slf4j.LoggerFactory;
/**
- * An abstract {@link TableIterator} to iterate raw {@link Table.KeyValue}s.
+ * An abstract {@link Table.KeyValueIterator} to iterate raw {@link Table.KeyValue}s.
*
* @param <RAW> the raw type.
*/
abstract class RDBStoreAbstractIterator<RAW>
- implements TableIterator<RAW, Table.KeyValue<RAW, RAW>> {
+ implements Table.KeyValueIterator<RAW, RAW> {
private static final Logger LOG =
LoggerFactory.getLogger(RDBStoreAbstractIterator.class);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
index b821fd932a..10651fa7d2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
@@ -25,10 +25,15 @@
* RocksDB store iterator using the byte[] API.
*/
class RDBStoreByteArrayIterator extends RDBStoreAbstractIterator<byte[]> {
+ private static final byte[] EMPTY = {};
+
+ private final Type type;
+
RDBStoreByteArrayIterator(ManagedRocksIterator iterator,
- RDBTable table, byte[] prefix) {
+ RDBTable table, byte[] prefix, Type type) {
super(iterator, table,
prefix == null ? null : Arrays.copyOf(prefix, prefix.length));
+ this.type = type;
seekToFirst();
}
@@ -40,7 +45,9 @@ byte[] key() {
@Override
Table.KeyValue<byte[], byte[]> getKeyValue() {
final ManagedRocksIterator i = getRocksDBIterator();
- return Table.newKeyValue(i.get().key(), i.get().value());
+ final byte[] key = type.readKey() ? i.get().key() : EMPTY;
+ final byte[] value = type.readValue() ? i.get().value() : EMPTY;
+ return Table.newKeyValue(key, value);
}
@Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
index 107b71ee50..96ae01b721 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
@@ -33,16 +33,16 @@ class RDBStoreCodecBufferIterator extends RDBStoreAbstractIterator<CodecBuffer>
private final AtomicBoolean closed = new AtomicBoolean();
RDBStoreCodecBufferIterator(ManagedRocksIterator iterator, RDBTable table,
- CodecBuffer prefix) {
+ CodecBuffer prefix, Type type) {
super(iterator, table, prefix);
final String name = table != null ? table.getName() : null;
this.keyBuffer = new Buffer(
new CodecBuffer.Capacity(name + "-iterator-key", 1 << 10),
- buffer -> getRocksDBIterator().get().key(buffer));
+        type.readKey() ? buffer -> getRocksDBIterator().get().key(buffer) : null);
this.valueBuffer = new Buffer(
new CodecBuffer.Capacity(name + "-iterator-value", 4 << 10),
- buffer -> getRocksDBIterator().get().value(buffer));
+        type.readValue() ? buffer -> getRocksDBIterator().get().value(buffer) : null);
seekToFirst();
}
@@ -130,6 +130,10 @@ private void allocate() {
}
CodecBuffer getFromDb() {
+ if (source == null) {
+ return CodecBuffer.getEmptyBuffer();
+ }
+
for (prepare(); ; allocate()) {
final Integer required = buffer.putFromSource(source);
if (required == null) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index f72b0084bd..d463d37430 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -94,7 +94,7 @@ public void putWithBatch(BatchOperation batch, byte[] key, byte[] value)
@Override
public boolean isEmpty() throws IOException {
-    try (TableIterator<byte[], KeyValue<byte[], byte[]>> keyIter = iterator()) {
+    try (KeyValueIterator<byte[], byte[]> keyIter = iterator((byte[]) null, KeyValueIterator.Type.NEITHER)) {
keyIter.seekToFirst();
return !keyIter.hasNext();
}
@@ -210,22 +210,16 @@ public void deleteWithBatch(BatchOperation batch, byte[] key)
}
@Override
- public TableIterator<byte[], KeyValue<byte[], byte[]>> iterator()
- throws IOException {
- return iterator((byte[])null);
- }
-
- @Override
-  public TableIterator<byte[], KeyValue<byte[], byte[]>> iterator(byte[] prefix)
-      throws IOException {
+  public KeyValueIterator<byte[], byte[]> iterator(byte[] prefix, KeyValueIterator.Type type)
+      throws RocksDatabaseException {
return new RDBStoreByteArrayIterator(db.newIterator(family, false), this,
- prefix);
+ prefix, type);
}
- TableIterator<CodecBuffer, KeyValue<CodecBuffer, CodecBuffer>> iterator(
- CodecBuffer prefix) throws IOException {
+ KeyValueIterator<CodecBuffer, CodecBuffer> iterator(
+ CodecBuffer prefix, KeyValueIterator.Type type) throws IOException {
return new RDBStoreCodecBufferIterator(db.newIterator(family, false),
- this, prefix);
+ this, prefix, type);
}
@Override
@@ -262,8 +256,7 @@ public List<KeyValue<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
@Override
public void deleteBatchWithPrefix(BatchOperation batch, byte[] prefix)
throws IOException {
- try (TableIterator<byte[], KeyValue<byte[], byte[]>> iter
- = iterator(prefix)) {
+ try (KeyValueIterator<byte[], byte[]> iter = iterator(prefix)) {
while (iter.hasNext()) {
deleteWithBatch(batch, iter.next().getKey());
}
@@ -273,7 +266,7 @@ public void deleteBatchWithPrefix(BatchOperation batch, byte[] prefix)
@Override
public void dumpToFileWithPrefix(File externalFile, byte[] prefix)
throws IOException {
-    try (TableIterator<byte[], KeyValue<byte[], byte[]>> iter = iterator(prefix);
+ try (KeyValueIterator<byte[], byte[]> iter = iterator(prefix);
RDBSstFileWriter fileWriter = new RDBSstFileWriter(externalFile)) {
while (iter.hasNext()) {
final KeyValue<byte[], byte[]> entry = iter.next();
@@ -298,8 +291,7 @@ private List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey,
"Invalid count given " + count + ", count must be greater than 0");
}
final List<KeyValue<byte[], byte[]>> result = new ArrayList<>();
- try (TableIterator<byte[], KeyValue<byte[], byte[]>> it
- = iterator(prefix)) {
+ try (KeyValueIterator<byte[], byte[]> it = iterator(prefix)) {
if (startKey == null) {
it.seekToFirst();
} else {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 5097cb0efc..9add35f5d5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -153,21 +153,24 @@ default VALUE getReadCopy(KEY key) throws IOException {
*/
void deleteRange(KEY beginKey, KEY endKey) throws IOException;
- /**
- * Returns the iterator for this metadata store.
- *
- * @return MetaStoreIterator
- * @throws IOException on failure.
- */
- TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator()
- throws IOException;
+ /** The same as iterator(null). */
+ default KeyValueIterator<KEY, VALUE> iterator() throws IOException {
+ return iterator(null);
+ }
+
+ /** The same as iterator(prefix, KEY_AND_VALUE). */
+  default KeyValueIterator<KEY, VALUE> iterator(KEY prefix) throws IOException {
+ return iterator(prefix, KeyValueIterator.Type.KEY_AND_VALUE);
+ }
/**
- * Returns a prefixed iterator for this metadata store.
- * @param prefix
- * @return MetaStoreIterator
+ * Iterate the elements in this table.
+ *
+ * @param prefix The prefix of the elements to be iterated.
+ * @param type Specify whether key and/or value are required.
+ * @return an iterator.
*/
- TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator(KEY prefix)
+ KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type)
throws IOException;
/**
@@ -328,12 +331,12 @@ void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
final class KeyValue<K, V> {
private final K key;
private final V value;
- private final int rawSize;
+ private final int valueByteSize;
- private KeyValue(K key, V value, int rawSize) {
+ private KeyValue(K key, V value, int valueByteSize) {
this.key = key;
this.value = value;
- this.rawSize = rawSize;
+ this.valueByteSize = valueByteSize;
}
public K getKey() {
@@ -344,8 +347,8 @@ public V getValue() {
return value;
}
- public int getRawSize() {
- return rawSize;
+ public int getValueByteSize() {
+ return valueByteSize;
}
@Override
@@ -375,12 +378,32 @@ static <K, V> KeyValue<K, V> newKeyValue(K key, V value) {
return newKeyValue(key, value, 0);
}
- static <K, V> KeyValue<K, V> newKeyValue(K key, V value, int rawSize) {
- return new KeyValue<>(key, value, rawSize);
+ static <K, V> KeyValue<K, V> newKeyValue(K key, V value, int valueByteSize) {
+ return new KeyValue<>(key, value, valueByteSize);
}
/** A {@link TableIterator} to iterate {@link KeyValue}s. */
interface KeyValueIterator<KEY, VALUE>
extends TableIterator<KEY, KeyValue<KEY, VALUE>> {
+
+ /** The iterator type. */
+ enum Type {
+ /** Neither read key nor value. */
+ NEITHER,
+ /** Read key only. */
+ KEY_ONLY,
+ /** Read value only. */
+ VALUE_ONLY,
+ /** Read both key and value. */
+ KEY_AND_VALUE;
+
+ boolean readKey() {
+ return (this.ordinal() & KEY_ONLY.ordinal()) != 0;
+ }
+
+ boolean readValue() {
+ return (this.ordinal() & VALUE_ONLY.ordinal()) != 0;
+ }
+ }
}
}
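
Note on the enum above: the constants are declared so their ordinals double as bit flags (NEITHER=0b00, KEY_ONLY=0b01, VALUE_ONLY=0b10, KEY_AND_VALUE=0b11). readKey() tests bit 0 via KEY_ONLY.ordinal() and readValue() tests bit 1 via VALUE_ONLY.ordinal(), so KEY_AND_VALUE enables both. A sanity-check sketch (same-package access assumed, since readKey/readValue are package-private):

    // Prints the read flags implied by each iterator type's ordinal.
    for (Table.KeyValueIterator.Type t : Table.KeyValueIterator.Type.values()) {
      System.out.println(t + ": readKey=" + t.readKey() + ", readValue=" + t.readValue());
    }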
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index c3c3935211..abb5edf81d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -92,7 +92,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
if (cacheType == CacheType.FULL_CACHE) {
cache = new FullTableCache<>(threadNamePrefix);
//fill cache
-      try (TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator = iterator()) {
+ try (KeyValueIterator<KEY, VALUE> tableIterator = iterator()) {
while (tableIterator.hasNext()) {
KeyValue< KEY, VALUE > kv = tableIterator.next();
@@ -124,11 +124,11 @@ private byte[] encodeValue(VALUE value) throws IOException {
}
private KEY decodeKey(byte[] key) throws CodecException {
- return key == null ? null : keyCodec.fromPersistedFormat(key);
+    return key != null && key.length > 0 ? keyCodec.fromPersistedFormat(key) : null;
}
private VALUE decodeValue(byte[] value) throws CodecException {
- return value == null ? null : valueCodec.fromPersistedFormat(value);
+    return value != null && value.length > 0 ? valueCodec.fromPersistedFormat(value) : null;
}
@Override
@@ -395,17 +395,11 @@ public void deleteRange(KEY beginKey, KEY endKey) throws IOException {
}
@Override
- public Table.KeyValueIterator<KEY, VALUE> iterator() throws IOException {
- return iterator(null);
- }
-
- @Override
- public Table.KeyValueIterator<KEY, VALUE> iterator(KEY prefix)
- throws IOException {
+  public Table.KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type) throws IOException {
if (supportCodecBuffer) {
final CodecBuffer prefixBuffer = encodeKeyCodecBuffer(prefix);
try {
- return newCodecBufferTableIterator(rawTable.iterator(prefixBuffer));
+        return newCodecBufferTableIterator(rawTable.iterator(prefixBuffer, type));
} catch (Throwable t) {
if (prefixBuffer != null) {
prefixBuffer.release();
@@ -414,7 +408,7 @@ public Table.KeyValueIterator<KEY, VALUE> iterator(KEY prefix)
}
} else {
final byte[] prefixBytes = encodeKey(prefix);
- return new TypedTableIterator(rawTable.iterator(prefixBytes));
+ return new TypedTableIterator(rawTable.iterator(prefixBytes, type));
}
}
@@ -534,7 +528,7 @@ TableCache<KEY, VALUE> getCache() {
}
RawIterator<CodecBuffer> newCodecBufferTableIterator(
- TableIterator<CodecBuffer, KeyValue<CodecBuffer, CodecBuffer>> i) {
+ KeyValueIterator<CodecBuffer, CodecBuffer> i) {
return new RawIterator<CodecBuffer>(i) {
@Override
AutoCloseSupplier<CodecBuffer> convert(KEY key) throws IOException {
@@ -554,10 +548,14 @@ public CodecBuffer get() {
@Override
KeyValue<KEY, VALUE> convert(KeyValue<CodecBuffer, CodecBuffer> raw)
throws CodecException {
- final int rawSize = raw.getValue().readableBytes();
- final KEY key = keyCodec.fromCodecBuffer(raw.getKey());
- final VALUE value = valueCodec.fromCodecBuffer(raw.getValue());
- return Table.newKeyValue(key, value, rawSize);
+ final CodecBuffer keyBuffer = raw.getKey();
+        final KEY key = keyBuffer.readableBytes() > 0 ? keyCodec.fromCodecBuffer(keyBuffer) : null;
+
+ final CodecBuffer valueBuffer = raw.getValue();
+ final int valueByteSize = valueBuffer.readableBytes();
+        final VALUE value = valueByteSize > 0 ? valueCodec.fromCodecBuffer(valueBuffer) : null;
+
+ return Table.newKeyValue(key, value, valueByteSize);
}
};
}
@@ -566,8 +564,7 @@ KeyValue<KEY, VALUE> convert(KeyValue<CodecBuffer, CodecBuffer> raw) throws Code
* Table Iterator implementation for strongly typed tables.
*/
public class TypedTableIterator extends RawIterator<byte[]> {
- TypedTableIterator(
- TableIterator<byte[], KeyValue<byte[], byte[]>> rawIterator) {
+ TypedTableIterator(KeyValueIterator<byte[], byte[]> rawIterator) {
super(rawIterator);
}
@@ -579,7 +576,8 @@ AutoCloseSupplier<byte[]> convert(KEY key) throws IOException {
@Override
     KeyValue<KEY, VALUE> convert(KeyValue<byte[], byte[]> raw) throws CodecException {
-      return Table.newKeyValue(decodeKey(raw.getKey()), decodeValue(raw.getValue()));
+      final byte[] valueBytes = raw.getValue();
+      return Table.newKeyValue(decodeKey(raw.getKey()), decodeValue(valueBytes), valueBytes.length);
}
}
@@ -590,9 +588,9 @@ KeyValue<KEY, VALUE> convert(KeyValue<byte[], byte[]> raw) throws CodecException
*/
abstract class RawIterator<RAW>
implements Table.KeyValueIterator<KEY, VALUE> {
- private final TableIterator<RAW, KeyValue<RAW, RAW>> rawIterator;
+ private final KeyValueIterator<RAW, RAW> rawIterator;
- RawIterator(TableIterator<RAW, KeyValue<RAW, RAW>> rawIterator) {
+ RawIterator(KeyValueIterator<RAW, RAW> rawIterator) {
this.rawIterator = rawIterator;
}
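
The TypedTable changes above establish the convention for an unread side: the raw layer hands back an empty byte[] or CodecBuffer, and decodeKey/decodeValue map empty to null instead of invoking the codec (which also means a genuinely zero-length persisted value now decodes to null). A hedged illustration, with a hypothetical longTable:

    try (Table.KeyValueIterator<Long, String> it =
        longTable.iterator(null, Table.KeyValueIterator.Type.VALUE_ONLY)) {
      while (it.hasNext()) {
        Table.KeyValue<Long, String> kv = it.next();
        // kv.getKey() == null; kv.getValue() is decoded; kv.getValueByteSize() is set
      }
    }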
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
index 62e15a0866..bf960afe3e 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
@@ -90,12 +90,7 @@ public void deleteRange(KEY beginKey, KEY endKey) {
}
@Override
- public TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator() {
- throw new UnsupportedOperationException();
- }
-
- @Override
-  public TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> iterator(KEY prefix) {
+  public KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type) {
throw new UnsupportedOperationException();
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
index 0054dde422..0a7da42387 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
@@ -17,6 +17,10 @@
package org.apache.hadoop.hdds.utils.db;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_AND_VALUE;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_ONLY;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.NEITHER;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.VALUE_ONLY;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -68,12 +72,11 @@ public void setup() {
}
RDBStoreByteArrayIterator newIterator() {
- return new RDBStoreByteArrayIterator(managedRocksIterator, null, null);
+    return new RDBStoreByteArrayIterator(managedRocksIterator, null, null, KEY_AND_VALUE);
}
RDBStoreByteArrayIterator newIterator(byte[] prefix) {
- return new RDBStoreByteArrayIterator(
- managedRocksIterator, rocksTableMock, prefix);
+    return new RDBStoreByteArrayIterator(managedRocksIterator, rocksTableMock, prefix, KEY_AND_VALUE);
}
@Test
@@ -298,4 +301,19 @@ public void testNormalPrefixedIterator() throws IOException {
iter.close();
}
+
+ @Test
+ public void testIteratorType() {
+ assertFalse(NEITHER.readKey());
+ assertFalse(NEITHER.readValue());
+
+ assertTrue(KEY_ONLY.readKey());
+ assertFalse(KEY_ONLY.readValue());
+
+ assertFalse(VALUE_ONLY.readKey());
+ assertTrue(VALUE_ONLY.readValue());
+
+ assertTrue(KEY_AND_VALUE.readKey());
+ assertTrue(KEY_AND_VALUE.readValue());
+ }
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java
index 500c465a4d..4f3f282dff 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.utils.db;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_AND_VALUE;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -72,12 +73,11 @@ ManagedRocksIterator newManagedRocksIterator() {
}
RDBStoreCodecBufferIterator newIterator() {
- return new RDBStoreCodecBufferIterator(managedRocksIterator, null, null);
+    return new RDBStoreCodecBufferIterator(managedRocksIterator, null, null, KEY_AND_VALUE);
}
RDBStoreCodecBufferIterator newIterator(CodecBuffer prefix) {
- return new RDBStoreCodecBufferIterator(
- managedRocksIterator, rdbTableMock, prefix);
+    return new RDBStoreCodecBufferIterator(managedRocksIterator, rdbTableMock, prefix, KEY_AND_VALUE);
}
Answer<Integer> newAnswerInt(String name, int b) {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 065e8728e7..e421527b2e 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -614,7 +614,7 @@ static void assertIterator(int expectedCount, String prefix,
assertEquals(prefix,
entry.getKey().substring(0, PREFIX_LENGTH));
assertEquals(entry.getValue().getBytes(StandardCharsets.UTF_8).length,
- entry.getRawSize());
+ entry.getValueByteSize());
}
assertEquals(expectedCount, keyCount);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 58b646c10b..318bfdbc5d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -246,7 +246,7 @@ public void forEachAndIterator() throws Exception {
@Test
public void testIteratorOnException() throws Exception {
RDBTable rdbTable = mock(RDBTable.class);
- when(rdbTable.iterator((CodecBuffer) null))
+    when(rdbTable.iterator((CodecBuffer) null, Table.KeyValueIterator.Type.KEY_AND_VALUE))
.thenThrow(new IOException());
try (Table<String, String> testTable = new TypedTable<>(rdbTable,
StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE)) {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java
index a75de4386e..84e3ddb635 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedTable.java
@@ -17,7 +17,13 @@
package org.apache.hadoop.hdds.utils.db;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_AND_VALUE;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_ONLY;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.NEITHER;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.VALUE_ONLY;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.IOException;
@@ -26,12 +32,14 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.LongFunction;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.cache.TableCache;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
@@ -99,6 +107,7 @@ static <V> Map<Long, V> newMap(LongFunction<V> constructor) {
      final long key = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE) + 1;
put(map, key, constructor);
}
+ System.out.println("generated " + map.size() + " keys");
return map;
}
@@ -143,5 +152,41 @@ public void testContainerIDvsLong() throws Exception {
assertEquals(expected, longValue);
}
}
+
+ // test iterator type
+ try (TypedTable<Long, String> longTable = newTypedTable(
+ 1, LongCodec.get(), StringCodec.get());
+        Table.KeyValueIterator<Long, String> neither = longTable.iterator(null, NEITHER);
+        Table.KeyValueIterator<Long, String> keyOnly = longTable.iterator(null, KEY_ONLY);
+        Table.KeyValueIterator<Long, String> valueOnly = longTable.iterator(null, VALUE_ONLY);
+        Table.KeyValueIterator<Long, String> keyAndValue = longTable.iterator(null, KEY_AND_VALUE)) {
+ while (keyAndValue.hasNext()) {
+ final Table.KeyValue<Long, String> keyValue = keyAndValue.next();
+ final Long expectedKey = Objects.requireNonNull(keyValue.getKey());
+
+        final String expectedValue = Objects.requireNonNull(keyValue.getValue());
+ assertEquals(keys.get(expectedKey).toString(), expectedValue);
+
+ final int expectedValueSize = keyValue.getValueByteSize();
+ assertEquals(expectedValue.length(), expectedValueSize);
+
+ assertKeyValue(expectedKey, null, 0, keyOnly);
+ assertKeyValue(null, expectedValue, expectedValueSize, valueOnly);
+ assertKeyValue(null, null, 0, neither);
+ }
+
+ assertFalse(keyOnly.hasNext());
+ assertFalse(valueOnly.hasNext());
+ assertFalse(neither.hasNext());
+ }
+ }
+
+  static <K, V> void assertKeyValue(K expectedKey, V expectedValue, int expectedValueSize,
+ Table.KeyValueIterator<K, V> iterator) {
+ assertTrue(iterator.hasNext());
+ final KeyValue<K, V> computed = iterator.next();
+ assertEquals(expectedKey, computed.getKey());
+ assertEquals(expectedValue, computed.getValue());
+ assertEquals(expectedValueSize, computed.getValueByteSize());
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 578afc630a..3216948c55 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2189,7 +2189,7 @@ private <T extends WithParentObjectId> DeleteKeysResult gatherSubPathsWithIterat
while (iterator.hasNext() && remainingBufLimit > 0) {
KeyValue<String, T> entry = iterator.next();
T withParentObjectId = entry.getValue();
- long objectSerializedSize = entry.getRawSize();
+ final long objectSerializedSize = entry.getValueByteSize();
       if (!OMFileRequest.isImmediateChild(withParentObjectId.getParentObjectID(),
           parentInfo.getObjectID())) {
processedSubPaths = true;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
index 1204ce2ecc..965fa3dbd0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
@@ -17,6 +17,9 @@
package org.apache.hadoop.ozone.om.service;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_AND_VALUE;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_ONLY;
+import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.VALUE_ONLY;
import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
@@ -47,7 +50,6 @@
import org.apache.hadoop.hdds.server.JsonUtils;
import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
@@ -238,8 +240,8 @@ private void prepareAllBucketInfo(
}
return;
}
- try (TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>>
- iterator = metadataManager.getBucketTable().iterator()) {
+ try (Table.KeyValueIterator<String, OmBucketInfo> iterator
+ = metadataManager.getBucketTable().iterator(null, VALUE_ONLY)) {
while (iterator.hasNext()) {
Table.KeyValue<String, OmBucketInfo> entry = iterator.next();
OmBucketInfo bucketInfo = entry.getValue();
@@ -350,8 +352,8 @@ private <VALUE> void recalculateUsages(
}
int count = 0;
long startTime = Time.monotonicNow();
- try (TableIterator<String, ? extends Table.KeyValue<String, VALUE>>
- keyIter = table.iterator()) {
+ try (Table.KeyValueIterator<String, VALUE> keyIter
+ = table.iterator(null, haveValue ? KEY_AND_VALUE : KEY_ONLY)) {
while (keyIter.hasNext()) {
count++;
kvList.add(keyIter.next());
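
The QuotaRepairTask hunks above show the intended call-site pattern: request only what the loop consumes (VALUE_ONLY for the bucket scan, KEY_ONLY when values are not required). A hedged selector sketch; needKey, needValue and process are hypothetical:

    final Table.KeyValueIterator.Type type = needKey
        ? (needValue ? KEY_AND_VALUE : KEY_ONLY)
        : (needValue ? VALUE_ONLY : NEITHER);
    try (Table.KeyValueIterator<String, OmBucketInfo> it = table.iterator(null, type)) {
      while (it.hasNext()) {
        process(it.next()); // process(...) stands in for the caller's loop body
      }
    }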
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]