| | |
| | | */ |
| | | package org.forgerock.opendj.ldap; |
| | | |
| | | import java.util.Arrays; |
| | | |
| | | /** |
| | | * An interface for iteratively reading date from a {@link ByteSequence} . |
| | | * An interface for iteratively reading data from a {@link ByteSequence} . |
| | | * {@code ByteSequenceReader} must be created using the associated |
| | | * {@code ByteSequence}'s {@code asReader()} method. |
| | | */ |
| | | public final class ByteSequenceReader { |
| | | |
| | | private static final int[] DECODE_SIZE = new int[256]; |
| | | static { |
| | | Arrays.fill(DECODE_SIZE, 0, 0x80, 1); |
| | | Arrays.fill(DECODE_SIZE, 0x80, 0xc0, 2); |
| | | Arrays.fill(DECODE_SIZE, 0xc0, 0xe0, 3); |
| | | Arrays.fill(DECODE_SIZE, 0xe0, 0xf0, 4); |
| | | Arrays.fill(DECODE_SIZE, 0xf0, 0xf8, 5); |
| | | Arrays.fill(DECODE_SIZE, 0xf8, 0xfc, 6); |
| | | Arrays.fill(DECODE_SIZE, 0xfc, 0xfe, 7); |
| | | Arrays.fill(DECODE_SIZE, 0xfe, 0x100, 8); |
| | | } |
| | | |
| | | /** The current position in the byte sequence. */ |
| | | private int pos; |
| | | |
| | |
| | | } |
| | | |
| | | /** |
| | | * Relative get method for reading a compacted long value. |
| | | * Compaction reduces the number of bytes needed to hold a long value |
| | | * depending on its magnitude (e.g. a value < 128 is encoded using a single byte). |
| | | * Reads the next bytes at this reader's current position, composing them into a long value |
| | | * according to big-endian byte order, and then increments the position by the size of the |
| | | * encoded long. |
| | | * Note that the maximum value of a compact long is 2^56 - 1. |
| | | * |
| | | * @return The long value at this reader's current position. |
| | | * @throws IndexOutOfBoundsException |
| | | * If there are fewer bytes remaining in this reader than are |
| | | * required to satisfy the request. |
| | | */ |
| | | public long getCompactUnsigned() { |
| | | final int b0 = get(); |
| | | final int size = decodeSize(b0); |
| | | long value; |
| | | switch (size) { |
| | | case 1: |
| | | value = b2l((byte) b0); |
| | | break; |
| | | case 2: |
| | | value = (b0 & 0x3fL) << 8; |
| | | value |= b2l(get()); |
| | | break; |
| | | case 3: |
| | | value = (b0 & 0x1fL) << 16; |
| | | value |= b2l(get()) << 8; |
| | | value |= b2l(get()); |
| | | break; |
| | | case 4: |
| | | value = (b0 & 0x0fL) << 24; |
| | | value |= b2l(get()) << 16; |
| | | value |= b2l(get()) << 8; |
| | | value |= b2l(get()); |
| | | break; |
| | | case 5: |
| | | value = (b0 & 0x07L) << 32; |
| | | value |= b2l(get()) << 24; |
| | | value |= b2l(get()) << 16; |
| | | value |= b2l(get()) << 8; |
| | | value |= b2l(get()); |
| | | break; |
| | | case 6: |
| | | value = (b0 & 0x03L) << 40; |
| | | value |= b2l(get()) << 32; |
| | | value |= b2l(get()) << 24; |
| | | value |= b2l(get()) << 16; |
| | | value |= b2l(get()) << 8; |
| | | value |= b2l(get()); |
| | | break; |
| | | case 7: |
| | | value = (b0 & 0x01L) << 48; |
| | | value |= b2l(get()) << 40; |
| | | value |= b2l(get()) << 32; |
| | | value |= b2l(get()) << 24; |
| | | value |= b2l(get()) << 16; |
| | | value |= b2l(get()) << 8; |
| | | value |= b2l(get()); |
| | | break; |
| | | default: |
| | | value = b2l(get()) << 48; |
| | | value |= b2l(get()) << 40; |
| | | value |= b2l(get()) << 32; |
| | | value |= b2l(get()) << 24; |
| | | value |= b2l(get()) << 16; |
| | | value |= b2l(get()) << 8; |
| | | value |= b2l(get()); |
| | | } |
| | | return value; |
| | | } |
| | | |
| | | private static long b2l(final byte b) { |
| | | return b & 0xffL; |
| | | } |
| | | |
| | | private static int decodeSize(int b) { |
| | | return DECODE_SIZE[b & 0xff]; |
| | | } |
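| | | |
| | | // Illustrative sketch, not part of the original change: a worked example of the compact |
| | | // encoding read back by getCompactUnsigned(). The value 0x1234 needs two bytes, so the |
| | | // writer emits 0x92 (0x80 | 0x12) followed by 0x34; DECODE_SIZE[0x92] is 2, and |
| | | // (0x92 & 0x3f) << 8 | 0x34 restores 0x1234. The round trip below mirrors the unit test |
| | | // added later in this change: |
| | | // long v = new ByteStringBuilder().appendCompactUnsigned(0x1234L).asReader().getCompactUnsigned(); |
| | | // assert v == 0x1234L; |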
| | | |
| | | /** |
| | | * Relative get method for reading a short value. Reads the next 2 bytes at |
| | | * this reader's current position, composing them into a short value |
| | | * according to big-endian byte order, and then increments the position by |
| | |
| | | */ |
| | | package org.forgerock.opendj.ldap; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | |
| | | import java.io.DataInput; |
| | | import java.io.EOFException; |
| | | import java.io.IOException; |
| | |
| | | import java.nio.charset.Charset; |
| | | import java.nio.charset.CharsetDecoder; |
| | | |
| | | import org.forgerock.util.Reject; |
| | | |
| | | /** |
| | | * A mutable sequence of bytes backed by a byte array. |
| | | */ |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public ByteBuffer copyTo(final ByteBuffer byteBuffer) { |
| | | byteBuffer.put(buffer, subOffset, subLength); |
| | | byteBuffer.flip(); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean copyTo(CharBuffer charBuffer, CharsetDecoder decoder) { |
| | | return ByteString.copyTo(ByteBuffer.wrap(buffer, subOffset, subLength), charBuffer, decoder); |
| | | } |
| | |
| | | * If the {@code capacity} is negative. |
| | | */ |
| | | public ByteStringBuilder(final int capacity) { |
| | | if (capacity < 0) { |
| | | throw new IllegalArgumentException(); |
| | | } |
| | | |
| | | Reject.ifFalse(capacity >= 0, "capacity must be >= 0"); |
| | | this.buffer = new byte[capacity]; |
| | | this.length = 0; |
| | | } |
| | |
| | | } |
| | | |
| | | /** |
| | | * Appends the compact encoded bytes of the provided unsigned long to this byte |
| | | * string builder. This method encodes unsigned longs of up to 56 bits using |
| | | * fewer bytes (from 1 to 8) than append(long). The encoding has the important |
| | | * property that it preserves ordering, so it can be used for keys. |
| | | * |
| | | * @param value |
| | | * The long whose compact encoding is to be appended to this |
| | | * byte string builder. |
| | | * @return This byte string builder. |
| | | */ |
| | | public ByteStringBuilder appendCompactUnsigned(long value) { |
| | | Reject.ifFalse(value >= 0, "value must be >= 0"); |
| | | |
| | | final int size = getEncodedSize(value); |
| | | ensureAdditionalCapacity(size); |
| | | switch (size) { |
| | | case 1: |
| | | buffer[length++] = (byte) value; |
| | | break; |
| | | case 2: |
| | | buffer[length++] = (byte) ((value >>> 8) | 0x80L); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | case 3: |
| | | buffer[length++] = (byte) ((value >>> 16) | 0xc0L); |
| | | buffer[length++] = l2b(value >>> 8); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | case 4: |
| | | buffer[length++] = (byte) ((value >>> 24) | 0xe0L); |
| | | buffer[length++] = l2b(value >>> 16); |
| | | buffer[length++] = l2b(value >>> 8); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | case 5: |
| | | buffer[length++] = (byte) ((value >>> 32) | 0xf0L); |
| | | buffer[length++] = l2b(value >>> 24); |
| | | buffer[length++] = l2b(value >>> 16); |
| | | buffer[length++] = l2b(value >>> 8); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | case 6: |
| | | buffer[length++] = (byte) ((value >>> 40) | 0xf8L); |
| | | buffer[length++] = l2b(value >>> 32); |
| | | buffer[length++] = l2b(value >>> 24); |
| | | buffer[length++] = l2b(value >>> 16); |
| | | buffer[length++] = l2b(value >>> 8); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | case 7: |
| | | buffer[length++] = (byte) ((value >>> 48) | 0xfcL); |
| | | buffer[length++] = l2b(value >>> 40); |
| | | buffer[length++] = l2b(value >>> 32); |
| | | buffer[length++] = l2b(value >>> 24); |
| | | buffer[length++] = l2b(value >>> 16); |
| | | buffer[length++] = l2b(value >>> 8); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | default: |
| | | buffer[length++] = (byte) 0xfe; |
| | | buffer[length++] = l2b(value >>> 48); |
| | | buffer[length++] = l2b(value >>> 40); |
| | | buffer[length++] = l2b(value >>> 32); |
| | | buffer[length++] = l2b(value >>> 24); |
| | | buffer[length++] = l2b(value >>> 16); |
| | | buffer[length++] = l2b(value >>> 8); |
| | | buffer[length++] = l2b(value); |
| | | break; |
| | | } |
| | | return this; |
| | | } |
| | | |
| | | private static int getEncodedSize(long value) { |
| | | if (value < 0x80L) { |
| | | return 1; |
| | | } else if (value < 0x4000L) { |
| | | return 2; |
| | | } else if (value < 0x200000L) { |
| | | return 3; |
| | | } else if (value < 0x10000000L) { |
| | | return 4; |
| | | } else if (value < 0x800000000L) { |
| | | return 5; |
| | | } else if (value < 0x40000000000L) { |
| | | return 6; |
| | | } else if (value < 0x2000000000000L) { |
| | | return 7; |
| | | } else if (value < 0x100000000000000L) { |
| | | return 8; |
| | | } else { |
| | | throw new IllegalArgumentException("value out of range: " + value); |
| | | } |
| | | } |
| | | |
| | | private static byte l2b(long value) { |
| | | return (byte) (value & 0xffL); |
| | | } |
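| | | |
| | | // Illustrative sketch, not part of the original change: the ordering property mentioned in |
| | | // the javadoc above can be checked directly, assuming ByteString's unsigned byte-wise |
| | | // compareTo. 0x7f encodes as the single byte 0x7f while 0x80 encodes as 0x80 0x80, so the |
| | | // encoded forms compare in the same order as the values themselves: |
| | | // ByteString a = new ByteStringBuilder().appendCompactUnsigned(0x7fL).toByteString(); |
| | | // ByteString b = new ByteStringBuilder().appendCompactUnsigned(0x80L).toByteString(); |
| | | // assert a.compareTo(b) < 0; |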
| | | |
| | | /** |
| | | * Appends the byte string representation of the provided object to this |
| | | * byte string builder. The object is converted to a byte string as follows: |
| | | * <ul> |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public ByteBuffer copyTo(final ByteBuffer byteBuffer) { |
| | | byteBuffer.put(buffer, 0, length); |
| | | byteBuffer.flip(); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean copyTo(CharBuffer charBuffer, CharsetDecoder decoder) { |
| | | return ByteString.copyTo(ByteBuffer.wrap(buffer, 0, length), charBuffer, decoder); |
| | | } |
| | |
| | | */ |
| | | package org.forgerock.opendj.ldap; |
| | | |
| | | import static org.fest.assertions.Assertions.*; |
| | | |
| | | import java.io.ByteArrayInputStream; |
| | | import java.io.DataInput; |
| | | import java.io.DataInputStream; |
| | |
| | | * Test case for ByteStringBuilder. |
| | | */ |
| | | @SuppressWarnings("javadoc") |
| | | @Test(groups = "unit") |
| | | public class ByteStringBuilderTestCase extends ByteSequenceTestCase { |
| | | |
| | | private static byte b(int i) { |
| | |
| | | return addlSequences; |
| | | } |
| | | |
| | | @Test(expectedExceptions = IllegalArgumentException.class) |
| | | public void testCannotAppendCompactNegativeValues() { |
| | | ByteStringBuilder builder = new ByteStringBuilder(); |
| | | builder.appendCompactUnsigned(-1); |
| | | } |
| | | |
| | | @Test(expectedExceptions = IllegalArgumentException.class) |
| | | public void testCannotAppendCompact57BitsValues() { |
| | | new ByteStringBuilder().appendCompactUnsigned(0x100000000000000L); |
| | | } |
| | | |
| | | @Test(dataProvider = "unsignedLongValues") |
| | | public void testCanAppendCompactPositiveValue(long value) { |
| | | assertThat(new ByteStringBuilder().appendCompactUnsigned(value).asReader().getCompactUnsigned()).isEqualTo( |
| | | value); |
| | | } |
| | | |
| | | @DataProvider |
| | | public Object[][] unsignedLongValues() throws Exception { |
| | | return new Object[][] { |
| | | { 0 }, { 0x80L }, { 0x81L }, { 0x4000L }, { 0x4001L }, { 0x200000L }, { 0x200001L }, |
| | | { 0x10000000L }, { 0x10000001L }, { 0x800000000L }, { 0x800000001L }, { 0x40000000000L }, |
| | | { 0x40000000001L }, { 0x2000000000000L }, { 0x2000000000001L }, { 0x00FFFFFFFFFFFFFFL } |
| | | }; |
| | | } |
| | | |
| | | |
| | | @Test(expectedExceptions = IndexOutOfBoundsException.class) |
| | | public void testAppendBadByteBufferLength1() { |
| | | new ByteStringBuilder().append(ByteBuffer.wrap(new byte[5]), -1); |
| | |
| | | Assert.assertTrue(Arrays.equals(trimmedArray, ba)); |
| | | } |
| | | |
| | | @SuppressWarnings("unused") |
| | | @Test(expectedExceptions = IllegalArgumentException.class) |
| | | public void testInvalidCapacity() { |
| | | new ByteStringBuilder(-1); |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.util.promise.Function; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | |
| | | /** |
| | | * Transforms the keys and values of a cursor from their original types to others. Typically used for |
| | | * data serialization and deserialization. |
| | | * |
| | | * @param <KI> |
| | | * Original cursor's key type |
| | | * @param <VI> |
| | | * Original cursor's value type |
| | | * @param <KO> |
| | | * Transformed cursor's key type |
| | | * @param <VO> |
| | | * Transformed cursor's value type |
| | | */ |
| | | final class CursorTransformer<KI, VI, KO, VO> implements Cursor<KO, VO> |
| | | { |
| | | |
| | | /** |
| | | * Allows transforming a cursor value given the key and the original value. |
| | | * @param <KI> Original type of the cursor's key |
| | | * @param <VI> Original type of the cursor's value |
| | | * @param <VO> New transformed type of the value |
| | | * @param <E> Possible exception type |
| | | */ |
| | | interface ValueTransformer<KI, VI, VO, E extends Exception> |
| | | { |
| | | VO transform(KI key, VI value) throws E; |
| | | } |
| | | |
| | | private static final Function<Object, Object, NeverThrowsException> NO_TRANSFORM = |
| | | new Function<Object, Object, NeverThrowsException>() |
| | | { |
| | | @Override |
| | | public Object apply(Object value) throws NeverThrowsException |
| | | { |
| | | return value; |
| | | } |
| | | }; |
| | | |
| | | private final Cursor<KI, VI> input; |
| | | private final Function<KI, KO, ? extends Exception> keyTransformer; |
| | | private final ValueTransformer<KI, VI, VO, ? extends Exception> valueTransformer; |
| | | private KO cachedTransformedKey; |
| | | private VO cachedTransformedValue; |
| | | |
| | | static <KI, VI, KO, VO> Cursor<KO, VO> transformKeysAndValues(Cursor<KI, VI> input, |
| | | Function<KI, KO, ? extends Exception> keyTransformer, |
| | | ValueTransformer<KI, VI, VO, ? extends Exception> valueTransformer) |
| | | { |
| | | return new CursorTransformer<KI, VI, KO, VO>(input, keyTransformer, valueTransformer); |
| | | } |
| | | |
| | | @SuppressWarnings("unchecked") |
| | | static <KI, VI, VO> Cursor<KI, VO> transformValues(Cursor<KI, VI> input, |
| | | ValueTransformer<KI, VI, VO, ? extends Exception> valueTransformer) |
| | | { |
| | | return transformKeysAndValues(input, (Function<KI, KI, NeverThrowsException>) NO_TRANSFORM, valueTransformer); |
| | | } |
| | | |
| | | private CursorTransformer(Cursor<KI, VI> input, Function<KI, KO, ? extends Exception> keyTransformer, |
| | | ValueTransformer<KI, VI, VO, ? extends Exception> valueTransformer) |
| | | { |
| | | this.input = checkNotNull(input, "input must not be null"); |
| | | this.keyTransformer = checkNotNull(keyTransformer, "keyTransformer must not be null"); |
| | | this.valueTransformer = checkNotNull(valueTransformer, "valueTransformer must not be null"); |
| | | } |
| | | |
| | | @Override |
| | | public void close() |
| | | { |
| | | input.close(); |
| | | } |
| | | |
| | | @Override |
| | | public KO getKey() |
| | | { |
| | | if (cachedTransformedKey == null) |
| | | { |
| | | try |
| | | { |
| | | cachedTransformedKey = keyTransformer.apply(input.getKey()); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | throw new TransformationException(e, input.getKey(), input.getValue()); |
| | | } |
| | | } |
| | | return cachedTransformedKey; |
| | | } |
| | | |
| | | @Override |
| | | public VO getValue() |
| | | { |
| | | if (cachedTransformedValue == null) |
| | | { |
| | | try |
| | | { |
| | | cachedTransformedValue = valueTransformer.transform(input.getKey(), input.getValue()); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | throw new TransformationException(e, input.getKey(), input.getValue()); |
| | | } |
| | | } |
| | | return cachedTransformedValue; |
| | | } |
| | | |
| | | @Override |
| | | public boolean next() |
| | | { |
| | | clearCache(); |
| | | return input.next(); |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToKey(final ByteSequence key) |
| | | { |
| | | clearCache(); |
| | | return input.positionToKey(key); |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToKeyOrNext(final ByteSequence key) |
| | | { |
| | | clearCache(); |
| | | return input.positionToKeyOrNext(key); |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToLastKey() |
| | | { |
| | | clearCache(); |
| | | return input.positionToLastKey(); |
| | | } |
| | | |
| | | @Override |
| | | public boolean positionToIndex(int index) |
| | | { |
| | | return input.positionToIndex(index); |
| | | } |
| | | |
| | | @Override |
| | | public boolean previous() |
| | | { |
| | | clearCache(); |
| | | return input.previous(); |
| | | } |
| | | |
| | | private void clearCache() |
| | | { |
| | | cachedTransformedKey = null; |
| | | cachedTransformedValue = null; |
| | | } |
| | | |
| | | /** |
| | | * Runtime exception for problems happening during the transformation |
| | | */ |
| | | @SuppressWarnings("serial") |
| | | public static class TransformationException extends RuntimeException |
| | | { |
| | | private final Object originalKey; |
| | | private final Object originalValue; |
| | | |
| | | public TransformationException(Exception e, Object originalKey, Object originalValue) |
| | | { |
| | | super(e); |
| | | this.originalKey = originalKey; |
| | | this.originalValue = originalValue; |
| | | } |
| | | |
| | | /** |
| | | * Get the key of the record which caused the transformation error. |
| | | * |
| | | * @return The untransformed key of the record. |
| | | */ |
| | | public Object getOriginalKey() |
| | | { |
| | | return originalKey; |
| | | } |
| | | |
| | | /** |
| | | * Get the value of the record which caused the transformation error. |
| | | * |
| | | * @return The untransformed value of the record. |
| | | */ |
| | | public Object getOriginalValue() |
| | | { |
| | | return originalValue; |
| | | } |
| | | } |
| | | } |
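| | | |
| | | // Illustrative usage sketch, not part of the original file: decoding raw cursor values on the |
| | | // fly with transformValues(). The rawCursor and codec names below are hypothetical placeholders; |
| | | // the real use of this pattern appears in Index#openCursor() later in this change. |
| | | // Cursor<ByteString, EntryIDSet> decoded = CursorTransformer.transformValues(rawCursor, |
| | | //     new CursorTransformer.ValueTransformer<ByteString, ByteString, EntryIDSet, NeverThrowsException>() |
| | | //     { |
| | | //       @Override |
| | | //       public EntryIDSet transform(ByteString key, ByteString value) |
| | | //       { |
| | | //         return codec.decode(key, value); |
| | | //       } |
| | | //     }); |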
| | |
| | | |
| | | try |
| | | { |
| | | final Cursor cursor = txn.openCursor(getName()); |
| | | final Cursor<ByteString, ByteString> cursor = txn.openCursor(getName()); |
| | | try |
| | | { |
| | | // Go up through the DIT hierarchy until we find a referral. |
| | |
| | | |
| | | try |
| | | { |
| | | final Cursor cursor = txn.openCursor(getName()); |
| | | final Cursor<ByteString, ByteString> cursor = txn.openCursor(getName()); |
| | | try |
| | | { |
| | | // Initialize the cursor very close to the starting value then |
| | |
| | | } |
| | | @Override |
| | | public void run() { |
| | | Cursor cursor = null; |
| | | Cursor<ByteString, ByteString> cursor = null; |
| | | ID2Entry id2entry = null; |
| | | RootContainer rootContainer = backend.getRootContainer(); |
| | | Iterator<EntryContainer> ecIterator = rootContainer.getEntryContainers().iterator(); |
| | |
| | | */ |
| | | EntryID getHighestEntryID(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor cursor = txn.openCursor(id2entry.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName()); |
| | | try |
| | | { |
| | | // Position a cursor on the last data item, and the key should give the highest ID. |
| | |
| | | |
| | | try |
| | | { |
| | | final Cursor cursor = txn.openCursor(dn2id.getName()); |
| | | final Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName()); |
| | | try |
| | | { |
| | | // Initialize the cursor very close to the starting value. |
| | |
| | | |
| | | int subordinateEntriesDeleted = 0; |
| | | |
| | | Cursor cursor = txn.openCursor(dn2id.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName()); |
| | | try |
| | | { |
| | | // Step forward until we pass the ending value. |
| | |
| | | suffix.append((byte) 0x00); |
| | | end.append((byte) 0x01); |
| | | |
| | | Cursor cursor = txn.openCursor(dn2id.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName()); |
| | | try |
| | | { |
| | | |
| | |
| | | database.delete(txn); |
| | | if(database instanceof Index) |
| | | { |
| | | state.removeIndexTrustState(txn, database); |
| | | state.deleteRecord(txn, database.getName()); |
| | | } |
| | | } |
| | | |
| | |
| | | for (Index index : attributeIndex.getAllIndexes()) |
| | | { |
| | | index.delete(txn); |
| | | state.removeIndexTrustState(txn, index); |
| | | state.deleteRecord(txn, index.getName()); |
| | | } |
| | | } |
| | | |
| | |
| | | @SuppressWarnings("javadoc") |
| | | final class EntryIDSet implements Iterable<EntryID> |
| | | { |
| | | public static final EntryIDSetCodec CODEC_V1 = new EntryIDSetCodecV1(); |
| | | public static final EntryIDSetCodec CODEC_V2 = new EntryIDSetCodecV2(); |
| | | |
| | | private static final ByteSequence NO_KEY = ByteString.valueOf("<none>"); |
| | | private static final long[] EMPTY_LONG_ARRAY = new long[0]; |
| | | private static final long[] NO_ENTRY_IDS_RANGE = new long[] { 0, 0 }; |
| | |
| | | |
| | | boolean isDefined(); |
| | | |
| | | ByteString toByteString(); |
| | | |
| | | long[] getRange(); |
| | | |
| | | long[] getIDs(); |
| | |
| | | } |
| | | |
| | | /** |
| | | * Defines the serialization contract for EntryIDSet. |
| | | */ |
| | | interface EntryIDSetCodec { |
| | | |
| | | static final int INT_SIZE = 4; |
| | | |
| | | static final int LONG_SIZE = 8; |
| | | |
| | | ByteString encode(EntryIDSet idSet); |
| | | |
| | | EntryIDSet decode(ByteSequence key, ByteString value); |
| | | } |
| | | |
| | | /** |
| | | * Concrete implementation representing a set of EntryIDs, sorted in ascending order. |
| | | */ |
| | | private static final class DefinedImpl implements EntryIDSetImplementor |
| | |
| | | } |
| | | |
| | | @Override |
| | | public ByteString toByteString() |
| | | { |
| | | final ByteStringBuilder builder = new ByteStringBuilder(8 * entryIDs.length); |
| | | for (long value : entryIDs) |
| | | { |
| | | builder.append(value); |
| | | } |
| | | return builder.toByteString(); |
| | | } |
| | | |
| | | @Override |
| | | public boolean add(EntryID entryID) |
| | | { |
| | | long id = entryID.longValue(); |
| | |
| | | |
| | | if (entryIDs.length == 0) |
| | | { |
| | | entryIDs = Arrays.copyOf(anotherEntryIDSet.getIDs(), anotherEntryIDSet.getIDs().length); |
| | | entryIDs = anotherEntryIDSet.getIDs(); |
| | | return; |
| | | } |
| | | |
| | |
| | | } |
| | | |
| | | @Override |
| | | public ByteString toByteString() |
| | | { |
| | | // Set top bit. |
| | | return ByteString.valueOf(undefinedSize | Long.MIN_VALUE); |
| | | } |
| | | |
| | | @Override |
| | | public boolean add(EntryID entryID) |
| | | { |
| | | if (maintainUndefinedSize()) |
| | |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Legacy EntryIDSet codec implementation |
| | | */ |
| | | private static final class EntryIDSetCodecV1 implements EntryIDSetCodec |
| | | { |
| | | @Override |
| | | public ByteString encode(EntryIDSet idSet) |
| | | { |
| | | return ByteString.wrap(append(new ByteStringBuilder(getEstimatedSize(idSet)), idSet).trimToSize() |
| | | .getBackingArray()); |
| | | } |
| | | |
| | | @Override |
| | | public EntryIDSet decode(ByteSequence key, ByteString value) |
| | | { |
| | | checkNotNull(key, "key must not be null"); |
| | | checkNotNull(value, "value must not be null"); |
| | | |
| | | if (value.isEmpty()) |
| | | { |
| | | // Entry limit has been exceeded and there is no encoded undefined set size. |
| | | return newDefinedSet(); |
| | | } |
| | | else if ((value.byteAt(0) & 0x80) == 0x80) |
| | | { |
| | | // Entry limit has been exceeded and there is an encoded undefined set size. |
| | | return newUndefinedSetWithSize(key, decodeUndefinedSize(value)); |
| | | } |
| | | else |
| | | { |
| | | // Seems like the entry limit has not been exceeded and the bytes are a list of entry IDs. |
| | | return newDefinedSet(decodeRaw(value.asReader(), value.length() / LONG_SIZE)); |
| | | } |
| | | } |
| | | |
| | | private int getEstimatedSize(EntryIDSet idSet) |
| | | { |
| | | if (idSet.isDefined()) |
| | | { |
| | | return idSet.getIDs().length * LONG_SIZE; |
| | | } |
| | | else |
| | | { |
| | | return LONG_SIZE; |
| | | } |
| | | } |
| | | |
| | | private long[] decodeRaw(ByteSequenceReader reader, int nbEntriesToDecode) |
| | | { |
| | | checkNotNull(reader, "builder must not be null"); |
| | | Reject.ifFalse(nbEntriesToDecode >= 0, "nbEntriesToDecode must be >= 0"); |
| | | |
| | | final long ids[] = new long[nbEntriesToDecode]; |
| | | for(int i = 0 ; i < nbEntriesToDecode ; i++) { |
| | | ids[i] = reader.getLong(); |
| | | } |
| | | return ids; |
| | | } |
| | | |
| | | private ByteStringBuilder append(ByteStringBuilder builder, EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | checkNotNull(builder, "builder must not be null"); |
| | | |
| | | if (idSet.isDefined()) |
| | | { |
| | | for (long value : idSet.getIDs()) |
| | | { |
| | | builder.append(value); |
| | | } |
| | | return builder; |
| | | } |
| | | else |
| | | { |
| | | // Set top bit. |
| | | return builder.append(idSet.size() | Long.MIN_VALUE); |
| | | } |
| | | } |
| | | |
| | | private static long decodeUndefinedSize(ByteSequence bytes) |
| | | { |
| | | // remove top bit |
| | | return bytes.length() == LONG_SIZE ? bytes.asReader().getLong() & Long.MAX_VALUE : Long.MAX_VALUE; |
| | | } |
| | | } |
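| | | |
| | | // Worked example, not part of the original file: with CODEC_V1 a defined set such as {5, 10} is |
| | | // stored as the concatenation of big-endian longs (16 bytes here), while an undefined set of |
| | | // size 3 is stored as a single long with its top bit set, i.e. 0x8000000000000003L. decode() |
| | | // tells the two cases apart by testing the top bit of the first byte: (value.byteAt(0) & 0x80). |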
| | | |
| | | /** |
| | | * Compacted EntryIDSet codec implementation. The idea is to take advantage of |
| | | * org.forgerock.opendj.ldap.ByteStringBuilder#appendCompactUnsigned(), which writes small long values in fewer bytes. |
| | | * Rather than storing the full list of IDs, we store only the difference between the Nth ID and the (N-1)th one, in the |
| | | * hope that the result will be small enough to be compacted by appendCompactUnsigned(). |
| | | */ |
| | | private static final class EntryIDSetCodecV2 implements EntryIDSetCodec |
| | | { |
| | | private static final byte UNDEFINED_SET = (byte) 0xFF; |
| | | |
| | | @Override |
| | | public ByteString encode(EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | ByteStringBuilder builder = new ByteStringBuilder(getEstimatedSize(idSet)); |
| | | return append(builder, idSet).toByteString(); |
| | | } |
| | | |
| | | @Override |
| | | public EntryIDSet decode(ByteSequence key, ByteString value) |
| | | { |
| | | checkNotNull(key, "key must not be null"); |
| | | checkNotNull(value, "value must not be null"); |
| | | |
| | | final ByteSequenceReader reader = value.asReader(); |
| | | if ( reader.get() == UNDEFINED_SET) { |
| | | return newUndefinedSetWithSize(key, reader.getLong()); |
| | | } else { |
| | | reader.rewind(); |
| | | return newDefinedSet(decodeRaw(reader, (int) reader.getCompactUnsigned())); |
| | | } |
| | | } |
| | | |
| | | private ByteStringBuilder append(ByteStringBuilder builder, EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | checkNotNull(builder, "builder must not be null"); |
| | | |
| | | if (idSet.isDefined()) |
| | | { |
| | | builder.appendCompactUnsigned(idSet.size()); |
| | | long basis = 0; |
| | | for (long value : idSet.getIDs()) |
| | | { |
| | | builder.appendCompactUnsigned(value - basis); |
| | | basis = value; |
| | | } |
| | | } |
| | | else |
| | | { |
| | | builder.append(UNDEFINED_SET); |
| | | builder.append(idSet.size()); |
| | | } |
| | | return builder; |
| | | } |
| | | |
| | | private int getEstimatedSize(EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | return idSet.getIDs().length * LONG_SIZE + INT_SIZE; |
| | | } |
| | | |
| | | private long[] decodeRaw(ByteSequenceReader reader, int nbEntriesToDecode) |
| | | { |
| | | checkNotNull(reader, "reader must not be null"); |
| | | Reject.ifFalse(nbEntriesToDecode >= 0, "nbEntriesToDecode must be >= 0"); |
| | | |
| | | if ( nbEntriesToDecode == 0 ) { |
| | | return EMPTY_LONG_ARRAY; |
| | | } else { |
| | | final long ids[] = new long[nbEntriesToDecode]; |
| | | ids[0] = reader.getCompactUnsigned(); |
| | | for(int i = 1 ; i < nbEntriesToDecode ; i++) { |
| | | ids[i] = ids[i-1] + reader.getCompactUnsigned(); |
| | | } |
| | | return ids; |
| | | } |
| | | } |
| | | } |
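| | | |
| | | // Worked example, not part of the original file: with CODEC_V2 the defined set {1000, 1001, 1005} |
| | | // is stored as the compact count 3 (one byte) followed by the compact deltas 1000, 1 and 4 |
| | | // (bytes 0x83 0xE8, 0x01, 0x04), i.e. 5 bytes instead of the 24 bytes CODEC_V1 would need. An |
| | | // undefined set is marked by the 0xFF lead byte followed by its size as a plain 8-byte long. |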
| | | |
| | | static EntryIDSet newUndefinedSet() |
| | | { |
| | | return new EntryIDSet(new UndefinedImpl(NO_KEY, Long.MAX_VALUE)); |
| | |
| | | return new EntryIDSet(new DefinedImpl(ids)); |
| | | } |
| | | |
| | | /** |
| | | * Creates a new entry ID set from the raw database value. |
| | | * |
| | | * @param key |
| | | * The database key that contains this value. |
| | | * @param value |
| | | * The database value; must not be null. |
| | | * @throws NullPointerException |
| | | * if either key or value is null |
| | | */ |
| | | static EntryIDSet newSetFromBytes(ByteSequence key, ByteString value) |
| | | { |
| | | checkNotNull(key, "key must not be null"); |
| | | checkNotNull(value, "value must not be null"); |
| | | |
| | | if (value.isEmpty()) |
| | | { |
| | | // Entry limit has been exceeded and there is no encoded undefined set size. |
| | | return newUndefinedSetWithKey(key); |
| | | } |
| | | else if ((value.byteAt(0) & 0x80) == 0x80) |
| | | { |
| | | // Entry limit has been exceeded and there is an encoded undefined set size. |
| | | return newUndefinedSetWithSize(key, decodeUndefinedSize(value)); |
| | | } |
| | | else |
| | | { |
| | | // Seems like the entry limit has not been exceeded and the bytes are a list of entry IDs. |
| | | return newDefinedSet(decodeEntryIDSet(value)); |
| | | } |
| | | } |
| | | |
| | | private static long[] intersection(long[] set1, long[] set2) |
| | | { |
| | | long[] target = new long[Math.min(set1.length, set2.length)]; |
| | |
| | | { |
| | | checkNotNull(sets, "sets must not be null"); |
| | | |
| | | // FIXME: Benchmarks have shown that it is possible to get a 5x performance gain if we sort the non-overlapping sets. |
| | | // To do that, we can use compareForOverlap(). In case sets are unordered and non-overlapping, this optimization allows |
| | | // us to skip the final sort() applied on the resulting set. |
| | | |
| | | int count = 0; |
| | | |
| | | boolean containsUndefinedSet = false; |
| | |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decodes and returns the entryID list out of the provided byte sequence. |
| | | * |
| | | * @param bytes |
| | | * the encoded entryID list |
| | | * @return a long array representing the entryID list |
| | | */ |
| | | static long[] decodeEntryIDSet(ByteSequence bytes) |
| | | { |
| | | final ByteSequenceReader reader = bytes.asReader(); |
| | | final int count = bytes.length() / 8; |
| | | final long[] entryIDSet = new long[count]; |
| | | for (int i = 0; i < count; i++) |
| | | { |
| | | entryIDSet[i] = reader.getLong(); |
| | | } |
| | | return entryIDSet; |
| | | } |
| | | |
| | | /** |
| | | * Decodes and returns the undefined size out of the provided byte string. |
| | | * |
| | | * @param bytes |
| | | * the encoded undefined size |
| | | * @return the undefined size |
| | | */ |
| | | static long decodeUndefinedSize(ByteString bytes) |
| | | { |
| | | return bytes.length() == 8 |
| | | ? bytes.toLong() & Long.MAX_VALUE |
| | | : Long.MAX_VALUE; // remove top bit |
| | | } |
| | | |
| | | private EntryIDSetImplementor concreteImpl; |
| | | |
| | | private EntryIDSet(EntryIDSetImplementor concreteImpl) |
| | |
| | | } |
| | | |
| | | /** |
| | | * Get a database representation of this object. |
| | | * |
| | | * @return A database representation of this object as a byte array. |
| | | */ |
| | | public ByteString toByteString() |
| | | { |
| | | return concreteImpl.toByteString(); |
| | | } |
| | | |
| | | /** |
| | | * Insert an ID into this set. |
| | | * |
| | | * @param entryID |
| | |
| | | private void exportContainer(ReadableTransaction txn, EntryContainer entryContainer) |
| | | throws StorageRuntimeException, IOException, LDIFException |
| | | { |
| | | Cursor cursor = txn.openCursor(entryContainer.getID2Entry().getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(entryContainer.getID2Entry().getName()); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | |
| | | import java.util.Iterator; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.util.Reject; |
| | | import org.opends.server.backends.pluggable.EntryIDSet.EntryIDSetCodec; |
| | | |
| | | /** |
| | | * This class manages the set of IDs that are to be eventually added to an index |
| | |
| | | * the configured ID limit. If the limit is reached, the class stops tracking |
| | | * individual IDs and marks the set as undefined. This class is not thread safe. |
| | | */ |
| | | final class ImportIDSet { |
| | | final class ImportIDSet implements Iterable<EntryID> { |
| | | |
| | | /** The encapsulated entryIDSet where elements are stored until reaching the limit. */ |
| | | private EntryIDSet entryIDSet; |
| | |
| | | return key; |
| | | } |
| | | |
| | | @Override |
| | | public Iterator<EntryID> iterator() { |
| | | return entryIDSet.iterator(); |
| | | } |
| | | |
| | | /** |
| | | * @return Binary representation of this ID set |
| | | */ |
| | | ByteString valueToByteString() { |
| | | return entryIDSet.toByteString(); |
| | | ByteString valueToByteString(EntryIDSetCodec codec) { |
| | | checkNotNull(codec, "codec must not be null"); |
| | | return codec.encode(entryIDSet); |
| | | } |
| | | |
| | | @Override |
| | |
| | | if (entryContainer != null && !suffix.getExcludeBranches().isEmpty()) |
| | | { |
| | | logger.info(NOTE_JEB_IMPORT_MIGRATION_START, "excluded", suffix.getBaseDN()); |
| | | Cursor cursor = txn.openCursor(entryContainer.getDN2ID().getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(entryContainer.getDN2ID().getName()); |
| | | try |
| | | { |
| | | for (DN excludedDN : suffix.getExcludeBranches()) |
| | |
| | | if (entryContainer != null && !suffix.getIncludeBranches().isEmpty()) |
| | | { |
| | | logger.info(NOTE_JEB_IMPORT_MIGRATION_START, "existing", suffix.getBaseDN()); |
| | | Cursor cursor = txn.openCursor(entryContainer.getDN2ID().getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(entryContainer.getDN2ID().getName()); |
| | | try |
| | | { |
| | | final List<ByteString> includeBranches = includeBranchesAsBytes(suffix); |
| | |
| | | /** Why do we still need this if we are checking parents in the first phase? */ |
| | | private boolean checkParent(ReadableTransaction txn, ImportIDSet idSet) throws StorageRuntimeException |
| | | { |
| | | entryID = new EntryID(idSet.valueToByteString()); |
| | | entryID = idSet.iterator().next(); |
| | | parentDN = getParent(idSet.getKey()); |
| | | |
| | | //Bypass the cache for append data, lookup the parent in DN2ID and return. |
| | |
| | | public Void call() throws Exception |
| | | { |
| | | ID2Entry id2entry = entryContainer.getID2Entry(); |
| | | Cursor cursor = txn.openCursor(id2entry.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName()); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | import static org.opends.server.backends.pluggable.State.IndexFlag.*; |
| | | |
| | | import java.util.ArrayList; |
| | | import java.util.EnumSet; |
| | | import java.util.HashSet; |
| | | import java.util.List; |
| | | import java.util.Map; |
| | |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ConditionResult; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.opends.server.backends.pluggable.CursorTransformer.ValueTransformer; |
| | | import org.opends.server.backends.pluggable.EntryIDSet.EntryIDSetCodec; |
| | | import org.opends.server.backends.pluggable.IndexBuffer.BufferedIndexValues; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | |
| | | |
| | | private final State state; |
| | | |
| | | private final EntryIDSetCodec codec; |
| | | |
| | | /** |
| | | * A flag to indicate if this index should be trusted to be consistent |
| | | * with the entries database. If not trusted, we assume that existing |
| | |
| | | this.indexEntryLimit = indexEntryLimit; |
| | | this.cursorEntryLimit = cursorEntryLimit; |
| | | this.maintainCount = maintainCount; |
| | | |
| | | this.state = state; |
| | | this.trusted = state.getIndexTrustState(txn, this); |
| | | |
| | | final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName()); |
| | | this.codec = flags.contains(COMPACTED) ? CODEC_V2 : CODEC_V1; |
| | | this.trusted = flags.contains(TRUSTED); |
| | | if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0) |
| | | { |
| | | // If there are no entries in the entry container then there |
| | |
| | | getBufferedIndexValues(buffer, keyBytes).addEntryID(keyBytes, entryID); |
| | | } |
| | | |
| | | final Cursor<ByteString, EntryIDSet> openCursor(ReadableTransaction txn) { |
| | | checkNotNull(txn, "txn must not be null"); |
| | | return CursorTransformer.transformValues(txn.openCursor(getName()), |
| | | new ValueTransformer<ByteString, ByteString, EntryIDSet, NeverThrowsException>() |
| | | { |
| | | @Override |
| | | public EntryIDSet transform(ByteString key, ByteString value) throws NeverThrowsException |
| | | { |
| | | return codec.decode(key, value); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | /** |
| | | * Delete the specified import ID set from the import ID set associated with the key. |
| | | * |
| | |
| | | ByteSequence key = importIdSet.getKey(); |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, newSetFromBytes(key, value), indexEntryLimit, maintainCount); |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | importIDSet.remove(importIdSet); |
| | | if (importIDSet.isDefined() && importIDSet.size() == 0) |
| | | { |
| | |
| | | } |
| | | else |
| | | { |
| | | value = importIDSet.valueToByteString(); |
| | | value = importIDSet.valueToByteString(codec); |
| | | txn.put(getName(), key, value); |
| | | } |
| | | } else { |
| | |
| | | ByteSequence key = importIdSet.getKey(); |
| | | ByteString value = txn.read(getName(), key); |
| | | if(value != null) { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, newSetFromBytes(key, value), indexEntryLimit, maintainCount); |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | if (importIDSet.merge(importIdSet)) { |
| | | entryLimitExceededCount++; |
| | | } |
| | | value = importIDSet.valueToByteString(); |
| | | value = importIDSet.valueToByteString(codec); |
| | | } else { |
| | | if(!importIdSet.isDefined()) { |
| | | entryLimitExceededCount++; |
| | | } |
| | | value = importIdSet.valueToByteString(); |
| | | value = importIdSet.valueToByteString(codec); |
| | | } |
| | | txn.put(getName(), key, value); |
| | | } |
| | |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | EntryIDSet entryIDSet = newSetFromBytes(key, value); |
| | | EntryIDSet entryIDSet = codec.decode(key, value); |
| | | if (entryIDSet.isDefined()) |
| | | { |
| | | updateKeyWithRMW(txn, key, deletedIDs, addedIDs); |
| | |
| | | if (oldValue != null) |
| | | { |
| | | EntryIDSet entryIDSet = computeEntryIDSet(key, oldValue.toByteString(), deletedIDs, addedIDs); |
| | | ByteString after = entryIDSet.toByteString(); |
| | | ByteString after = codec.encode(entryIDSet); |
| | | /* |
| | | * If there are no more IDs then return null indicating that the record should be removed. |
| | | * If index is not trusted then this will cause all subsequent reads for this key to |
| | |
| | | } |
| | | if (isNotEmpty(addedIDs)) |
| | | { |
| | | return addedIDs.toByteString(); |
| | | return codec.encode(addedIDs); |
| | | } |
| | | } |
| | | return null; // no change. |
| | |
| | | |
| | | private EntryIDSet computeEntryIDSet(ByteString key, ByteString value, EntryIDSet deletedIDs, EntryIDSet addedIDs) |
| | | { |
| | | EntryIDSet entryIDSet = newSetFromBytes(key, value); |
| | | EntryIDSet entryIDSet = codec.decode(key, value); |
| | | if(addedIDs != null) |
| | | { |
| | | if(entryIDSet.isDefined() && indexEntryLimit > 0) |
| | |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | EntryIDSet entryIDSet = newSetFromBytes(key, value); |
| | | EntryIDSet entryIDSet = codec.decode(key, value); |
| | | if (entryIDSet.isDefined()) |
| | | { |
| | | return ConditionResult.valueOf(entryIDSet.contains(entryID)); |
| | |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | return newSetFromBytes(key, value); |
| | | return codec.decode(key, value); |
| | | } |
| | | return trusted ? newDefinedSet() : newUndefinedSet(); |
| | | } |
| | |
| | | |
| | | ArrayList<EntryIDSet> sets = new ArrayList<EntryIDSet>(); |
| | | |
| | | Cursor cursor = txn.openCursor(getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(getName()); |
| | | try |
| | | { |
| | | boolean success; |
| | |
| | | } |
| | | } |
| | | |
| | | EntryIDSet set = newSetFromBytes(cursor.getKey(), cursor.getValue()); |
| | | EntryIDSet set = codec.decode(cursor.getKey(), cursor.getValue()); |
| | | if (!set.isDefined()) |
| | | { |
| | | // There is no point continuing. |
| | |
| | | synchronized void setTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException |
| | | { |
| | | this.trusted = trusted; |
| | | state.putIndexTrustState(txn, this, trusted); |
| | | if (trusted) { |
| | | state.addFlagsToIndex(txn, getName(), TRUSTED); |
| | | } else { |
| | | state.removeFlagsFromIndex(txn, getName(), TRUSTED); |
| | | } |
| | | } |
| | | |
| | | synchronized boolean isTrusted() |
| | |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ConditionResult; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | |
| | | EntryContainer entryContainer) throws StorageRuntimeException |
| | | { |
| | | super(name, indexer, state, 0, 0, false, txn, entryContainer); |
| | | state.putIndexTrustState(txn, this, false); |
| | | state.removeFlagsFromIndex(txn, name, IndexFlag.TRUSTED); |
| | | super.delete(txn); |
| | | } |
| | | |
| | |
| | | // Cursor through the object class database and load the object class set |
| | | // definitions. At the same time, figure out the highest token value and |
| | | // initialize the object class counter to one greater than that. |
| | | final Cursor ocCursor = txn.openCursor(ocTreeName); |
| | | final Cursor<ByteString, ByteString> ocCursor = txn.openCursor(ocTreeName); |
| | | try |
| | | { |
| | | while (ocCursor.next()) |
| | |
| | | |
| | | // Cursor through the attribute description database and load the attribute |
| | | // set definitions. |
| | | final Cursor adCursor = txn.openCursor(adTreeName); |
| | | final Cursor<ByteString, ByteString> adCursor = txn.openCursor(adTreeName); |
| | | try |
| | | { |
| | | while (adCursor.next()) |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | |
| | | import java.util.Arrays; |
| | | import java.util.Collection; |
| | | import java.util.Collections; |
| | | import java.util.EnumSet; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.UpdateFunction; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.util.StaticUtils; |
| | | |
| | |
| | | */ |
| | | class State extends DatabaseContainer |
| | | { |
| | | private static final ByteString falseBytes = ByteString.wrap(new byte[] { 0x00 }); |
| | | private static final ByteString trueBytes = ByteString.wrap(new byte[] { 0x01 }); |
| | | |
| | | /** |
| | | * Use COMPACTED serialization for new indexes. |
| | | * @see EntryIDSet.EntryIDSetCodecV2 |
| | | */ |
| | | private static final Collection<IndexFlag> DEFAULT_FLAGS = Collections.unmodifiableCollection(Arrays |
| | | .asList(IndexFlag.COMPACTED)); |
| | | |
| | | /** |
| | | * Bit-field containing the possible flags that an index can have. |
| | | * When adding flags, ensure that each value fits in a single bit. |
| | | */ |
| | | static enum IndexFlag |
| | | { |
| | | TRUSTED(0x01), |
| | | |
| | | /** |
| | | * Use compact encoding for indexes' ID storage. |
| | | */ |
| | | COMPACTED(0x02); |
| | | |
| | | static final EnumSet<IndexFlag> ALL_FLAGS = EnumSet.allOf(IndexFlag.class); |
| | | |
| | | final byte mask; |
| | | |
| | | IndexFlag(int mask) { |
| | | this.mask=(byte) mask; |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Create a new State object. |
| | |
| | | super(name); |
| | | } |
| | | |
| | | private ByteString keyForIndex(TreeName indexTreeName) throws StorageRuntimeException |
| | | { |
| | | return ByteString.wrap(StaticUtils.getBytes(indexTreeName.toString())); |
| | | } |
| | | |
| | | /** |
| | | * Return the key associated with the index in the state database. |
| | | * |
| | | * @param index The index we need the key for. |
| | | * @return the key |
| | | * Fetch index flags from the database. |
| | | * @param txn a non null database transaction |
| | | * @param indexTreeName The tree name of the index |
| | | * @return The flags of the index in the database, or the default flags if the index has none. |
| | | * @throws NullPointerException if txn or indexTreeName is null |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | private ByteString keyForIndex(DatabaseContainer index) |
| | | throws StorageRuntimeException |
| | | EnumSet<IndexFlag> getIndexFlags(ReadableTransaction txn, TreeName indexTreeName) throws StorageRuntimeException { |
| | | checkNotNull(txn, "txn must not be null"); |
| | | checkNotNull(indexTreeName, "indexTreeName must not be null"); |
| | | |
| | | final ByteString value = txn.read(getName(), keyForIndex(indexTreeName)); |
| | | return decodeFlagsOrGetDefault(value); |
| | | } |
| | | |
| | | /** |
| | | * Ensures that the specified flags are set for the given index. |
| | | * @param txn a non null database transaction |
| | | * @param indexTreeName The tree name of the index |
| | | * @param flags The flags to add to the index |
| | | * @return true if the flags have been updated |
| | | * @throws NullPointerException if txn, indexTreeName or flags is null |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | boolean addFlagsToIndex(WriteableTransaction txn, TreeName indexTreeName, final IndexFlag... flags) |
| | | { |
| | | String shortName = index.getName().toString(); |
| | | return ByteString.wrap(StaticUtils.getBytes(shortName)); |
| | | checkNotNull(txn, "txn must not be null"); |
| | | checkNotNull(indexTreeName, "indexTreeName must not be null"); |
| | | checkNotNull(flags, "flags must not be null"); |
| | | |
| | | return txn.update(getName(), keyForIndex(indexTreeName), new UpdateFunction() |
| | | { |
| | | @Override |
| | | public ByteSequence computeNewValue(ByteSequence oldValue) |
| | | { |
| | | final EnumSet<IndexFlag> currentFlags = decodeFlagsOrGetDefault(oldValue); |
| | | currentFlags.addAll(Arrays.asList(flags)); |
| | | return encodeFlags(currentFlags); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private EnumSet<IndexFlag> decodeFlagsOrGetDefault(ByteSequence sequence) { |
| | | if ( sequence == null ) { |
| | | return EnumSet.copyOf(DEFAULT_FLAGS); |
| | | } else { |
| | | final EnumSet<IndexFlag> indexState = EnumSet.noneOf(IndexFlag.class); |
| | | final byte indexValue = sequence.byteAt(0); |
| | | for (IndexFlag state : IndexFlag.ALL_FLAGS) |
| | | { |
| | | if ((indexValue & state.mask) == state.mask) |
| | | { |
| | | indexState.add(state); |
| | | } |
| | | } |
| | | return indexState; |
| | | } |
| | | } |
| | | |
| | | private ByteString encodeFlags(EnumSet<IndexFlag> flags) { |
| | | byte value = 0; |
| | | for(IndexFlag flag : flags) { |
| | | value |= flag.mask; |
| | | } |
| | | return ByteString.valueOf(new byte[] { value }); |
| | | } |
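| | | |
| | | // Worked example, not part of the original file: a state record holding the single byte 0x03 |
| | | // decodes to EnumSet.of(TRUSTED, COMPACTED) since both masks (0x01 and 0x02) are set, while a |
| | | // missing record falls back to DEFAULT_FLAGS, i.e. COMPACTED only, so newly created indexes |
| | | // pick up the compact EntryIDSet codec by default. |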
| | | |
| | | |
| | | /** |
| | | * Ensures that the specified flags are not set for the given index. |
| | | * @param txn a non null database transaction |
| | | * @param indexTreeName The tree name of the index |
| | | * @param flags The flags to remove from the index |
| | | * @throws NullPointerException if txn, indexTreeName or flags is null |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | void removeFlagsFromIndex(WriteableTransaction txn, TreeName indexTreeName, final IndexFlag... flags) { |
| | | checkNotNull(txn, "txn must not be null"); |
| | | checkNotNull(indexTreeName, "indexTreeName must not be null"); |
| | | checkNotNull(flags, "flags must not be null"); |
| | | |
| | | txn.update(getName(), keyForIndex(indexTreeName), new UpdateFunction() |
| | | { |
| | | @Override |
| | | public ByteSequence computeNewValue(ByteSequence oldValue) |
| | | { |
| | | final EnumSet<IndexFlag> currentFlags = decodeFlagsOrGetDefault(oldValue); |
| | | currentFlags.removeAll(Arrays.asList(flags)); |
| | | return encodeFlags(currentFlags); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | /** |
| | |
| | | * @param txn a non null database transaction |
| | | * @param index The index storing the trusted state info. |
| | | * @return true if the entry was removed, false if it was not. |
| | | * @throws NullPointerException if txn or indexTreeName is null |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | boolean removeIndexTrustState(WriteableTransaction txn, DatabaseContainer index) throws StorageRuntimeException |
| | | boolean deleteRecord(WriteableTransaction txn, TreeName indexTreeName) throws StorageRuntimeException |
| | | { |
| | | ByteString key = keyForIndex(index); |
| | | return txn.delete(getName(), key); |
| | | checkNotNull(txn, "txn must not be null"); |
| | | checkNotNull(indexTreeName, "indexTreeName must not be null"); |
| | | |
| | | return txn.delete(getName(), keyForIndex(indexTreeName)); |
| | | } |
| | | |
| | | /** |
| | | * Fetch index state from the database. |
| | | * @param txn a non null database transaction |
| | | * @param index The index storing the trusted state info. |
| | | * @return The trusted state of the index in the database. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | boolean getIndexTrustState(ReadableTransaction txn, DatabaseContainer index) |
| | | throws StorageRuntimeException |
| | | { |
| | | ByteString key = keyForIndex(index); |
| | | ByteString value = txn.read(getName(), key); |
| | | |
| | | return value != null && value.equals(trueBytes); |
| | | } |
| | | |
| | | /** |
| | | * Put index state to database. |
| | | * @param txn a non null database transaction |
| | | * @param index The index storing the trusted state info. |
| | | * @param trusted The state value to put into the database. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | void putIndexTrustState(WriteableTransaction txn, DatabaseContainer index, boolean trusted) |
| | | throws StorageRuntimeException |
| | | { |
| | | ByteString key = keyForIndex(index); |
| | | |
| | | txn.put(getName(), key, trusted ? trueBytes : falseBytes); |
| | | } |
| | | |
| | | } |
| | |
| | | |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.newDefinedSet; |
| | | import static org.opends.server.util.StaticUtils.byteArrayToHexPlusAscii; |
| | | import static org.opends.server.util.StaticUtils.stackTraceToSingleLineString; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | | |
| | | import java.io.Closeable; |
| | | import java.util.Arrays; |
| | |
| | | import org.opends.server.admin.server.ConfigurationChangeListener; |
| | | import org.opends.server.admin.std.meta.BackendVLVIndexCfgDefn.Scope; |
| | | import org.opends.server.admin.std.server.BackendVLVIndexCfg; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.Storage; |
| | |
| | | |
| | | this.sortOrder = new SortOrder(parseSortKeys(config.getSortOrder())); |
| | | this.state = state; |
| | | this.trusted = state.getIndexTrustState(txn, this); |
| | | this.trusted = state.getIndexFlags(txn, getName()).contains(IndexFlag.TRUSTED); |
| | | if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0) |
| | | { |
| | | /* |
| | |
| | | ccr.addMessage(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD.get(getName())); |
| | | try |
| | | { |
| | | state.putIndexTrustState(txn, this, false); |
| | | state.removeFlagsFromIndex(txn, getName(), IndexFlag.TRUSTED); |
| | | } |
| | | catch (final StorageRuntimeException de) |
| | | { |
| | |
| | | synchronized void setTrusted(final WriteableTransaction txn, final boolean trusted) throws StorageRuntimeException |
| | | { |
| | | this.trusted = trusted; |
| | | state.putIndexTrustState(txn, this, trusted); |
| | | if ( trusted ) { |
| | | state.addFlagsToIndex(txn, getName(), IndexFlag.TRUSTED); |
| | | } else { |
| | | state.removeFlagsFromIndex(txn, getName(), IndexFlag.TRUSTED); |
| | | } |
| | | } |
| | | |
| | | void addEntry(final IndexBuffer buffer, final EntryID entryID, final Entry entry) throws DirectoryException |
| | |
| | | } |
| | | } |
| | | |
| | | private long[] readRange(final Cursor cursor, final int count, final StringBuilder debugBuilder) |
| | | private long[] readRange(final Cursor<ByteString, ByteString> cursor, final int count, |
| | | final StringBuilder debugBuilder) |
| | | { |
| | | long[] selectedIDs = new long[count]; |
| | | int selectedPos = 0; |
| | |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.newSetFromBytes; |
| | | import static org.opends.server.backends.pluggable.JebFormat.dnToDNKey; |
| | | import static org.opends.server.backends.pluggable.VLVIndex.decodeEntryIDFromVLVKey; |
| | | import static org.opends.server.backends.pluggable.JebFormat.*; |
| | | import static org.opends.server.backends.pluggable.VLVIndex.*; |
| | | |
| | | import java.util.AbstractSet; |
| | | import java.util.ArrayList; |
| | |
| | | */ |
| | | private void iterateID2Entry(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor cursor = txn.openCursor(id2entry.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName()); |
| | | try |
| | | { |
| | | long storedEntryCount = id2entry.getRecordCount(txn); |
| | |
| | | */ |
| | | private void iterateDN2ID(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor cursor = txn.openCursor(dn2id.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName()); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | */ |
| | | private void iterateID2Children(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor cursor = txn.openCursor(id2c.getName()); |
| | | Cursor<ByteString, EntryIDSet> cursor = id2c.openCursor(txn); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | keyCount++; |
| | | |
| | | ByteString key = cursor.getKey(); |
| | | ByteString value = cursor.getValue(); |
| | | |
| | | EntryID entryID; |
| | | try |
| | |
| | | |
| | | try |
| | | { |
| | | entryIDSet = newSetFromBytes(key, value); |
| | | entryIDSet = cursor.getValue(); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File id2children has malformed ID list for ID %s:%n%s%n", |
| | | entryID, StaticUtils.bytesToHex(value)); |
| | | } |
| | | logger.traceException(e); |
| | | logger.trace("File id2children has malformed ID list for ID %s", entryID); |
| | | continue; |
| | | } |
| | | |
| | |
| | | */ |
| | | private void iterateID2Subtree(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor cursor = txn.openCursor(id2s.getName()); |
| | | Cursor<ByteString, EntryIDSet> cursor = id2s.openCursor(txn); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | keyCount++; |
| | | |
| | | ByteString key = cursor.getKey(); |
| | | ByteString value = cursor.getValue(); |
| | | |
| | | EntryID entryID; |
| | | try |
| | | { |
| | |
| | | EntryIDSet entryIDSet; |
| | | try |
| | | { |
| | | entryIDSet = newSetFromBytes(key, value); |
| | | entryIDSet = cursor.getValue(); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File id2subtree has malformed ID list " + |
| | | "for ID %s:%n%s%n", entryID, StaticUtils.bytesToHex(value)); |
| | | } |
| | | logger.traceException(e); |
| | | logger.trace("File id2subtree has malformed ID list for ID %s", entryID); |
| | | continue; |
| | | } |
| | | |
| | |
| | | return; |
| | | } |
| | | |
| | | Cursor cursor = txn.openCursor(vlvIndex.getName()); |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(vlvIndex.getName()); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | return; |
| | | } |
| | | |
| | | Cursor cursor = txn.openCursor(index.getName()); |
| | | Cursor<ByteString, EntryIDSet> cursor = index.openCursor(txn);
| | | try |
| | | { |
| | | while (cursor.next()) |
| | |
| | | keyCount++; |
| | | |
| | | final ByteString key = cursor.getKey(); |
| | | ByteString value = cursor.getValue(); |
| | | |
| | | EntryIDSet entryIDSet; |
| | | try |
| | | { |
| | | entryIDSet = newSetFromBytes(key, value); |
| | | entryIDSet = cursor.getValue(); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("Malformed ID list: %s%n%s", |
| | | StaticUtils.bytesToHex(value), keyDump(index.toString(), key)); |
| | | } |
| | | logger.traceException(e); |
| | | logger.trace("Malformed ID list: %n%s", keyDump(index.toString(), key)); |
| | | continue; |
| | | } |
| | | |
| | |
| | | import java.io.Closeable; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | |
| | | /** |
| | | * Cursor that iterates through records in a tree. |
| | | * @param <K> Type of the record's key |
| | | * @param <V> Type of the record's value |
| | | */ |
| | | public interface Cursor extends Closeable |
| | | public interface Cursor<K, V> extends Closeable
| | | { |
| | | /** |
| | | * Positions the cursor on the provided key if it exists in the tree.
| | |
| | | * @return the current record's key, |
| | | * or {@code null} if this cursor is not positioned on any record. |
| | | */ |
| | | ByteString getKey(); |
| | | K getKey(); |
| | | |
| | | /** |
| | | * Returns the value of the record on which this cursor is currently positioned. |
| | |
| | | * @return the current record's value, |
| | | * or {@code null} if this cursor is not positioned on any record. |
| | | */ |
| | | ByteString getValue(); |
| | | V getValue(); |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | |
| | | |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.opends.server.DirectoryServerTestCase; |
| | | import org.testng.annotations.DataProvider; |
| | | import org.testng.annotations.Test; |
| | | |
| | | @SuppressWarnings("javadoc") |
| | | @Test(groups = { "precommit", "pluggablebackend" }, sequential=true) |
| | | @Test(groups = { "precommit", "pluggablebackend", "unit" }, sequential=true) |
| | | public class EntryIDSetTest extends DirectoryServerTestCase |
| | | { |
| | | private static final int UNDEFINED_INITIAL_SIZE = 10; |
| | | |
| | | private static final int UNDEFINED_INITIAL_SIZE = 10;
| | | private static final ByteString KEY = ByteString.valueOf("test");
| | | |
| | | @Test(expectedExceptions = NullPointerException.class) |
| | |
| | | assertIdsEquals(set.iterator(new EntryID(13)), 4L, 6L, 8L, 10L, 12L); |
| | | } |
| | | |
| | | @Test |
| | | public void testDefinedByteString() |
| | | @Test(dataProvider = "codecs") |
| | | public void testCodecs(EntryIDSetCodec codec) |
| | | { |
| | | ByteString string = newDefinedSet(4, 6, 8, 10, 12).toByteString(); |
| | | assertThat(decodeEntryIDSet(string)).containsExactly(4, 6, 8, 10, 12); |
| | | ByteString string = codec.encode(newDefinedSet(4, 6, 8, 10, 12)); |
| | | assertIdsEquals(codec.decode(KEY, string), 4, 6, 8, 10, 12); |
| | | |
| | | string = newDefinedSet().toByteString(); |
| | | assertThat(decodeEntryIDSet(string)).isEmpty(); |
| | | string = codec.encode(newUndefinedSet()); |
| | | assertThat(codec.decode(KEY, string).isDefined()).isFalse(); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(Long.MAX_VALUE); |
| | | |
| | | string = codec.encode(newUndefinedSetWithSize(ByteString.valueOf("none"), 1234)); |
| | | assertThat(codec.decode(KEY, string).isDefined()).isFalse(); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(1234); |
| | | } |
| | | |
| | | @Test(enabled = false, dataProvider = "codecs")
| | | public void testCodecsEmptyDefinedSet(EntryIDSetCodec codec) |
| | | { |
| | | // FIXME: When decoded, an empty defined set becomes an undefined set |
| | | // see OPENDJ-1833 |
| | | ByteString string = codec.encode(newDefinedSet()); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(0); |
| | | |
| | | string = codec.encode(newDefinedSet()); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(0); |
| | | } |
| | | |
| | | @Test(expectedExceptions = NullPointerException.class) |
| | |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedByteString() |
| | | { |
| | | assertThat(newUndefinedWithInitialSize().toByteString()).isEqualTo( |
| | | ByteString.valueOf(UNDEFINED_INITIAL_SIZE | Long.MIN_VALUE)); |
| | | } |
| | | |
| | | @Test |
| | | public void testNewEmptySet() |
| | | { |
| | | assertThat(newDefinedSet().isDefined()).isTrue(); |
| | |
| | | } |
| | | |
| | | @Test |
| | | public void testNewSetFromBytes() |
| | | { |
| | | assertThat(newSetFromBytes(KEY, ByteString.empty()).isDefined()).isFalse(); |
| | | assertThat(newSetFromBytes(KEY, ByteString.valueOf(42 | Long.MIN_VALUE)).isDefined()).isFalse(); |
| | | assertThat(newSetFromBytes(KEY, ByteString.valueOf(42 | Long.MIN_VALUE)).size()).isEqualTo(42); |
| | | |
| | | assertThat(newSetFromBytes(KEY, newDefinedSet(1, 2, 3).toByteString()).isDefined()).isTrue(); |
| | | assertThat(newSetFromBytes(KEY, newDefinedSet(1, 2, 3).toByteString()).size()).isEqualTo(3); |
| | | } |
| | | |
| | | @Test |
| | | public void testNewSetWithIDs()
| | | { |
| | | assertThat(newDefinedSet().isDefined()).isTrue(); |
| | |
| | | return newUndefinedSetWithSize(ByteString.valueOf("test"), UNDEFINED_INITIAL_SIZE); |
| | | } |
| | | |
| | | @DataProvider(name = "codecs") |
| | | public static Object[][] codecs() { |
| | | return new Object[][] { { CODEC_V1 }, { CODEC_V2 } }; |
| | | } |
| | | |
| | | } |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.assertj.core.api.Assertions.*; |
| | | import static org.mockito.Mockito.*; |
| | | import static org.opends.server.backends.pluggable.State.IndexFlag.*; |
| | | |
| | | import java.util.UUID; |
| | | |
| | | import org.forgerock.opendj.config.server.ConfigException; |
| | | import org.opends.server.DirectoryServerTestCase; |
| | | import org.opends.server.TestCaseUtils; |
| | | import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType; |
| | | import org.opends.server.admin.std.server.BackendIndexCfg; |
| | | import org.opends.server.admin.std.server.PersistitBackendCfg; |
| | | import org.opends.server.backends.persistit.PersistItStorage; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.ReadOperation; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteOperation; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.core.MemoryQuota; |
| | | import org.opends.server.core.ServerContext; |
| | | import org.opends.server.types.DN; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.testng.annotations.AfterMethod; |
| | | import org.testng.annotations.BeforeClass; |
| | | import org.testng.annotations.BeforeMethod; |
| | | import org.testng.annotations.Test; |
| | | |
| | | @Test(groups = { "precommit", "pluggablebackend" }, sequential = true) |
| | | public class StateTest extends DirectoryServerTestCase |
| | | { |
| | | private static final IndexFlag DEFAULT_FLAG = COMPACTED; |
| | | |
| | | private final TreeName stateTreeName = new TreeName("base-dn", "index-id"); |
| | | private TreeName indexTreeName; |
| | | private PersistItStorage storage; |
| | | private State state; |
| | | |
| | | @BeforeClass |
| | | public void startServer() throws Exception { |
| | | TestCaseUtils.startServer(); |
| | | } |
| | | |
| | | @BeforeMethod |
| | | public void setUp() throws Exception |
| | | { |
| | | indexTreeName = new TreeName("index-base-dn", "index-index-id-" + UUID.randomUUID().toString()); |
| | | |
| | | ServerContext serverContext = mock(ServerContext.class); |
| | | when(serverContext.getMemoryQuota()).thenReturn(new MemoryQuota()); |
| | | |
| | | storage = new PersistItStorage(createBackendCfg(), serverContext); |
| | | org.opends.server.backends.pluggable.spi.Importer importer = storage.startImport(); |
| | | importer.createTree(stateTreeName); |
| | | importer.close(); |
| | | |
| | | storage.open(); |
| | | |
| | | state = new State(stateTreeName); |
| | | } |
| | | |
| | | @AfterMethod |
| | | public void tearDown() { |
| | | storage.close(); |
| | | storage.removeStorageFiles(); |
| | | } |
| | | |
| | | @Test |
| | | public void testDefaultValuesForNotExistingEntries() throws Exception |
| | | { |
| | | assertThat(getFlags()).containsExactly(DEFAULT_FLAG); |
| | | } |
| | | |
| | | @Test |
| | | public void testCreateNewFlagHasDefaultValue() throws Exception |
| | | { |
| | | addFlags(); |
| | | assertThat(getFlags()).containsExactly(DEFAULT_FLAG); |
| | | } |
| | | |
| | | @Test |
| | | public void testCreateStateTrustedIsAlsoCompacted() throws Exception |
| | | { |
| | | addFlags(TRUSTED); |
| | | assertThat(getFlags()).containsExactly(TRUSTED, DEFAULT_FLAG); |
| | | } |
| | | |
| | | @Test |
| | | public void testCreateWithTrustedAndCompacted() throws Exception |
| | | { |
| | | addFlags(TRUSTED, COMPACTED); |
| | | assertThat(getFlags()).containsExactly(TRUSTED, COMPACTED); |
| | | } |
| | | |
| | | @Test |
| | | public void testUpdateNotSetDefault() throws Exception |
| | | { |
| | | createFlagWith(); |
| | | |
| | | addFlags(TRUSTED); |
| | | assertThat(getFlags()).containsExactly(TRUSTED); |
| | | } |
| | | |
| | | @Test |
| | | public void testAddFlags() throws Exception |
| | | { |
| | | createFlagWith(TRUSTED); |
| | | |
| | | addFlags(COMPACTED); |
| | | assertThat(getFlags()).containsExactly(TRUSTED, COMPACTED); |
| | | } |
| | | |
| | | @Test |
| | | public void testRemoveFlags() throws Exception |
| | | { |
| | | addFlags(COMPACTED, TRUSTED); |
| | | assertThat(getFlags()).containsExactly(TRUSTED, COMPACTED); |
| | | |
| | | removeFlags(TRUSTED); |
| | | assertThat(getFlags()).containsExactly(COMPACTED); |
| | | |
| | | removeFlags(COMPACTED); |
| | | assertThat(getFlags()).containsExactly(); |
| | | } |
| | | |
| | | @Test |
| | | public void testDeleteRecord() throws Exception |
| | | { |
| | | addFlags(COMPACTED, TRUSTED); |
| | | |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | state.deleteRecord(txn, indexTreeName); |
| | | } |
| | | }); |
| | | |
| | | assertThat(getFlags()).containsExactly(COMPACTED); |
| | | } |
| | | |
| | | private PersistitBackendCfg createBackendCfg() throws ConfigException, DirectoryException |
| | | { |
| | | String homeDirName = "pdb_test"; |
| | | PersistitBackendCfg backendCfg = mock(PersistitBackendCfg.class); |
| | | |
| | | when(backendCfg.getBackendId()).thenReturn("persTest" + homeDirName); |
| | | when(backendCfg.getDBDirectory()).thenReturn(homeDirName); |
| | | when(backendCfg.getDBDirectoryPermissions()).thenReturn("755"); |
| | | when(backendCfg.getDBCacheSize()).thenReturn(0L); |
| | | when(backendCfg.getDBCachePercent()).thenReturn(20); |
| | | when(backendCfg.isSubordinateIndexesEnabled()).thenReturn(true); |
| | | when(backendCfg.getBaseDN()).thenReturn(TestCaseUtils.newSortedSet(DN.valueOf("dc=test,dc=com"))); |
| | | when(backendCfg.dn()).thenReturn(DN.valueOf("dc=test,dc=com")); |
| | | when(backendCfg.listBackendIndexes()).thenReturn(new String[] { "sn" }); |
| | | when(backendCfg.listBackendVLVIndexes()).thenReturn(new String[0]); |
| | | |
| | | BackendIndexCfg indexCfg = mock(BackendIndexCfg.class); |
| | | when(indexCfg.getIndexType()).thenReturn(TestCaseUtils.newSortedSet(IndexType.PRESENCE, IndexType.EQUALITY)); |
| | | when(indexCfg.getAttribute()).thenReturn(DirectoryServer.getAttributeType("sn")); |
| | | when(backendCfg.getBackendIndex("sn")).thenReturn(indexCfg); |
| | | |
| | | return backendCfg; |
| | | } |
| | | |
| | | private void createFlagWith(IndexFlag... flags) throws Exception |
| | | { |
| | | createEmptyFlag(); |
| | | addFlags(flags); |
| | | } |
| | | |
| | | private void createEmptyFlag() throws Exception { |
| | | removeFlags(DEFAULT_FLAG); |
| | | } |
| | | |
| | | private void addFlags(final IndexFlag... flags) throws Exception |
| | | { |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | state.addFlagsToIndex(txn, indexTreeName, flags); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private void removeFlags(final IndexFlag... flags) throws Exception |
| | | { |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | state.removeFlagsFromIndex(txn, indexTreeName, flags); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private IndexFlag[] getFlags() throws Exception |
| | | { |
| | | return storage.read(new ReadOperation<IndexFlag[]>() |
| | | { |
| | | @Override |
| | | public IndexFlag[] run(ReadableTransaction txn) throws Exception |
| | | { |
| | | return state.getIndexFlags(txn, indexTreeName).toArray(new IndexFlag[0]); |
| | | } |
| | | }); |
| | | } |
| | | } |