mirror of https://github.com/OpenIdentityPlatform/OpenDJ.git

Yannick Lecaillez
10.14.2015 3fb5dfdd53827ebacd1601e88ace25a4a26a7e7a
OPENDJ-2217: Contended keys generate too many rollbacks
7 files modified
197 ■■■■ lines changed
opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java 5 ●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AbstractTree.java 9 ●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java 65 ●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java 56 ●●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java 50 ●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeBufferImporter.java 4 ●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/TreeName.java 8 ●●●● patch | view | raw | blame | history
opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java
@@ -105,6 +105,8 @@
public final class PDBStorage implements Storage, Backupable, ConfigurationChangeListener<PDBBackendCfg>,
  DiskSpaceMonitorHandler
{
  private static final double MAX_SLEEP_ON_RETRY_MS = 50.0;
  private static final String VOLUME_NAME = "dj";
  private static final String JOURNAL_NAME = VOLUME_NAME + "_journal";
@@ -888,7 +890,8 @@
      }
      catch (final RollbackException e)
      {
        // retry
        // retry after random sleep (reduces transactions collision. Drawback: increased latency)
        Thread.sleep((long) (Math.random() * MAX_SLEEP_ON_RETRY_MS));
      }
      catch (final Exception e)
      {
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/AbstractTree.java
@@ -35,7 +35,7 @@
 * This class is a wrapper around the tree object and provides basic
 * read and write methods for entries.
 */
abstract class AbstractTree implements Tree
abstract class AbstractTree implements Tree, Comparable<Tree>
{
  /** The name of the tree within the entryContainer. */
  private TreeName name;
@@ -89,4 +89,11 @@
  {
    return name.toString();
  }
  /**
   * Compares this tree to another by tree name, yielding a stable, deterministic
   * ordering across all trees. NOTE(review): this ordering appears intended to make
   * lock-acquisition order consistent (see the sorted-map comment in IndexBuffer
   * about preventing deadlock under pessimistic locking) — confirm with callers.
   *
   * @param o the tree to compare against; must not be {@code null}
   * @return a negative integer, zero, or a positive integer as this tree's name
   *         is less than, equal to, or greater than {@code o}'s name
   */
  @Override
  public int compareTo(Tree o)
  {
    return name.compareTo(o.getName());
  }
}
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/EntryContainer.java
@@ -1475,6 +1475,15 @@
  void addEntry(final Entry entry, final AddOperation addOperation)
  throws StorageRuntimeException, DirectoryException, CanceledOperationException
  {
    final DN parentDN = getParentWithinBase(entry.getName());
    final EntryID entryID = rootContainer.getNextEntryID();
    // Insert into the indexes, in index configuration order.
    final IndexBuffer indexBuffer = new IndexBuffer();
    indexInsertEntry(indexBuffer, entry, entryID);
    final ByteString encodedEntry = id2entry.encode(entry);
    try
    {
      storage.write(new WriteOperation()
@@ -1482,8 +1491,6 @@
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          DN parentDN = getParentWithinBase(entry.getName());
          try
          {
            // Check whether the entry already exists.
@@ -1510,14 +1517,9 @@
              id2childrenCount.addDelta(txn, parentID, 1);
            }
            EntryID entryID = rootContainer.getNextEntryID();
            dn2id.put(txn, entry.getName(), entryID);
            dn2uri.addEntry(txn, entry);
            id2entry.put(txn, entryID, entry);
            // Insert into the indexes, in index configuration order.
            final IndexBuffer indexBuffer = new IndexBuffer(EntryContainer.this);
            indexInsertEntry(indexBuffer, entry, entryID);
            id2entry.put(txn, entryID, encodedEntry);
            indexBuffer.flush(txn);
@@ -1526,13 +1528,6 @@
              // One last check before committing
              addOperation.checkIfCanceled(true);
            }
            // Update the entry cache.
            EntryCache<?> entryCache = DirectoryServer.getEntryCache();
            if (entryCache != null)
            {
              entryCache.putEntry(entry, backendID, entryID.longValue());
            }
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
@@ -1555,6 +1550,12 @@
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
    final EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.putEntry(entry, backendID, entryID.longValue());
    }
  }
  /**
@@ -1576,6 +1577,20 @@
  void deleteEntry(final DN entryDN, final DeleteOperation deleteOperation)
  throws DirectoryException, StorageRuntimeException, CanceledOperationException
  {
    final IndexBuffer indexBuffer = new IndexBuffer();
    final boolean isSubtreeDelete =
        deleteOperation != null && deleteOperation.getRequestControl(SubtreeDeleteControl.DECODER) != null;
    /*
     * We will iterate forwards through a range of the dn2id keys to find subordinates of the target entry from the top
     * of the tree downwards.
     */
    final ByteString entryDNKey = dnToDNKey(entryDN, baseDN.size());
    final ByteStringBuilder suffix = beforeKey(entryDNKey);
    final ByteStringBuilder end = afterKey(entryDNKey);
    final DN parentDN = getParentWithinBase(entryDN);
    try
    {
      storage.write(new WriteOperation()
@@ -1583,31 +1598,15 @@
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          final IndexBuffer indexBuffer = new IndexBuffer(EntryContainer.this);
          try
          {
            // Check for referral entries above the target entry.
            dn2uri.targetEntryReferrals(txn, entryDN, null);
            // Determine whether this is a subtree delete.
            boolean isSubtreeDelete =
                deleteOperation != null && deleteOperation.getRequestControl(SubtreeDeleteControl.DECODER) != null;
            /*
             * We will iterate forwards through a range of the dn2id keys to
             * find subordinates of the target entry from the top of the tree
             * downwards.
             */
            ByteString entryDNKey = dnToDNKey(entryDN, baseDN.size());
            ByteStringBuilder suffix = beforeKey(entryDNKey);
            ByteStringBuilder end = afterKey(entryDNKey);
            int subordinateEntriesDeleted = 0;
            // Since everything under targetDN will be deleted, we only have to decrement the counter of targetDN's
            // parent. Other counters will be removed in deleteEntry()
            final DN parentDN = getParentWithinBase(entryDN);
            if (parentDN != null) {
              final EntryID parentID = dn2id.get(txn, parentDN);
              if ( parentID == null ) {
@@ -1961,7 +1960,7 @@
            id2entry.put(txn, entryID, newEntry);
            // Update the indexes.
            final IndexBuffer indexBuffer = new IndexBuffer(EntryContainer.this);
            final IndexBuffer indexBuffer = new IndexBuffer();
            if (modifyOperation != null)
            {
              // In this case we know from the operation what the modifications were.
@@ -2061,7 +2060,7 @@
            isApexEntryMoved = false;
          }
          IndexBuffer buffer = new IndexBuffer(EntryContainer.this);
          final IndexBuffer buffer = new IndexBuffer();
          try
          {
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ID2Entry.java
@@ -41,6 +41,7 @@
import org.forgerock.opendj.io.ASN1;
import org.forgerock.opendj.io.ASN1Reader;
import org.forgerock.opendj.io.ASN1Writer;
import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
import org.forgerock.opendj.ldap.ByteStringBuilder;
import org.forgerock.opendj.ldap.DecodeException;
@@ -166,15 +167,7 @@
      }
    }
    /**
     * Encodes the entry into this codec's internal buffer and returns an
     * immutable snapshot of the encoded bytes.
     * <p>
     * The heavy lifting is done by {@code encodeVolatile}, which writes into the
     * shared {@code encodedBuffer}; {@code toByteString()} then copies that
     * volatile buffer into an independent {@link ByteString} that remains valid
     * after the codec is reused or released.
     *
     * @param entry the entry to encode
     * @param dataConfig encoding options (e.g. compression settings — assumed; confirm in DataConfig)
     * @return an immutable copy of the encoded entry
     * @throws DirectoryException if the entry cannot be encoded
     */
    private ByteString encodeCopy(Entry entry, DataConfig dataConfig)
        throws DirectoryException
    {
      encodeVolatile(entry, dataConfig);
      return encodedBuffer.toByteString();
    }
    private ByteString encodeInternal(Entry entry, DataConfig dataConfig)
        throws DirectoryException
    private ByteString encode(Entry entry, DataConfig dataConfig) throws DirectoryException
    {
      encodeVolatile(entry, dataConfig);
      return encodedBuffer.toByteString();
@@ -304,7 +297,19 @@
    EntryCodec codec = acquireEntryCodec();
    try
    {
      return codec.encodeCopy(entry, dataConfig);
      return codec.encode(entry, dataConfig);
    }
    finally
    {
      codec.release();
    }
  }
  ByteString encode(Entry entry) throws DirectoryException {
    final EntryCodec codec = acquireEntryCodec();
    try
    {
      return codec.encode(entry, dataConfig);
    }
    finally
    {
@@ -325,18 +330,15 @@
  public void put(WriteableTransaction txn, EntryID id, Entry entry)
       throws StorageRuntimeException, DirectoryException
  {
    Reject.ifNull(txn);
    ByteString key = id.toByteString();
    EntryCodec codec = acquireEntryCodec();
    try
    {
      ByteString value = codec.encodeInternal(entry, dataConfig);
      txn.put(getName(), key, value);
    Reject.ifNull(txn, "txn must not be null.");
    txn.put(getName(), id.toByteString(), encode(entry));
    }
    finally
  public void put(WriteableTransaction txn, EntryID id, ByteSequence encodedEntry)
      throws StorageRuntimeException, DirectoryException
    {
      codec.release();
    }
    Reject.ifNull(txn, "txn must not be null.");
    txn.put(getName(), id.toByteString(), encodedEntry);
  }
  /**
@@ -351,18 +353,8 @@
  public void importPut(Importer importer, EntryID id, Entry entry)
       throws StorageRuntimeException, DirectoryException
  {
    Reject.ifNull(importer);
    ByteString key = id.toByteString();
    EntryCodec codec = acquireEntryCodec();
    try
    {
      ByteString value = codec.encodeInternal(entry, dataConfig);
      importer.put(getName(), key, value);
    }
    finally
    {
      codec.release();
    }
    Reject.ifNull(importer, "importer must not be null.");
    importer.put(getName(), id.toByteString(), encode(entry));
  }
  /**
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexBuffer.java
@@ -28,9 +28,9 @@
import static org.opends.server.backends.pluggable.EntryIDSet.*;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
@@ -50,16 +50,14 @@
@SuppressWarnings("javadoc")
class IndexBuffer
{
  private final EntryContainer entryContainer;
  /**
   * The buffered records stored as a map from the record key to the
   * buffered value for that key for each index.
   */
  private final LinkedHashMap<Index, TreeMap<ByteString, BufferedIndexValues>> bufferedIndexes = new LinkedHashMap<>();
  private final SortedMap<Index, SortedMap<ByteString, BufferedIndexValues>> bufferedIndexes = new TreeMap<>();
  /** The buffered records stored as a set of buffered VLV values for each index. */
  private final LinkedHashMap<VLVIndex, BufferedVLVIndexValues> bufferedVLVIndexes = new LinkedHashMap<>();
  private final SortedMap<VLVIndex, BufferedVLVIndexValues> bufferedVLVIndexes = new TreeMap<>();
  /**
   * A simple class representing a pair of added and deleted indexed IDs. Initially both addedIDs
@@ -136,16 +134,6 @@
    }
  }
  /**
   * Construct a new empty index buffer object.
   *
   * @param entryContainer The entryContainer using this index buffer.
   */
  IndexBuffer(EntryContainer entryContainer)
  {
    this.entryContainer = entryContainer;
  }
  private BufferedVLVIndexValues createOrGetBufferedVLVIndexValues(VLVIndex vlvIndex)
  {
    BufferedVLVIndexValues bufferedValues = bufferedVLVIndexes.get(vlvIndex);
@@ -172,7 +160,7 @@
  private Map<ByteString, BufferedIndexValues> createOrGetBufferedOperations(Index index)
  {
    TreeMap<ByteString, BufferedIndexValues> bufferedOperations = bufferedIndexes.get(index);
    SortedMap<ByteString, BufferedIndexValues> bufferedOperations = bufferedIndexes.get(index);
    if (bufferedOperations == null)
    {
      bufferedOperations = new TreeMap<>();
@@ -190,25 +178,15 @@
   */
  void flush(WriteableTransaction txn) throws StorageRuntimeException, DirectoryException
  {
    /*
     * FIXME: this seems like a surprising way to update the indexes. Why not store the buffered
     * changes in a TreeMap in order to have a predictable iteration order?
     */
    for (AttributeIndex attributeIndex : entryContainer.getAttributeIndexes())
    // Indexes are stored in sorted map to prevent deadlock during flush with DB using pessimistic lock strategies.
    for (Entry<Index, SortedMap<ByteString, BufferedIndexValues>> entry : bufferedIndexes.entrySet())
    {
      for (Index index : attributeIndex.getNameToIndexes().values())
      {
        flushIndex(index, txn, bufferedIndexes.remove(index));
      }
      flushIndex(entry.getKey(), txn, entry.getValue());
    }
    for (VLVIndex vlvIndex : entryContainer.getVLVIndexes())
    for (Entry<VLVIndex, BufferedVLVIndexValues> entry : bufferedVLVIndexes.entrySet())
    {
      BufferedVLVIndexValues bufferedVLVValues = bufferedVLVIndexes.remove(vlvIndex);
      if (bufferedVLVValues != null)
      {
        vlvIndex.updateIndex(txn, bufferedVLVValues.addedSortKeys, bufferedVLVValues.deletedSortKeys);
      }
      entry.getKey().updateIndex(txn, entry.getValue().addedSortKeys, entry.getValue().deletedSortKeys);
    }
  }
@@ -232,17 +210,13 @@
    createOrGetBufferedIndexValues(index, key).deleteEntryID(entryID);
  }
  private void flushIndex(Index index, WriteableTransaction txn, Map<ByteString, BufferedIndexValues> bufferedValues)
  {
    if (bufferedValues != null)
  private static void flushIndex(Index index, WriteableTransaction txn,
      Map<ByteString, BufferedIndexValues> bufferedValues)
    {
      for (Entry<ByteString, BufferedIndexValues> entry : bufferedValues.entrySet())
      {
        final ByteString key = entry.getKey();
        final BufferedIndexValues values = entry.getValue();
        index.update(txn, key, values.deletedEntryIDs, values.addedEntryIDs);
      }
      bufferedValues.clear();
      index.update(txn, entry.getKey(), values.deletedEntryIDs, values.addedEntryIDs);
    }
  }
}
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeBufferImporter.java
@@ -1434,7 +1434,7 @@
    void processVLVIndexes(WriteableTransaction txn, Suffix suffix, Entry entry, EntryID entryID)
        throws DirectoryException
    {
      final IndexBuffer buffer = new IndexBuffer(suffix.getEntryContainer());
      final IndexBuffer buffer = new IndexBuffer();
      for (VLVIndex vlvIdx : suffix.getVLVIndexes())
      {
        vlvIdx.addEntry(buffer, entryID, entry);
@@ -2969,7 +2969,7 @@
    private void processVLVIndexes(WriteableTransaction txn, Entry entry, EntryID entryID)
        throws StorageRuntimeException, DirectoryException
    {
      final IndexBuffer buffer = new IndexBuffer(entryContainer);
      final IndexBuffer buffer = new IndexBuffer();
      for (VLVIndex vlvIdx : suffix.getVLVIndexes())
      {
        vlvIdx.addEntry(buffer, entryID, entry);
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/TreeName.java
@@ -31,7 +31,7 @@
 * <p>
 * Note: This class assumes name components don't contain a '/'.
 */
public final class TreeName
public final class TreeName implements Comparable<TreeName>
{
  private final String baseDN;
  private final String indexId;
@@ -127,4 +127,10 @@
  {
    return s;
  }
  /**
   * Compares tree names by their normalized string form {@code s}, providing the
   * total ordering required by {@link Comparable}.
   * NOTE(review): presumably consistent with {@code equals}/{@code hashCode} since
   * those would also derive from {@code s} — verify against the rest of this class.
   *
   * @param o the tree name to compare against; must not be {@code null}
   * @return a negative integer, zero, or a positive integer per the
   *         {@link String#compareTo(String)} contract applied to the names
   */
  @Override
  public int compareTo(TreeName o)
  {
    return s.compareTo(o.s);
  }
}