OPENDJ-1725: Persistit: very long recovery and many discarded txns after addrate test
1 files deleted
4 files added
52 files modified
| | |
| | | ! |
| | | ! CDDL HEADER END |
| | | ! |
| | | ! Copyright 2014 ForgeRock AS |
| | | ! Copyright 2014-2015 ForgeRock AS |
| | | ! |
| | | --> |
| | | <differences> |
| | |
| | | <method>java.util.Comparator comparator(org.forgerock.opendj.ldap.schema.Schema)</method> |
| | | <justification>OPENDJ-1689 method has been removed because all matching rules should support the default comparator</justification> |
| | | </difference> |
| | | <difference> |
| | | <className>org/forgerock/opendj/ldap/ByteSequence</className> |
| | | <differenceType>7012</differenceType> |
| | | <method>boolean startsWith(org.forgerock.opendj.ldap.ByteSequence)</method> |
| | | <justification>Lack of startsWith() forced to re-implement it multiple times at different location</justification> |
| | | </difference> |
| | | </differences> |
| | |
| | | * |
| | | * |
| | | * Copyright 2009 Sun Microsystems, Inc. |
| | | * Portions copyright 2011-2014 ForgeRock AS |
| | | * Portions copyright 2011-2015 ForgeRock AS |
| | | */ |
| | | package org.forgerock.opendj.ldap; |
| | | |
| | |
| | | ByteSequence subSequence(int start, int end); |
| | | |
| | | /** |
| | | * Tests if this ByteSequence starts with the specified prefix. |
| | | * |
| | | * @param prefix |
| | | * The prefix. |
| | | * @return true if the byte sequence represented by the argument is a prefix of the byte sequence represented by |
| | | * this ByteSequence; false otherwise. Note also that true will be returned if the argument is an empty |
| | | * sequence or is equal to this ByteSequence object as determined by the equals(Object) method. |
| | | */ |
| | | boolean startsWith(ByteSequence prefix); |
| | | |
| | | /** |
| | | * Returns the Base64 encoded string representation of this byte string. |
| | | * |
| | | * @return The Base64 encoded string representation of this byte string. |
| | |
| | | * @return The {@link ByteSequenceReader} which can be used to incrementally |
| | | * read and decode data from this byte string. |
| | | */ |
| | | @Override |
| | | public ByteSequenceReader asReader() { |
| | | return new ByteSequenceReader(this); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public byte byteAt(final int index) { |
| | | if (index >= length || index < 0) { |
| | | throw new IndexOutOfBoundsException(); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public int compareTo(final byte[] bytes, final int offset, final int length) { |
| | | checkArrayBounds(bytes, offset, length); |
| | | return compareTo(this.buffer, this.offset, this.length, bytes, offset, length); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public int compareTo(final ByteSequence o) { |
| | | if (this == o) { |
| | | return 0; |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public byte[] copyTo(final byte[] bytes) { |
| | | copyTo(bytes, 0); |
| | | return bytes; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public byte[] copyTo(final byte[] bytes, final int offset) { |
| | | if (offset < 0) { |
| | | throw new IndexOutOfBoundsException(); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public ByteBuffer copyTo(final ByteBuffer byteBuffer) { |
| | | byteBuffer.put(buffer, offset, length); |
| | | byteBuffer.flip(); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public ByteStringBuilder copyTo(final ByteStringBuilder builder) { |
| | | builder.append(buffer, offset, length); |
| | | return builder; |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public OutputStream copyTo(final OutputStream stream) throws IOException { |
| | | stream.write(buffer, offset, length); |
| | | return stream; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean equals(final byte[] bytes, final int offset, final int length) { |
| | | checkArrayBounds(bytes, offset, length); |
| | | return equals(this.buffer, this.offset, this.length, bytes, offset, length); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public int length() { |
| | | return length; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public ByteString subSequence(final int start, final int end) { |
| | | if (start < 0 || start > end || end > length) { |
| | | throw new IndexOutOfBoundsException(); |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean startsWith(ByteSequence prefix) { |
| | | if (prefix == null || prefix.length() > length) { |
| | | return false; |
| | | } |
| | | return prefix.equals(buffer, 0, prefix.length()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public String toBase64String() { |
| | | return Base64.encode(this); |
| | | } |
| | |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public byte[] toByteArray() { |
| | | return copyTo(new byte[length]); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public ByteString toByteString() { |
| | | return this; |
| | | } |
| | |
| | | * A mutable sequence of bytes backed by a byte array. |
| | | */ |
| | | public final class ByteStringBuilder implements ByteSequence { |
| | | |
| | | /** |
| | | * Maximum value that can be stored with a compacted representation. |
| | | */ |
| | | public static final long COMPACTED_MAX_VALUE = 0xFFFFFFFFFFFFFFL; |
| | | |
| | | /** Output stream implementation. */ |
| | | private final class OutputStreamImpl extends OutputStream { |
| | | @Override |
| | |
| | | return new SubSequence(subOffset + start, end - start); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean startsWith(ByteSequence prefix) { |
| | | if (prefix == null || prefix.length() > length) { |
| | | return false; |
| | | } |
| | | return prefix.equals(buffer, 0, prefix.length()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean startsWith(ByteSequence prefix) { |
| | | if (prefix == null || prefix.length() > length) { |
| | | return false; |
| | | } |
| | | return prefix.equals(buffer, 0, prefix.length()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public String toBase64String() { |
| | | return Base64.encode(this); |
| | | } |
| | |
| | | ! CDDL HEADER END |
| | | ! |
| | | ! |
| | | ! Copyright 2014 ForgeRock AS. |
| | | ! Copyright 2014-2015 ForgeRock AS. |
| | | ! --> |
| | | <adm:managed-object abstract="true" name="pluggable-backend" |
| | | plural-name="pluggable-backends" package="org.forgerock.opendj.server.config" |
| | |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | <adm:property name="subordinate-indexes-enabled" advanced="true"> |
| | | <adm:synopsis> |
| | | Indicates whether id2children and id2subtree indexes should be used for |
| | | this backend. These indexes are used for constraining filtered searches |
| | | to the search request's scope as well as for generating values for the |
| | | hasSubordinates and numSubordinates virtual attributes. |
| | | </adm:synopsis> |
| | | <adm:description> |
| | | Subordinate indexing is enabled by default and should only be disabled |
| | | for specialized use cases. A typical use case is where the backend is |
| | | to be subjected to heavy add/delete load beneath the same parent entry |
| | | such as when used as a session database. Disabling the subordinate |
| | | indexes means that the numSubordinates and hasSubordinates virtual |
| | | attributes will not be supported. |
| | | </adm:description> |
| | | <adm:default-behavior> |
| | | <adm:defined> |
| | | <adm:value>true</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:boolean /> |
| | | </adm:syntax> |
| | | <adm:profile name="ldap"> |
| | | <ldap:attribute> |
| | | <ldap:name>ds-cfg-subordinate-indexes-enabled</ldap:name> |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | </adm:managed-object> |
| | |
| | | ds-cfg-entries-compressed $ |
| | | ds-cfg-compact-encoding $ |
| | | ds-cfg-index-filter-analyzer-enabled $ |
| | | ds-cfg-index-filter-analyzer-max-filters $ |
| | | ds-cfg-subordinate-indexes-enabled ) |
| | | ds-cfg-index-filter-analyzer-max-filters ) |
| | | X-ORIGIN 'OpenDJ Directory Server' ) |
| | | objectClasses: ( 1.3.6.1.4.1.36733.2.1.2.23 |
| | | NAME 'ds-cfg-persistit-backend' |
| | |
| | | */ |
| | | package org.opends.guitools.controlpanel.util; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.ConfigMessages.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | | |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return numSubordinates(parentDN, false); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | return numSubordinates(baseDN, true) + 1; |
| | | } |
| | | |
| | | private long numSubordinates(DN entryDN, boolean subtree) throws DirectoryException |
| | | { |
| | | ConfigEntry baseEntry = configEntries.get(entryDN); |
| | | final ConfigEntry baseEntry = configEntries.get(entryDN); |
| | | if (baseEntry == null) |
| | | { |
| | | return -1; |
| | |
| | | { |
| | | return baseEntry.getChildren().size(); |
| | | } |
| | | else |
| | | { |
| | | long count = 0; |
| | | for(ConfigEntry child : baseEntry.getChildren().values()) |
| | | { |
| | |
| | | } |
| | | return count; |
| | | } |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | |
| | | public abstract ConditionResult hasSubordinates(DN entryDN) throws DirectoryException; |
| | | |
| | | /** |
| | | * Retrieves the number of subordinates for the requested entry. |
| | | * Retrieves the number of subordinates immediately below the requested entry. |
| | | * |
| | | * @param entryDN The distinguished name of the entry. |
| | | * |
| | | * @param subtree <code>true</code> to include all entries from the |
| | | * requested entry to the lowest level in the |
| | | * tree or <code>false</code> to only include |
| | | * the entries immediately below the requested |
| | | * @param parentDN |
| | | * The distinguished name of the parent. |
| | | * @return The number of subordinate entries for the requested entry. |
| | | * @throws DirectoryException |
| | | * If baseDN isn't a base dn managed by this backend or if a problem occurs while trying to retrieve the |
| | | * entry. |
| | | * |
| | | * @return The number of subordinate entries for the requested entry |
| | | * or -1 if it can not be determined. |
| | | * |
| | | * @throws DirectoryException If a problem occurs while trying to |
| | | * retrieve the entry. |
| | | * @throws NullPointerException |
| | | * if baseDN is null. |
| | | */ |
| | | public abstract long numSubordinates(DN entryDN, boolean subtree) throws DirectoryException; |
| | | public abstract long getNumberOfChildren(DN parentDN) throws DirectoryException; |
| | | |
| | | /** |
| | | * Retrieves the number of entries for the specified base DN including all entries from the requested entry to the |
| | | * lowest level in the tree. |
| | | * |
| | | * @param baseDN |
| | | * The base distinguished name. |
| | | * @return The number of subordinate entries including the base dn. |
| | | * @throws DirectoryException |
| | | * If baseDN isn't a base dn managed by this backend or if a problem occurs while trying to retrieve the |
| | | * entry. |
| | | * @throws NullPointerException |
| | | * if baseDN is null. |
| | | */ |
| | | public abstract long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException; |
| | | |
| | | /** |
| | | * Indicates whether an entry with the specified DN exists in the backend. The default |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | | import static org.opends.server.schema.BooleanSyntax.*; |
| | |
| | | @Override |
| | | public ConditionResult hasSubordinates(DN entryDN) throws DirectoryException |
| | | { |
| | | long ret = numSubordinates(entryDN, false); |
| | | long ret = getNumberOfSubordinates(entryDN, false); |
| | | if(ret < 0) |
| | | { |
| | | return ConditionResult.UNDEFINED; |
| | |
| | | return ConditionResult.valueOf(ret != 0); |
| | | } |
| | | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | return getNumberOfSubordinates(baseDN, true) + 1; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | { |
| | | // If the requested entry was null, then return undefined. |
| | | if (entryDN == null) |
| | | { |
| | | return -1; |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return getNumberOfSubordinates(parentDN, false); |
| | | } |
| | | |
| | | private long getNumberOfSubordinates(DN entryDN, boolean includeSubtree) throws DirectoryException |
| | | { |
| | | // If the requested entry was the backend base entry, then return |
| | | // the number of backup directories. |
| | | if (backupBaseDN.equals(entryDN)) |
| | |
| | | |
| | | // If subtree is included, count the number of entries for each |
| | | // backup directory. |
| | | if (subtree) |
| | | if (includeSubtree) |
| | | { |
| | | count++; |
| | | try |
| | | { |
| | | BackupDirectory backupDirectory = backupDirectories.get(dir).getBackupDirectory(); |
| | |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | return -1; |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_BACKUP_INVALID_BACKUP_DIRECTORY.get( |
| | | entryDN, e.getMessage())); |
| | | } |
| | | } |
| | | |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public Entry getEntry(DN entryDN) |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.messages.ReplicationMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(final DN entryDN, final boolean subtree) throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(final DN baseDN) throws DirectoryException |
| | | { |
| | | return -1; |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(final DN parentDN) throws DirectoryException |
| | | { |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | } |
| | | |
| | | /** |
| | |
| | | { |
| | | try |
| | | { |
| | | return numSubordinates(CHANGELOG_BASE_DN, true) + 1; |
| | | return getNumberOfEntriesInBaseDN(CHANGELOG_BASE_DN) + 1; |
| | | } |
| | | catch (DirectoryException e) |
| | | { |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.util.ServerConstants.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return getNumberOfSubordinates(parentDN, false); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | if (!Arrays.asList(baseDNs).contains(baseDN)) |
| | | { |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_LDIF_BACKEND_NUM_SUBORDINATES_NO_SUCH_ENTRY |
| | | .get(baseDN)); |
| | | } |
| | | final int baseDNIfExists = childDNs.containsKey(baseDN) ? 1 : 0; |
| | | return getNumberOfSubordinates(baseDN, true) + baseDNIfExists; |
| | | } |
| | | |
| | | private long getNumberOfSubordinates(DN entryDN, boolean includeSubtree) throws DirectoryException |
| | | { |
| | | backendLock.readLock().lock(); |
| | | |
| | |
| | | { |
| | | return 0L; |
| | | } |
| | | else |
| | | { |
| | | throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, |
| | | ERR_LDIF_BACKEND_NUM_SUBORDINATES_NO_SUCH_ENTRY.get(entryDN)); |
| | | throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, ERR_LDIF_BACKEND_NUM_SUBORDINATES_NO_SUCH_ENTRY |
| | | .get(entryDN)); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | if(!subtree) |
| | | |
| | | if (!includeSubtree) |
| | | { |
| | | return childDNSet.size(); |
| | | } |
| | | else |
| | | { |
| | | |
| | | long count = 0; |
| | | for(DN childDN : childDNSet) |
| | | { |
| | | count += numSubordinates(childDN, true); |
| | | count += getNumberOfSubordinates(childDN, true); |
| | | count ++; |
| | | } |
| | | return count; |
| | | } |
| | | |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | backendLock.readLock().unlock(); |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.util.ServerConstants.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | |
| | | public synchronized ConditionResult hasSubordinates(DN entryDN) |
| | | throws DirectoryException |
| | | { |
| | | long ret = numSubordinates(entryDN, false); |
| | | long ret = getNumberOfSubordinates(entryDN, false); |
| | | if(ret < 0) |
| | | { |
| | | return ConditionResult.UNDEFINED; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public synchronized long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | return getNumberOfSubordinates(baseDN, true) + 1; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return getNumberOfSubordinates(parentDN, false); |
| | | } |
| | | |
| | | private synchronized long getNumberOfSubordinates(DN entryDN, boolean includeSubtree) throws DirectoryException |
| | | { |
| | | // Try to look up the immediate children for the DN |
| | | Set<DN> children = childDNs.get(entryDN); |
| | | final Set<DN> children = childDNs.get(entryDN); |
| | | if (children == null) |
| | | { |
| | | if(entryMap.get(entryDN) != null) |
| | |
| | | return -1; |
| | | } |
| | | |
| | | if(!subtree) |
| | | if(!includeSubtree) |
| | | { |
| | | return children.size(); |
| | | } |
| | | else |
| | | { |
| | | long count = 0; |
| | | for(DN child : children) |
| | | { |
| | | count += numSubordinates(child, true); |
| | | count += getNumberOfSubordinates(child, true); |
| | | count++; |
| | | } |
| | | return count; |
| | | } |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.messages.ConfigMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(final DN entryDN, final boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(final DN baseDN) throws DirectoryException { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | return getNumberOfSubordinates(baseDN, true) + 1; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(final DN parentDN) throws DirectoryException { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return getNumberOfSubordinates(parentDN, false); |
| | | } |
| | | |
| | | private long getNumberOfSubordinates(final DN entryDN, final boolean includeSubtree) throws DirectoryException |
| | | { |
| | | final NavigableMap<DN, MonitorProvider<?>> dit = getDIT(); |
| | | if (!dit.containsKey(entryDN)) |
| | | { |
| | | return -1L; |
| | | } |
| | | else |
| | | { |
| | | long count = 0; |
| | | final int childDNSize = entryDN.size() + 1; |
| | | for (final DN dn : dit.tailMap(entryDN, false).navigableKeySet()) |
| | |
| | | { |
| | | break; |
| | | } |
| | | else if (subtree || dn.size() == childDNSize) |
| | | else if (includeSubtree || dn.size() == childDNSize) |
| | | { |
| | | count++; |
| | | } |
| | | } |
| | | return count; |
| | | } |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.util.ServerConstants.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | return -1; |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.messages.ConfigMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | |
| | | public ConditionResult hasSubordinates(DN entryDN) |
| | | throws DirectoryException |
| | | { |
| | | long ret = numSubordinates(entryDN, false); |
| | | final long ret = getNumberOfChildren(entryDN); |
| | | if(ret < 0) |
| | | { |
| | | return ConditionResult.UNDEFINED; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | if (entryDN == null || ! entryDN.isRootDN()) |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | if (!baseDN.isRootDN()) |
| | | { |
| | | return -1; |
| | | } |
| | | |
| | | long count = 1; |
| | | for (Map.Entry<DN, Backend<?>> entry : getSubordinateBaseDNs().entrySet()) |
| | | { |
| | | DN subBase = entry.getKey(); |
| | | Backend<?> b = entry.getValue(); |
| | | Entry subBaseEntry = b.getEntry(subBase); |
| | | if (subBaseEntry != null) |
| | | { |
| | | count++; |
| | | count += b.getNumberOfEntriesInBaseDN(subBase); |
| | | } |
| | | } |
| | | |
| | | return count; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | if (!parentDN.isRootDN()) |
| | | { |
| | | return -1; |
| | | } |
| | |
| | | for (Map.Entry<DN, Backend<?>> entry : getSubordinateBaseDNs().entrySet()) |
| | | { |
| | | DN subBase = entry.getKey(); |
| | | Backend<?> b = entry.getValue(); |
| | | Entry subBaseEntry = b.getEntry(subBase); |
| | | Entry subBaseEntry = entry.getValue().getEntry(subBase); |
| | | if (subBaseEntry != null) |
| | | { |
| | | if(subtree) |
| | | { |
| | | long subCount = b.numSubordinates(subBase, true); |
| | | if(subCount < 0) |
| | | { |
| | | return -1; |
| | | } |
| | | |
| | | count += subCount; |
| | | } |
| | | count ++; |
| | | } |
| | | } |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.messages.ConfigMessages.*; |
| | | import static org.opends.messages.SchemaMessages.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | return 1L; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return 0L; |
| | | } |
| | | |
| | |
| | | */ |
| | | package org.opends.server.backends; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | | import static org.opends.server.util.ServerConstants.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, |
| | | ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_NUM_SUBORDINATES_NOT_SUPPORTED.get()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | |
| | | package org.opends.server.backends.jeb; |
| | | |
| | | import static com.sleepycat.je.EnvironmentConfig.*; |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.jeb.ConfigurableEnvironment.*; |
| | |
| | | |
| | | import java.io.File; |
| | | import java.io.IOException; |
| | | import java.util.*; |
| | | import java.util.Arrays; |
| | | import java.util.Collections; |
| | | import java.util.HashSet; |
| | | import java.util.LinkedHashMap; |
| | | import java.util.List; |
| | | import java.util.Map; |
| | | import java.util.Set; |
| | | import java.util.SortedSet; |
| | | import java.util.concurrent.ExecutionException; |
| | | import java.util.concurrent.TimeUnit; |
| | | import java.util.concurrent.atomic.AtomicInteger; |
| | |
| | | import org.opends.server.backends.RebuildConfig; |
| | | import org.opends.server.backends.VerifyConfig; |
| | | import org.opends.server.backends.pluggable.spi.StorageStatus; |
| | | import org.opends.server.core.*; |
| | | import org.opends.server.core.AddOperation; |
| | | import org.opends.server.core.DeleteOperation; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.core.ModifyDNOperation; |
| | | import org.opends.server.core.ModifyOperation; |
| | | import org.opends.server.core.SearchOperation; |
| | | import org.opends.server.core.ServerContext; |
| | | import org.opends.server.extensions.DiskSpaceMonitor; |
| | | import org.opends.server.types.*; |
| | | import org.opends.server.types.AttributeType; |
| | | import org.opends.server.types.BackupConfig; |
| | | import org.opends.server.types.BackupDirectory; |
| | | import org.opends.server.types.CanceledOperationException; |
| | | import org.opends.server.types.DN; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.opends.server.types.Entry; |
| | | import org.opends.server.types.IdentifiedException; |
| | | import org.opends.server.types.IndexType; |
| | | import org.opends.server.types.InitializationException; |
| | | import org.opends.server.types.LDIFExportConfig; |
| | | import org.opends.server.types.LDIFImportConfig; |
| | | import org.opends.server.types.LDIFImportResult; |
| | | import org.opends.server.types.Operation; |
| | | import org.opends.server.types.Privilege; |
| | | import org.opends.server.types.RestoreConfig; |
| | | import org.opends.server.util.RuntimeInformation; |
| | | |
| | | import com.sleepycat.je.DatabaseException; |
| | |
| | | return ConditionResult.valueOf(ret != 0); |
| | | } |
| | | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | EntryContainer ec = rootContainer.getEntryContainer(baseDN); |
| | | if (ec == null || !ec.getBaseDN().equals(baseDN)) |
| | | { |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_JEB_SEARCH_NO_SUCH_OBJECT.get(baseDN)); |
| | | } |
| | | return numSubordinates(baseDN, true); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return numSubordinates(parentDN, false); |
| | | } |
| | | |
| | | private long numSubordinates(DN entryDN, boolean subtree) throws DirectoryException |
| | | { |
| | | checkRootContainerInitialized(); |
| | | EntryContainer ec = rootContainer.getEntryContainer(entryDN); |
| | |
| | | import com.sleepycat.je.*; |
| | | |
| | | import static com.sleepycat.je.LockMode.*; |
| | | |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.jeb.JebFormat.*; |
| | | import static org.opends.server.core.DirectoryServer.*; |
| | |
| | | * Determine the number of subordinate entries for a given entry. |
| | | * |
| | | * @param entryDN The distinguished name of the entry. |
| | | * @param subtree <code>true</code> will include all the entries under the |
| | | * given entries. <code>false</code> will only return the |
| | | * number of entries immediately under the given entry. |
| | | * @param subtree <code>true</code> will include the entry and all the |
| | | * entries under the given entries. <code>false</code> |
| | | * will only return the number of entries immediately |
| | | * under the given entry. |
| | | * @return The number of subordinate entries for the given entry or -1 if |
| | | * the entry does not exist. |
| | | * @throws DatabaseException If an error occurs in the JE database. |
| | |
| | | if (entryID != null) |
| | | { |
| | | DatabaseEntry key = new DatabaseEntry(entryIDToDatabase(entryID.longValue())); |
| | | EntryIDSet entryIDSet; |
| | | final EntryIDSet entryIDSet; |
| | | long count; |
| | | if (subtree) |
| | | { |
| | | count = dn2id.get(null, entryDN, LockMode.DEFAULT) != null ? 1 : 0; |
| | | entryIDSet = id2subtree.readKey(key, null, LockMode.DEFAULT); |
| | | } |
| | | else |
| | | { |
| | | count = 0; |
| | | entryIDSet = id2children.readKey(key, null, LockMode.DEFAULT); |
| | | } |
| | | long count = entryIDSet.size(); |
| | | if(count != Long.MAX_VALUE) |
| | | if(entryIDSet.size() == Long.MAX_VALUE) |
| | | { |
| | | return count; |
| | | return -1; |
| | | } |
| | | return count + entryIDSet.size(); |
| | | } |
| | | return -1; |
| | | } |
| | |
| | | import java.util.HashMap; |
| | | import java.util.List; |
| | | import java.util.Map; |
| | | import java.util.NoSuchElementException; |
| | | |
| | | import org.forgerock.i18n.LocalizableMessage; |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | |
| | | { |
| | | private ByteString currentKey; |
| | | private ByteString currentValue; |
| | | private final Exchange ex; |
| | | private final Exchange exchange; |
| | | |
| | | private CursorImpl(final Exchange exchange) |
| | | { |
| | | this.ex = exchange; |
| | | this.exchange = exchange; |
| | | } |
| | | |
| | | @Override |
| | | public void close() |
| | | { |
| | | // Release immediately because this exchange did not come from the txn cache |
| | | db.releaseExchange(ex); |
| | | db.releaseExchange(exchange); |
| | | } |
| | | |
    @Override
    public boolean isDefined() {
      // The cursor is positioned on a record iff the exchange's value is defined.
      return exchange.getValue().isDefined();
    }
| | | |
| | | @Override |
| | |
| | | { |
| | | if (currentKey == null) |
| | | { |
| | | currentKey = keyToBytes(ex.getKey()); |
| | | throwIfUndefined(); |
| | | currentKey = ByteString.wrap(exchange.getKey().reset().decodeByteArray()); |
| | | } |
| | | return currentKey; |
| | | } |
| | |
| | | { |
| | | if (currentValue == null) |
| | | { |
| | | currentValue = valueToBytes(ex.getValue()); |
| | | throwIfUndefined(); |
| | | currentValue = ByteString.wrap(exchange.getValue().getByteArray()); |
| | | } |
| | | return currentValue; |
| | | } |
| | |
| | | clearCurrentKeyAndValue(); |
| | | try |
| | | { |
| | | return ex.next(); |
| | | return exchange.next(); |
| | | } |
| | | catch (final PersistitException e) |
| | | { |
| | |
| | | public boolean positionToKey(final ByteSequence key) |
| | | { |
| | | clearCurrentKeyAndValue(); |
| | | bytesToKey(ex.getKey(), key); |
| | | bytesToKey(exchange.getKey(), key); |
| | | try |
| | | { |
| | | ex.fetch(); |
| | | return ex.getValue().isDefined(); |
| | | exchange.fetch(); |
| | | return exchange.getValue().isDefined(); |
| | | } |
| | | catch (final PersistitException e) |
| | | { |
| | |
| | | public boolean positionToKeyOrNext(final ByteSequence key) |
| | | { |
| | | clearCurrentKeyAndValue(); |
| | | bytesToKey(ex.getKey(), key); |
| | | bytesToKey(exchange.getKey(), key); |
| | | try |
| | | { |
| | | ex.fetch(); |
| | | return ex.getValue().isDefined() || ex.next(); |
| | | exchange.fetch(); |
| | | return exchange.getValue().isDefined() || exchange.next(); |
| | | } |
| | | catch (final PersistitException e) |
| | | { |
| | |
| | | { |
| | | // There doesn't seem to be a way to optimize this using Persistit. |
| | | clearCurrentKeyAndValue(); |
| | | ex.getKey().to(Key.BEFORE); |
| | | exchange.getKey().to(Key.BEFORE); |
| | | try |
| | | { |
| | | for (int i = 0; i <= index; i++) |
| | | { |
| | | if (!ex.next()) |
| | | if (!exchange.next()) |
| | | { |
| | | return false; |
| | | } |
| | |
| | | public boolean positionToLastKey() |
| | | { |
| | | clearCurrentKeyAndValue(); |
| | | ex.getKey().to(Key.AFTER); |
| | | exchange.getKey().to(Key.AFTER); |
| | | try |
| | | { |
| | | return ex.previous(); |
| | | return exchange.previous(); |
| | | } |
| | | catch (final PersistitException e) |
| | | { |
| | |
| | | currentKey = null; |
| | | currentValue = null; |
| | | } |
| | | |
| | | private void throwIfUndefined() { |
| | | if (!isDefined()) { |
| | | throw new NoSuchElementException(); |
| | | } |
| | | } |
| | | } |
| | | |
| | | /** PersistIt implementation of the {@link Importer} interface. */ |
| | |
| | | return value; |
| | | } |
| | | |
  private ByteString keyToBytes(final Key key)
  {
    // reset() rewinds the key's internal decode cursor before reading.
    // NOTE(review): assumes keys were stored as a single encoded byte array,
    // which decodeByteArray() reverses — confirm against the key-encoding side.
    return ByteString.wrap(key.reset().decodeByteArray());
  }
| | | |
| | | private ByteString valueToBytes(final Value value) |
| | | { |
| | | if (value.isDefined()) |
| | |
| | | |
| | | private MatchingRuleIndex(WriteableTransaction txn, BackendIndexCfg cfg, Indexer indexer) |
| | | { |
| | | super(getIndexName(attributeType, indexer.getIndexID()), state, cfg.getIndexEntryLimit(), false, txn, |
| | | entryContainer); |
| | | super(getIndexName(attributeType, indexer.getIndexID()), state, cfg.getIndexEntryLimit(), txn, entryContainer); |
| | | this.indexer = indexer; |
| | | } |
| | | |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.core.DirectoryServer.*; |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | LocalizableMessage message = WARN_JEB_GET_ENTRY_COUNT_FAILED.get(e.getMessage()); |
| | | throw new InitializationException(message, e); |
| | | } |
| | |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | logger.traceException(e); |
| | | throw new InitializationException(ERR_BACKEND_CANNOT_REGISTER_BASEDN.get(dn, e), e); |
| | | } |
| | | } |
| | |
| | | @Override |
| | | public ConditionResult hasSubordinates(DN entryDN) throws DirectoryException |
| | | { |
| | | long ret = numSubordinates(entryDN, false); |
| | | if(ret < 0) |
| | | EntryContainer container; |
| | | try { |
| | | container = accessBegin(null, entryDN); |
| | | } |
| | | catch (DirectoryException de) |
| | | { |
| | | if (de.getResultCode() == ResultCode.UNDEFINED) |
| | | { |
| | | return ConditionResult.UNDEFINED; |
| | | } |
| | | return ConditionResult.valueOf(ret != 0); |
| | | throw de; |
| | | } |
| | | |
| | | container.sharedLock.lock(); |
| | | try |
| | | { |
| | | return ConditionResult.valueOf(container.hasSubordinates(entryDN)); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | | { |
| | | container.sharedLock.unlock(); |
| | | accessEnd(); |
| | | } |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | final EntryContainer ec; |
| | | |
| | | try { |
| | | ec = accessBegin(null, baseDN); |
| | | } |
| | | catch (DirectoryException de) |
| | | { |
| | | throw de; |
| | | } |
| | | |
| | | ec.sharedLock.lock(); |
| | | try |
| | | { |
| | | return ec.getNumberOfEntriesInBaseDN(); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | throw new DirectoryException( |
| | | DirectoryServer.getServerErrorResultCode(), LocalizableMessage.raw(e.getMessage()), e); |
| | | } |
| | | finally |
| | | { |
| | | ec.sharedLock.unlock(); |
| | | accessEnd(); |
| | | } |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | EntryContainer ec; |
| | | |
| | | /* |
| | |
| | | * error if the EntryContainer is null... |
| | | */ |
| | | try { |
| | | ec = accessBegin(null, entryDN); |
| | | ec = accessBegin(null, parentDN); |
| | | } |
| | | catch (DirectoryException de) |
| | | { |
| | |
| | | ec.sharedLock.lock(); |
| | | try |
| | | { |
| | | long count = ec.getNumSubordinates(entryDN, subtree); |
| | | if(count == Long.MAX_VALUE) |
| | | { |
| | | // The index entry limit has exceeded and there is no count maintained. |
| | | return -1; |
| | | } |
| | | return count; |
| | | return ec.getNumberOfChildren(parentDN); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | | { |
| | | ec.sharedLock.unlock(); |
| | | accessEnd(); |
| | | } |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public boolean entryExists(final DN entryDN) throws DirectoryException |
| | | { |
| | | EntryContainer ec = accessBegin(null, entryDN); |
| | | ec.sharedLock.lock(); |
| | | try |
| | | { |
| | | return ec.entryExists(entryDN); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (IOException ioe) |
| | | { |
| | | logger.traceException(ioe); |
| | | throw new DirectoryException(errorRC, ERR_JEB_EXPORT_IO_ERROR.get(ioe.getMessage()), ioe); |
| | | } |
| | | catch (StorageRuntimeException de) |
| | | { |
| | | logger.traceException(de); |
| | | throw createDirectoryException(de); |
| | | } |
| | | catch (ConfigException ce) |
| | |
| | | { |
| | | throw (DirectoryException) e; |
| | | } |
| | | logger.traceException(e); |
| | | throw new DirectoryException(errorRC, e.getMessageObject(), e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw new DirectoryException(getServerErrorResultCode(), LocalizableMessage.raw(e.getMessage()), e); |
| | | } |
| | | catch (DirectoryException e) |
| | |
| | | } |
| | | catch (OpenDsException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw new DirectoryException(getServerErrorResultCode(), e.getMessageObject(), e); |
| | | } |
| | | catch (ConfigException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw new DirectoryException(getServerErrorResultCode(), e.getMessageObject(), e); |
| | | } |
| | | finally |
| | |
| | | { |
| | | rootContainer = getReadOnlyRootContainer(); |
| | | } |
| | | |
| | | VerifyJob verifyJob = new VerifyJob(verifyConfig); |
| | | return verifyJob.verifyBackend(rootContainer); |
| | | return new VerifyJob(rootContainer, verifyConfig).verifyBackend(); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw createDirectoryException(e); |
| | | } |
| | | finally |
| | |
| | | } |
| | | catch (ExecutionException execEx) |
| | | { |
| | | logger.traceException(execEx); |
| | | throw new DirectoryException(errorRC, ERR_EXECUTION_ERROR.get(execEx.getMessage())); |
| | | throw new DirectoryException(errorRC, ERR_EXECUTION_ERROR.get(execEx.getMessage()), execEx); |
| | | } |
| | | catch (InterruptedException intEx) |
| | | { |
| | | logger.traceException(intEx); |
| | | throw new DirectoryException(errorRC, ERR_INTERRUPTED_ERROR.get(intEx.getMessage())); |
| | | throw new DirectoryException(errorRC, ERR_INTERRUPTED_ERROR.get(intEx.getMessage()), intEx); |
| | | } |
| | | catch (ConfigException ce) |
| | | { |
| | | logger.traceException(ce); |
| | | throw new DirectoryException(errorRC, ce.getMessageObject()); |
| | | throw new DirectoryException(errorRC, ce.getMessageObject(), ce); |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw new DirectoryException(errorRC, LocalizableMessage.raw(e.getMessage())); |
| | | throw new DirectoryException(errorRC, LocalizableMessage.raw(e.getMessage()), e); |
| | | } |
| | | catch (InitializationException e) |
| | | { |
| | | logger.traceException(e); |
| | | throw new InitializationException(e.getMessageObject()); |
| | | throw new InitializationException(e.getMessageObject(), e); |
| | | } |
| | | finally |
| | | { |
| | |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | logger.traceException(e); |
| | | LocalizableMessage message = ERR_JEB_OPEN_ENV_FAIL.get(e.getMessage()); |
| | | throw new InitializationException(message, e); |
| | | } |
| | |
| | | return input.positionToIndex(index); |
| | | } |
| | | |
  @Override
  public boolean isDefined()
  {
    // This transforming view is defined exactly when the underlying cursor is.
    return input.isDefined();
  }
| | | |
| | | private void clearCache() |
| | | { |
| | | cachedTransformedKey = null; |
| | |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.opends.server.backends.pluggable.JebFormat.*; |
| | | import static org.opends.server.backends.pluggable.CursorTransformer.*; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | | import org.forgerock.util.promise.Function; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.SequentialCursor; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.types.DN; |
| | | import org.opends.server.types.DirectoryException; |
| | | |
| | | /** |
| | | * This class represents the DN database, or dn2id, which has one record |
| | |
| | | */ |
| | | class DN2ID extends AbstractDatabaseContainer |
| | | { |
| | | private final int prefixRDNComponents; |
| | | private static final Function<ByteString, Void, DirectoryException> TO_VOID_KEY = |
| | | new Function<ByteString, Void, DirectoryException>() |
| | | { |
| | | @Override |
| | | public Void apply(ByteString value) throws DirectoryException |
| | | { |
| | | return null; |
| | | } |
| | | }; |
| | | |
| | | private static final CursorTransformer.ValueTransformer<ByteString, ByteString, EntryID, Exception> TO_ENTRY_ID = |
| | | new CursorTransformer.ValueTransformer<ByteString, ByteString, EntryID, Exception>() |
| | | { |
| | | @Override |
| | | public EntryID transform(ByteString key, ByteString value) throws Exception |
| | | { |
| | | return new EntryID(value); |
| | | } |
| | | }; |
| | | |
| | | private final DN baseDN; |
| | | |
| | | |
| | | /** |
| | | * Create a DN2ID instance for the DN database in a given entryContainer. |
| | | * |
| | | * @param treeName The name of the DN database. |
| | | * @param entryContainer The entryContainer of the DN database. |
| | | * @param baseDN The base DN of the database. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | DN2ID(TreeName treeName, EntryContainer entryContainer) throws StorageRuntimeException |
| | | DN2ID(TreeName treeName, DN baseDN) throws StorageRuntimeException |
| | | { |
| | | super(treeName); |
| | | this.prefixRDNComponents = entryContainer.getBaseDN().size(); |
| | | this.baseDN = baseDN; |
| | | } |
| | | |
| | | /** |
| | |
| | | * @throws StorageRuntimeException If an error occurred while attempting to insert |
| | | * the new record. |
| | | */ |
| | | void put(WriteableTransaction txn, DN dn, EntryID id) throws StorageRuntimeException |
| | | void put(final WriteableTransaction txn, DN dn, final EntryID id) throws StorageRuntimeException |
| | | { |
| | | ByteString key = dnToDNKey(dn, prefixRDNComponents); |
| | | ByteString value = id.toByteString(); |
| | | txn.put(getName(), key, value); |
| | | txn.put(getName(), dnToKey(dn), id.toByteString()); |
| | | } |
| | | |
| | | private ByteString dnToKey(DN dn) { |
| | | return dnToDNKey(dn, baseDN.size()); |
| | | } |
| | | |
| | | /** |
| | |
| | | */ |
| | | boolean remove(WriteableTransaction txn, DN dn) throws StorageRuntimeException |
| | | { |
| | | ByteString key = dnToDNKey(dn, prefixRDNComponents); |
| | | |
| | | return txn.delete(getName(), key); |
| | | return txn.delete(getName(), dnToKey(dn)); |
| | | } |
| | | |
| | | /** |
| | |
| | | */ |
| | | EntryID get(ReadableTransaction txn, DN dn) throws StorageRuntimeException |
| | | { |
| | | ByteString key = dnToDNKey(dn, prefixRDNComponents); |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | return new EntryID(value); |
| | | final ByteString value = txn.read(getName(), dnToKey(dn)); |
| | | return value != null ? new EntryID(value) : null; |
| | | } |
| | | |
  Cursor<Void, EntryID> openCursor(ReadableTransaction txn, DN dn)
  {
    // Expose the raw cursor as <Void, EntryID>: keys are hidden (mapped to null)
    // and values are decoded into EntryID instances.
    return transformKeysAndValues(openCursor0(txn, dn), TO_VOID_KEY, TO_ENTRY_ID);
  }
| | | |
  private Cursor<ByteString, ByteString> openCursor0(ReadableTransaction txn, DN dn) {
    final Cursor<ByteString, ByteString> cursor = txn.openCursor(getName());
    // Position the cursor on the record for dn's key; when no such record
    // exists the cursor is left undefined (callers check isDefined()).
    cursor.positionToKey(dnToKey(dn));
    return cursor;
  }
| | | |
  SequentialCursor<Void, EntryID> openChildrenCursor(ReadableTransaction txn, DN dn)
  {
    // Cursor over the entry IDs of dn's immediate children only.
    return new ChildrenCursor(openCursor0(txn, dn));
  }
| | | |
  SequentialCursor<Void, EntryID> openSubordinatesCursor(ReadableTransaction txn, DN dn) {
    // Cursor over the entry IDs of every subordinate of dn (the whole subtree below it).
    return new SubtreeCursor(openCursor0(txn, dn));
  }
| | | |
| | | |
| | | /** |
| | | * Check if two DN have a parent-child relationship. |
| | | * |
| | | * @param parent |
| | | * The potential parent |
| | | * @param child |
| | | * The potential child of parent |
| | | * @return true if child is a direct children of parent, false otherwise. |
| | | */ |
| | | static boolean isChild(ByteSequence parent, ByteSequence child) |
| | | { |
| | | if (!child.startsWith(parent)) |
| | | { |
| | | return false; |
| | | } |
| | | // Immediate children should only have one RDN separator past the parent length |
| | | for (int i = child.length(); i >= parent.length(); i--) |
| | | { |
| | | if (child.byteAt(i) == DN.NORMALIZED_RDN_SEPARATOR && i != parent.length()) |
| | | { |
| | | return false; |
| | | } |
| | | } |
| | | return true; |
| | | } |
| | | |
| | | /** |
| | | * Decorator overriding the next() behavior to iterate through children of the entry pointed by the given cursor at |
| | | * creation. |
| | | */ |
| | | private static final class ChildrenCursor extends SequentialCursorForwarding { |
| | | private final ByteStringBuilder builder; |
| | | private final ByteString parentDN; |
| | | private boolean cursorOnParent; |
| | | |
| | | ChildrenCursor(Cursor<ByteString, ByteString> delegate) |
| | | { |
| | | super(delegate); |
| | | builder = new ByteStringBuilder(128); |
| | | parentDN = delegate.isDefined() ? delegate.getKey() : null; |
| | | cursorOnParent = true; |
| | | } |
| | | |
| | | @Override |
| | | public boolean next() |
| | | { |
| | | if (cursorOnParent) { |
| | | /** Go to the first children */ |
| | | delegate.next(); |
| | | cursorOnParent = false; |
| | | } else { |
| | | /** Go to the next sibling */ |
| | | delegate.positionToKeyOrNext(nextSibling()); |
| | | } |
| | | return isDefined() && delegate.getKey().startsWith(parentDN); |
| | | } |
| | | |
| | | private ByteStringBuilder nextSibling() |
| | | { |
| | | return builder.clear().append(delegate.getKey()).append((byte) 0x1); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decorator overriding the next() behavior to iterate through subordinates of the entry pointed by the given cursor |
| | | * at creation. |
| | | */ |
| | | private static final class SubtreeCursor extends SequentialCursorForwarding { |
| | | private final ByteString baseDN; |
| | | |
| | | SubtreeCursor(Cursor<ByteString, ByteString> delegate) |
| | | { |
| | | super(delegate); |
| | | baseDN = delegate.isDefined() ? delegate.getKey() : null; |
| | | } |
| | | |
| | | @Override |
| | | public boolean next() |
| | | { |
| | | return delegate.next() && delegate.getKey().startsWith(baseDN); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decorator allowing to partially overrides methods of a given cursor while keeping the default behavior for other |
| | | * methods. |
| | | */ |
| | | private static class SequentialCursorForwarding implements SequentialCursor<Void, EntryID> { |
| | | final Cursor<ByteString, ByteString> delegate; |
| | | |
| | | SequentialCursorForwarding(Cursor<ByteString, ByteString> delegate) { |
| | | this.delegate = delegate; |
| | | } |
| | | |
| | | @Override |
| | | public boolean isDefined() |
| | | { |
| | | return delegate.isDefined(); |
| | | } |
| | | |
| | | @Override |
| | | public boolean next() |
| | | { |
| | | return delegate.next(); |
| | | } |
| | | |
| | | @Override |
| | | public Void getKey() |
| | | { |
| | | return null; |
| | | } |
| | | |
| | | @Override |
| | | public EntryID getValue() |
| | | { |
| | | return new EntryID(delegate.getValue()); |
| | | } |
| | | |
| | | @Override |
| | | public void close() |
| | | { |
| | | delegate.close(); |
| | | } |
| | | } |
| | | } |
| | |
| | | |
| | | /** The limit on the number of entry IDs that may be indexed by one key. */ |
| | | private int indexEntryLimit; |
| | | /** |
| | | * Whether to maintain a count of IDs for a key once the entry limit has exceeded. |
| | | */ |
| | | private final boolean maintainCount; |
| | | |
| | | private final State state; |
| | | |
| | |
| | | * The state database to persist index state info. |
| | | * @param indexEntryLimit |
| | | * The configured limit on the number of entry IDs that may be indexed by one key. |
| | | * @param maintainCount |
| | | * Whether to maintain a count of IDs for a key once the entry limit has exceeded. |
| | | * @param txn |
| | | * a non null database transaction |
| | | * @param entryContainer |
| | |
| | | * @throws StorageRuntimeException |
| | | * If an error occurs in the database. |
| | | */ |
| | | DefaultIndex(TreeName name, State state, int indexEntryLimit, boolean maintainCount, WriteableTransaction txn, |
| | | EntryContainer entryContainer) throws StorageRuntimeException |
| | | DefaultIndex(TreeName name, State state, int indexEntryLimit, WriteableTransaction txn, EntryContainer entryContainer) |
| | | throws StorageRuntimeException |
| | | { |
| | | super(name); |
| | | this.indexEntryLimit = indexEntryLimit; |
| | | this.maintainCount = maintainCount; |
| | | this.state = state; |
| | | |
| | | final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName()); |
| | |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit); |
| | | importIDSet.merge(idsToBeAdded); |
| | | txn.put(getName(), key, importIDSet.valueToByteString(codec)); |
| | | } |
| | |
| | | ByteString value = txn.read(getName(), key); |
| | | if (value != null) |
| | | { |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit, maintainCount); |
| | | final ImportIDSet importIDSet = new ImportIDSet(key, codec.decode(key, value), indexEntryLimit); |
| | | importIDSet.remove(idsToBeRemoved); |
| | | if (importIDSet.isDefined() && importIDSet.size() == 0) |
| | | { |
| | |
| | | { |
| | | /* |
| | | * Check the special condition where both deletedIDs and addedIDs are null. This is used when |
| | | * deleting entries and corresponding id2children and id2subtree records must be completely |
| | | * removed. |
| | | * deleting entries must be completely removed. |
| | | */ |
| | | if (deletedIDs == null && addedIDs == null) |
| | | { |
| | |
| | | * Avoid taking a write lock on a record which has hit all IDs because it is likely to be a |
| | | * point of contention. |
| | | */ |
| | | if (!maintainCount && !get(txn, key).isDefined()) |
| | | if (!get(txn, key).isDefined()) |
| | | { |
| | | return; |
| | | } |
| | |
| | | }); |
| | | } |
| | | |
| | | private boolean isNullOrEmpty(EntryIDSet entryIDSet) |
| | | private static boolean isNullOrEmpty(EntryIDSet entryIDSet) |
| | | { |
| | | return entryIDSet == null || entryIDSet.size() == 0; |
| | | } |
| | | |
| | | private boolean isNotEmpty(EntryIDSet entryIDSet) |
| | | private static boolean isNotEmpty(EntryIDSet entryIDSet) |
| | | { |
| | | return entryIDSet != null && entryIDSet.size() > 0; |
| | | } |
| | |
| | | } |
| | | if (idCountDelta + entryIDSet.size() >= indexEntryLimit) |
| | | { |
| | | if (maintainCount) |
| | | { |
| | | entryIDSet = newUndefinedSetWithSize(key, entryIDSet.size() + idCountDelta); |
| | | } |
| | | else |
| | | { |
| | | entryIDSet = newUndefinedSet(); |
| | | } |
| | | |
| | | entryIDSet = newUndefinedSetWithKey(key); |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | StringBuilder builder = new StringBuilder(); |
| | | StaticUtils.byteArrayToHexPlusAscii(builder, key.toByteArray(), 4); |
| | | logger.trace("Index entry exceeded in index %s. " + "Limit: %d. ID list size: %d.\nKey:%s", getName(), |
| | | indexEntryLimit, idCountDelta + addedIDs.size(), builder); |
| | | |
| | | } |
| | | } |
| | | else |
| | |
| | | { |
| | | return trusted; |
| | | } |
| | | |
  @Override
  public final boolean getMaintainCount()
  {
    // Whether this index keeps an ID count for keys whose entry limit was exceeded.
    return maintainCount;
  }
| | | } |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Utils.closeSilently; |
| | | import static org.forgerock.util.Utils.*; |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.backends.pluggable.EntryIDSet.*; |
| | | import static org.opends.server.backends.pluggable.IndexFilter.*; |
| | | import static org.opends.server.backends.pluggable.JebFormat.*; |
| | | import static org.opends.server.backends.pluggable.VLVIndex.encodeTargetAssertion; |
| | | import static org.opends.server.backends.pluggable.VLVIndex.encodeVLVKey; |
| | | import static org.opends.server.backends.pluggable.VLVIndex.*; |
| | | import static org.opends.server.core.DirectoryServer.*; |
| | | import static org.opends.server.protocols.ldap.LDAPResultCode.*; |
| | | import static org.opends.server.types.AdditionalLogItem.*; |
| | |
| | | import java.util.Iterator; |
| | | import java.util.List; |
| | | import java.util.Map; |
| | | import java.util.NoSuchElementException; |
| | | import java.util.TreeMap; |
| | | import java.util.concurrent.locks.Lock; |
| | | import java.util.concurrent.locks.ReentrantReadWriteLock; |
| | |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | | import org.forgerock.opendj.ldap.ResultCode; |
| | | import org.forgerock.opendj.ldap.SearchScope; |
| | | import org.opends.messages.CoreMessages; |
| | | import org.opends.server.admin.server.ConfigurationAddListener; |
| | | import org.opends.server.admin.server.ConfigurationChangeListener; |
| | | import org.opends.server.admin.server.ConfigurationDeleteListener; |
| | |
| | | import org.opends.server.api.VirtualAttributeProvider; |
| | | import org.opends.server.api.plugin.PluginResult.SubordinateDelete; |
| | | import org.opends.server.api.plugin.PluginResult.SubordinateModifyDN; |
| | | import org.opends.server.backends.pluggable.State.IndexFlag; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadOperation; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.SequentialCursor; |
| | | import org.opends.server.backends.pluggable.spi.Storage; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | | /** Number of EntryID to considers when building EntryIDSet from DN2ID. */ |
| | | private static final int SCOPE_IDSET_LIMIT = 4096; |
| | | /** The name of the entry database. */ |
| | | private static final String ID2ENTRY_DATABASE_NAME = ID2ENTRY_INDEX_NAME; |
| | | /** The name of the DN database. */ |
| | | private static final String DN2ID_DATABASE_NAME = DN2ID_INDEX_NAME; |
| | | /** The name of the children index database. */ |
| | | private static final String ID2CHILDREN_DATABASE_NAME = ID2CHILDREN_INDEX_NAME; |
| | | /** The name of the subtree index database. */ |
| | | private static final String ID2SUBTREE_DATABASE_NAME = ID2SUBTREE_INDEX_NAME; |
| | | private static final String ID2CHILDREN_COUNT_DATABASE_NAME = ID2CHILDREN_COUNT_NAME; |
| | | /** The name of the referral database. */ |
| | | private static final String REFERRAL_DATABASE_NAME = REFERRAL_INDEX_NAME; |
| | | /** The name of the state database. */ |
| | |
| | | private final Storage storage; |
| | | |
| | | /** The DN database maps a normalized DN string to an entry ID (8 bytes). */ |
| | | private DN2ID dn2id; |
| | | private final DN2ID dn2id; |
| | | /** The entry database maps an entry ID (8 bytes) to a complete encoded entry. */ |
| | | private ID2Entry id2entry; |
| | | /** Index maps entry ID to an entry ID list containing its children. */ |
| | | private Index id2children; |
| | | /** Index maps entry ID to an entry ID list containing its subordinates. */ |
| | | private Index id2subtree; |
| | | /** Store the number of children for each entry. */ |
| | | private final ID2Count id2childrenCount; |
| | | /** The referral database maps a normalized DN string to labeled URIs. */ |
| | | private DN2URI dn2uri; |
| | | private final DN2URI dn2uri; |
| | | /** The state database maps a config DN to config entries. */ |
| | | private State state; |
| | | private final State state; |
| | | |
| | | /** The set of attribute indexes. */ |
| | | private final HashMap<AttributeType, AttributeIndex> attrIndexMap = new HashMap<AttributeType, AttributeIndex>(); |
| | |
| | | this.storage = env; |
| | | this.rootContainer = rootContainer; |
| | | this.databasePrefix = baseDN.toNormalizedUrlSafeString(); |
| | | this.id2childrenCount = new ID2Count(getIndexName(ID2CHILDREN_COUNT_DATABASE_NAME)); |
| | | this.dn2id = new DN2ID(getIndexName(DN2ID_DATABASE_NAME), baseDN); |
| | | this.dn2uri = new DN2URI(getIndexName(REFERRAL_DATABASE_NAME), this); |
| | | this.state = new State(getIndexName(STATE_DATABASE_NAME)); |
| | | |
| | | config.addPluggableChangeListener(this); |
| | | |
| | |
| | | |
| | | id2entry = new ID2Entry(getIndexName(ID2ENTRY_DATABASE_NAME), entryDataConfig); |
| | | id2entry.open(txn); |
| | | |
| | | dn2id = new DN2ID(getIndexName(DN2ID_DATABASE_NAME), this); |
| | | id2childrenCount.open(txn); |
| | | dn2id.open(txn); |
| | | |
| | | state = new State(getIndexName(STATE_DATABASE_NAME)); |
| | | state.open(txn); |
| | | |
| | | openSubordinateIndexes(txn, config); |
| | | |
| | | dn2uri = new DN2URI(getIndexName(REFERRAL_DATABASE_NAME), this); |
| | | dn2uri.open(txn); |
| | | |
| | | for (String idx : config.listBackendIndexes()) |
| | |
| | | } |
| | | } |
| | | |
| | | private NullIndex openNewNullIndex(WriteableTransaction txn, String name) |
| | | { |
| | | final TreeName treeName = getIndexName(name); |
| | | final NullIndex index = new NullIndex(treeName); |
| | | state.removeFlagsFromIndex(txn, treeName, IndexFlag.TRUSTED); |
| | | txn.deleteTree(treeName); |
| | | return index; |
| | | } |
| | | |
| | | /** |
| | | * Closes the entry container. |
| | | * |
| | |
| | | * |
| | | * @return The children database. |
| | | */ |
| | | Index getID2Children() |
| | | ID2Count getID2ChildrenCount() |
| | | { |
| | | return id2children; |
| | | } |
| | | |
| | | /** |
| | | * Get the subtree database used by this entry container. |
| | | * The entryContainer must have been opened. |
| | | * |
| | | * @return The subtree database. |
| | | */ |
| | | Index getID2Subtree() |
| | | { |
| | | return id2subtree; |
| | | return id2childrenCount; |
| | | } |
| | | |
| | | /** |
| | |
| | | } |
| | | } |
| | | |
| | | boolean hasSubordinates(final DN dn) |
| | | { |
| | | try |
| | | { |
| | | return storage.read(new ReadOperation<Boolean>() |
| | | { |
| | | @Override |
| | | public Boolean run(final ReadableTransaction txn) throws Exception |
| | | { |
| | | try (final SequentialCursor<?, ?> cursor = dn2id.openChildrenCursor(txn, dn)) |
| | | { |
| | | return cursor.next(); |
| | | } |
| | | } |
| | | }); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | throw new StorageRuntimeException(e); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Determine the number of subordinate entries for a given entry. |
| | | * Determine the number of children entries for a given entry. |
| | | * |
| | | * @param entryDN The distinguished name of the entry. |
| | | * @param subtree <code>true</code> will include all the entries under the |
| | | * given entries. <code>false</code> will only return the |
| | | * number of entries immediately under the given entry. |
| | | * @return The number of subordinate entries for the given entry or -1 if |
| | | * @return The number of children entries for the given entry or -1 if |
| | | * the entry does not exist. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | long getNumSubordinates(final DN entryDN, final boolean subtree) |
| | | throws StorageRuntimeException |
| | | long getNumberOfChildren(final DN entryDN) throws StorageRuntimeException |
| | | { |
| | | try |
| | | { |
| | |
| | | @Override |
| | | public Long run(ReadableTransaction txn) throws Exception |
| | | { |
| | | EntryID entryID = dn2id.get(txn, entryDN); |
| | | if (entryID != null) |
| | | { |
| | | final Index index = subtree ? id2subtree : id2children; |
| | | final EntryIDSet entryIDSet = index.get(txn, entryID.toByteString()); |
| | | long count = entryIDSet.size(); |
| | | if (count != Long.MAX_VALUE) |
| | | { |
| | | return count; |
| | | } |
| | | } |
| | | return -1L; |
| | | final EntryID entryID = dn2id.get(txn, entryDN); |
| | | return entryID != null ? id2childrenCount.getCount(txn, entryID) : -1; |
| | | } |
| | | }); |
| | | } |
| | |
| | | |
| | | if (!isBelowFilterThreshold(entryIDSet)) |
| | | { |
| | | // Evaluate the search scope against the id2children and id2subtree indexes |
| | | EntryID baseID = dn2id.get(txn, aBaseDN); |
| | | if (baseID == null) |
| | | { |
| | | LocalizableMessage message = ERR_JEB_SEARCH_NO_SUCH_OBJECT.get(aBaseDN); |
| | | DN matchedDN = getMatchedDN(txn, aBaseDN); |
| | | throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, message, matchedDN, null); |
| | | } |
| | | ByteString baseIDData = baseID.toByteString(); |
| | | |
| | | EntryIDSet scopeSet; |
| | | if (searchScope == SearchScope.SINGLE_LEVEL) |
| | | { |
| | | scopeSet = id2children.get(txn, baseIDData); |
| | | } |
| | | else |
| | | { |
| | | scopeSet = id2subtree.get(txn, baseIDData); |
| | | if (searchScope == SearchScope.WHOLE_SUBTREE) |
| | | { |
| | | // The id2subtree list does not include the base entry ID. |
| | | scopeSet.add(baseID); |
| | | } |
| | | } |
| | | final EntryIDSet scopeSet = getIDSetFromScope(txn, aBaseDN, searchScope); |
| | | entryIDSet.retainAll(scopeSet); |
| | | if (debugBuffer != null) |
| | | { |
| | |
| | | } |
| | | return null; |
| | | } |
| | | |
| | | private EntryIDSet getIDSetFromScope(final ReadableTransaction txn, DN aBaseDN, SearchScope searchScope) |
| | | throws DirectoryException |
| | | { |
| | | final EntryIDSet scopeSet; |
| | | try |
| | | { |
| | | switch (searchScope.asEnum()) |
| | | { |
| | | case BASE_OBJECT: |
| | | try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openCursor(txn, aBaseDN)) |
| | | { |
| | | scopeSet = EntryIDSet.newDefinedSet(scopeCursor.getValue().longValue()); |
| | | } |
| | | break; |
| | | case SINGLE_LEVEL: |
| | | try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openChildrenCursor(txn, aBaseDN)) |
| | | { |
| | | scopeSet = newIDSetFromCursor(scopeCursor, false); |
| | | } |
| | | break; |
| | | case SUBORDINATES: |
| | | case WHOLE_SUBTREE: |
| | | try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openSubordinatesCursor(txn, aBaseDN)) |
| | | { |
| | | scopeSet = newIDSetFromCursor(scopeCursor, searchScope.equals(SearchScope.WHOLE_SUBTREE)); |
| | | } |
| | | break; |
| | | default: |
| | | throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, |
| | | CoreMessages.INFO_ERROR_SEARCH_SCOPE_NOT_ALLOWED.get()); |
| | | } |
| | | } |
| | | catch (NoSuchElementException e) |
| | | { |
| | | throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, ERR_JEB_SEARCH_NO_SUCH_OBJECT.get(aBaseDN), |
| | | getMatchedDN(txn, aBaseDN), e); |
| | | } |
| | | return scopeSet; |
| | | } |
| | | }); |
| | | } |
| | | catch (Exception e) |
| | |
| | | } |
| | | } |
| | | |
| | | private static EntryIDSet newIDSetFromCursor(SequentialCursor<?, EntryID> cursor, boolean includeCurrent) |
| | | { |
| | | final long ids[] = new long[SCOPE_IDSET_LIMIT]; |
| | | int offset = 0; |
| | | if (includeCurrent) { |
| | | ids[offset++] = cursor.getValue().longValue(); |
| | | } |
| | | for(; offset < ids.length && cursor.next() ; offset++) { |
| | | ids[offset] = cursor.getValue().longValue(); |
| | | } |
| | | return offset == SCOPE_IDSET_LIMIT |
| | | ? EntryIDSet.newUndefinedSet() |
| | | : EntryIDSet.newDefinedSet(Arrays.copyOf(ids, offset)); |
| | | } |
| | | |
| | | private <E1 extends Exception, E2 extends Exception> |
| | | void throwAllowedExceptionTypes(Exception e, Class<E1> clazz1, Class<E2> clazz2) |
| | | throws E1, E2 |
| | |
| | | DN matchedDN = getMatchedDN(txn, baseDN); |
| | | throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, message, matchedDN, null); |
| | | } |
| | | id2childrenCount.addDelta(txn, parentID, 1); |
| | | } |
| | | |
| | | EntryID entryID = rootContainer.getNextEntryID(); |
| | |
| | | final IndexBuffer indexBuffer = new IndexBuffer(EntryContainer.this); |
| | | indexInsertEntry(indexBuffer, entry, entryID); |
| | | |
| | | // Insert into id2children and id2subtree. |
| | | // The database transaction locks on these records will be hotly |
| | | // contested so we do them last so as to hold the locks for the |
| | | // shortest duration. |
| | | if (parentDN != null) |
| | | { |
| | | final ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | indexBuffer.put(id2children, parentIDKeyBytes, entryID); |
| | | indexBuffer.put(id2subtree, parentIDKeyBytes, entryID); |
| | | |
| | | // Iterate up through the superior entries, starting above the |
| | | // parent. |
| | | for (DN dn = getParentWithinBase(parentDN); dn != null; dn = getParentWithinBase(dn)) |
| | | { |
| | | // Read the ID from dn2id. |
| | | EntryID nodeID = dn2id.get(txn, dn); |
| | | if (nodeID == null) |
| | | { |
| | | throw new StorageRuntimeException(ERR_JEB_MISSING_DN2ID_RECORD.get(dn).toString()); |
| | | } |
| | | |
| | | // Insert into id2subtree for this node. |
| | | indexBuffer.put(id2subtree, nodeID.toByteString(), entryID); |
| | | } |
| | | } |
| | | indexBuffer.flush(txn); |
| | | |
| | | if (addOperation != null) |
| | |
| | | // Remove from the indexes, in index config order. |
| | | indexRemoveEntry(indexBuffer, entry, leafID); |
| | | |
| | | // Remove the id2c and id2s records for this entry. |
| | | final ByteString leafIDKeyBytes = leafID.toByteString(); |
| | | indexBuffer.remove(id2children, leafIDKeyBytes); |
| | | indexBuffer.remove(id2subtree, leafIDKeyBytes); |
| | | // Remove the children counter for this entry. |
| | | id2childrenCount.deleteCount(txn, leafID); |
| | | |
| | | // Iterate up through the superior entries from the target entry. |
| | | boolean isParent = true; |
| | | for (DN parentDN = getParentWithinBase(targetDN); parentDN != null; |
| | | parentDN = getParentWithinBase(parentDN)) |
| | | final DN parentDN = getParentWithinBase(targetDN); |
| | | if (parentDN != null) |
| | | { |
| | | // Read the ID from dn2id. |
| | | EntryID parentID = dn2id.get(txn, parentDN); |
| | | final EntryID parentID = dn2id.get(txn, parentDN); |
| | | if (parentID == null) |
| | | { |
| | | throw new StorageRuntimeException(ERR_JEB_MISSING_DN2ID_RECORD.get(parentDN).toString()); |
| | | } |
| | | |
| | | ByteString parentIDBytes = parentID.toByteString(); |
| | | // Remove from id2children. |
| | | if (isParent) |
| | | { |
| | | indexBuffer.remove(id2children, parentIDBytes, leafID); |
| | | isParent = false; |
| | | } |
| | | indexBuffer.remove(id2subtree, parentIDBytes, leafID); |
| | | id2childrenCount.addDelta(txn, parentID, -1); |
| | | } |
| | | |
| | | // Remove the entry from the entry cache. |
| | |
| | | return dn2id.get(txn, entryDN) != null; |
| | | } |
| | | |
| | | |
| | | boolean entryExists(final DN entryDN) throws StorageRuntimeException |
| | | { |
| | | final EntryCache<?> entryCache = DirectoryServer.getEntryCache(); |
| | | if (entryCache != null && entryCache.containsEntry(entryDN)) |
| | | { |
| | | return true; |
| | | } |
| | | |
| | | try |
| | | { |
| | | return storage.read(new ReadOperation<Boolean>() |
| | | { |
| | | @Override |
| | | public Boolean run(ReadableTransaction txn) throws Exception |
| | | { |
| | | return dn2id.get(txn, entryDN) != null; |
| | | } |
| | | }); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | throw new StorageRuntimeException(e); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Fetch an entry by DN, trying the entry cache first, then the database. |
| | | * Retrieves the requested entry, trying the entry cache first, |
| | |
| | | indexInsertEntry(buffer, newEntry, newID); |
| | | } |
| | | |
| | | // Add the new ID to id2children and id2subtree of new apex parent entry. |
| | | if(isApexEntryMoved) |
| | | { |
| | | boolean isParent = true; |
| | | for (DN dn = getParentWithinBase(newEntry.getName()); dn != null; |
| | | dn = getParentWithinBase(dn)) |
| | | final DN parentDN = getParentWithinBase(newEntry.getName()); |
| | | if (parentDN != null) |
| | | { |
| | | EntryID parentID = dn2id.get(txn, dn); |
| | | ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | if(isParent) |
| | | { |
| | | buffer.put(id2children, parentIDKeyBytes, newID); |
| | | isParent = false; |
| | | } |
| | | buffer.put(id2subtree, parentIDKeyBytes, newID); |
| | | id2childrenCount.addDelta(txn, dn2id.get(txn, parentDN), 1); |
| | | } |
| | | } |
| | | } |
| | |
| | | |
| | | tail.next = new MovedEntry(newID, newEntry, !newID.equals(oldID)); |
| | | |
| | | // Remove the old ID from id2children and id2subtree of |
| | | // the old apex parent entry. |
| | | if(oldSuperiorDN != null && isApexEntryMoved) |
| | | { |
| | | boolean isParent = true; |
| | | for (DN dn = oldSuperiorDN; dn != null; dn = getParentWithinBase(dn)) |
| | | { |
| | | EntryID parentID = dn2id.get(txn, dn); |
| | | ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | if(isParent) |
| | | { |
| | | buffer.remove(id2children, parentIDKeyBytes, oldID); |
| | | isParent = false; |
| | | // Since entry has moved, oldSuperiorDN has lost a child |
| | | id2childrenCount.addDelta(txn, dn2id.get(txn, oldSuperiorDN), -1); |
| | | } |
| | | buffer.remove(id2subtree, parentIDKeyBytes, oldID); |
| | | } |
| | | |
| | | if (!newID.equals(oldID)) |
| | | { |
| | | id2childrenCount.addDelta(txn, newID, id2childrenCount.deleteCount(txn, oldID)); |
| | | } |
| | | |
| | | if (!newID.equals(oldID) || modifyDNOperation == null) |
| | | { |
| | | // All the subordinates will be renumbered so we have to rebuild |
| | | // id2c and id2s with the new ID. |
| | | ByteString oldIDKeyBytes = oldID.toByteString(); |
| | | buffer.remove(id2children, oldIDKeyBytes); |
| | | buffer.remove(id2subtree, oldIDKeyBytes); |
| | | |
| | | // Reindex the entry with the new ID. |
| | | indexRemoveEntry(buffer, oldEntry, oldID); |
| | | } |
| | |
| | | |
| | | tail.next = new MovedEntry(newID, newEntry, !newID.equals(oldID)); |
| | | |
| | | if(isApexEntryMoved) |
| | | { |
| | | // Remove the old ID from id2subtree of old apex superior entries. |
| | | for (DN dn = oldSuperiorDN; dn != null; dn = getParentWithinBase(dn)) |
| | | { |
| | | EntryID parentID = dn2id.get(txn, dn); |
| | | ByteString parentIDKeyBytes = parentID.toByteString(); |
| | | buffer.remove(id2subtree, parentIDKeyBytes, oldID); |
| | | } |
| | | } |
| | | |
| | | if (!newID.equals(oldID)) |
| | | { |
| | | // All the subordinates will be renumbered so we have to rebuild |
| | | // id2c and id2s with the new ID. |
| | | ByteString oldIDKeyBytes = oldID.toByteString(); |
| | | buffer.remove(id2children, oldIDKeyBytes); |
| | | buffer.remove(id2subtree, oldIDKeyBytes); |
| | | id2childrenCount.deleteCount(txn, oldID); |
| | | |
| | | // Reindex the entry with the new ID. |
| | | indexRemoveEntry(buffer, oldEntry, oldID); |
| | |
| | | } |
| | | |
| | | /** |
| | | * Get a count of the number of entries stored in this entry container. |
| | | * Get a count of the number of entries stored in this entry container including the baseDN |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @return The number of entries stored in this entry container. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | * @param txn |
| | | * a non null database transaction |
| | | * @return The number of entries stored in this entry container including the baseDN. |
| | | * @throws StorageRuntimeException |
| | | * If an error occurs in the database. |
| | | */ |
| | | long getEntryCount(ReadableTransaction txn) throws StorageRuntimeException |
| | | long getNumberOfEntriesInBaseDN() throws StorageRuntimeException |
| | | { |
| | | final EntryID entryID = dn2id.get(txn, baseDN); |
| | | if (entryID != null) |
| | | try |
| | | { |
| | | final EntryIDSet entryIDSet = id2subtree.get(txn, entryID.toByteString()); |
| | | long count = entryIDSet.size(); |
| | | if(count != Long.MAX_VALUE) |
| | | return storage.read(new ReadOperation<Long>() |
| | | { |
| | | // Add the base entry itself |
| | | return ++count; |
| | | @Override |
| | | public Long run(ReadableTransaction txn) throws Exception |
| | | { |
| | | final int baseDnIfExists = dn2id.get(txn, baseDN) != null ? 1 : 0; |
| | | return id2childrenCount.getTotalCount(txn) + baseDnIfExists; |
| | | } |
| | | else |
| | | { |
| | | // The count is not maintained. Fall back to the slow method |
| | | return id2entry.getRecordCount(txn); |
| | | }); |
| | | } |
| | | } |
| | | else |
| | | catch (Exception e) |
| | | { |
| | | // Base entry doesn't not exist so this entry container |
| | | // must not have any entries |
| | | return 0; |
| | | throw new StorageRuntimeException(e); |
| | | } |
| | | } |
| | | |
| | |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | if (config.isSubordinateIndexesEnabled() != cfg.isSubordinateIndexesEnabled()) |
| | | { |
| | | openSubordinateIndexes(txn, cfg); |
| | | } |
| | | |
| | | if (config.getIndexEntryLimit() != cfg.getIndexEntryLimit()) |
| | | { |
| | | if (id2children.setIndexEntryLimit(cfg.getIndexEntryLimit())) |
| | | { |
| | | ccr.setAdminActionRequired(true); |
| | | ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(id2children.getName())); |
| | | } |
| | | |
| | | if (id2subtree.setIndexEntryLimit(cfg.getIndexEntryLimit())) |
| | | { |
| | | ccr.setAdminActionRequired(true); |
| | | ccr.addMessage(NOTE_JEB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(id2subtree.getName())); |
| | | } |
| | | } |
| | | |
| | | DataConfig entryDataConfig = new DataConfig(cfg.isEntriesCompressed(), |
| | | cfg.isCompactEncoding(), rootContainer.getCompressedSchema()); |
| | | id2entry.setDataConfig(entryDataConfig); |
| | |
| | | databases.add(dn2id); |
| | | databases.add(id2entry); |
| | | databases.add(dn2uri); |
| | | if (config.isSubordinateIndexesEnabled()) |
| | | { |
| | | databases.add(id2children); |
| | | databases.add(id2subtree); |
| | | } |
| | | databases.add(id2childrenCount); |
| | | databases.add(state); |
| | | |
| | | for (AttributeIndex index : attrIndexMap.values()) |
| | |
| | | return null; |
| | | } |
| | | |
| | | /** Opens the id2children and id2subtree indexes. */ |
| | | private void openSubordinateIndexes(WriteableTransaction txn, PluggableBackendCfg cfg) |
| | | { |
| | | if (cfg.isSubordinateIndexesEnabled()) |
| | | { |
| | | TreeName name = getIndexName(ID2CHILDREN_DATABASE_NAME); |
| | | id2children = new DefaultIndex(name, state, config.getIndexEntryLimit(), true, txn, this); |
| | | id2children.open(txn); |
| | | if (!id2children.isTrusted()) |
| | | { |
| | | logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, name); |
| | | } |
| | | |
| | | name = getIndexName(ID2SUBTREE_DATABASE_NAME); |
| | | id2subtree = new DefaultIndex(name, state, config.getIndexEntryLimit(), true, txn, this); |
| | | id2subtree.open(txn); |
| | | if (!id2subtree.isTrusted()) |
| | | { |
| | | logger.info(NOTE_JEB_INDEX_ADD_REQUIRES_REBUILD, name); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | // Disabling subordinate indexes. Use a null index and ensure that |
| | | // future attempts to use the real indexes will fail. |
| | | id2children = openNewNullIndex(txn, ID2CHILDREN_DATABASE_NAME); |
| | | id2subtree = openNewNullIndex(txn, ID2SUBTREE_DATABASE_NAME); |
| | | logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, cfg.getBackendId()); |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Checks if any modifications apply to this indexed attribute. |
| | | * @param index the indexed attributes. |
| | |
| | | { |
| | | return new long[] { entryIDs[0], entryIDs[entryIDs.length - 1] }; |
| | | } |
| | | else |
| | | { |
| | | return NO_ENTRY_IDS_RANGE; |
| | | } |
| | | } |
| | | |
| | | @Override |
| | | public long[] getIDs() |
| | |
| | | private static final class UndefinedImpl implements EntryIDSetImplementor |
| | | { |
| | | /** |
| | | * The number of entry IDs in the set if the size is being maintained, otherwise Long.MAX_VALUE |
| | | */ |
| | | private long undefinedSize; |
| | | |
| | | /** |
| | | * The database key containing this set, if the set was constructed directly from the database. |
| | | */ |
| | | private final ByteSequence databaseKey; |
| | | |
| | | UndefinedImpl(ByteSequence key, long size) |
| | | UndefinedImpl(ByteSequence key) |
| | | { |
| | | databaseKey = checkNotNull(key, "key must not be null"); |
| | | undefinedSize = size; |
| | | } |
| | | |
| | | @Override |
| | | public long size() |
| | | { |
| | | return undefinedSize; |
| | | return Long.MAX_VALUE; |
| | | } |
| | | |
| | | @Override |
| | |
| | | { |
| | | buffer.append("[NOT-INDEXED]"); |
| | | } |
| | | else if (maintainUndefinedSize()) |
| | | { |
| | | buffer.append("[LIMIT-EXCEEDED:").append(undefinedSize).append("]"); |
| | | } |
| | | else |
| | | { |
| | | buffer.append("[LIMIT-EXCEEDED]"); |
| | | } |
| | | } |
| | | |
| | | private boolean maintainUndefinedSize() |
| | | { |
| | | return undefinedSize != Long.MAX_VALUE; |
| | | } |
| | | |
| | | @Override |
| | | public boolean isDefined() |
| | | { |
| | |
| | | @Override |
| | | public boolean add(EntryID entryID) |
| | | { |
| | | if (maintainUndefinedSize()) |
| | | { |
| | | undefinedSize++; |
| | | } |
| | | return true; |
| | | } |
| | | |
| | | @Override |
| | | public boolean remove(EntryID entryID) |
| | | { |
| | | if (maintainUndefinedSize() && undefinedSize > 0) |
| | | { |
| | | undefinedSize--; |
| | | } |
| | | return true; |
| | | } |
| | | |
| | |
| | | @Override |
| | | public void addAll(EntryIDSet that) |
| | | { |
| | | // Assume there are no overlap between IDs in that set with this set |
| | | if (maintainUndefinedSize()) |
| | | { |
| | | undefinedSize += that.size(); |
| | | } |
| | | } |
| | | |
| | | @Override |
| | | public void removeAll(EntryIDSet that) |
| | | { |
| | | // Assume all IDs in the given set exists in this set. |
| | | if (maintainUndefinedSize()) |
| | | { |
| | | undefinedSize = Math.max(0, undefinedSize - that.size()); |
| | | } |
| | | } |
| | | |
| | | @Override |
| | |
| | | else if ((value.byteAt(0) & 0x80) == 0x80) |
| | | { |
| | | // Entry limit has exceeded and there is an encoded undefined set size. |
| | | return newUndefinedSetWithSize(key, decodeUndefinedSize(value)); |
| | | return newUndefinedSetWithKey(key); |
| | | } |
| | | else |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | private int getEstimatedSize(EntryIDSet idSet) |
| | | private static int getEstimatedSize(EntryIDSet idSet) |
| | | { |
| | | if (idSet.isDefined()) |
| | | { |
| | | return idSet.getIDs().length * LONG_SIZE; |
| | | } |
| | | else |
| | | { |
| | | return LONG_SIZE; |
| | | } |
| | | return idSet.isDefined() ? idSet.getIDs().length * LONG_SIZE : LONG_SIZE; |
| | | } |
| | | |
| | | private long[] decodeRaw(ByteSequenceReader reader, int nbEntriesToDecode) |
| | | private static long[] decodeRaw(ByteSequenceReader reader, int nbEntriesToDecode) |
| | | { |
| | | checkNotNull(reader, "builder must not be null"); |
| | | Reject.ifFalse(nbEntriesToDecode >= 0, "nbEntriesToDecode must be >= 0"); |
| | |
| | | return ids; |
| | | } |
| | | |
| | | private ByteStringBuilder append(ByteStringBuilder builder, EntryIDSet idSet) |
| | | private static ByteStringBuilder append(ByteStringBuilder builder, EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | checkNotNull(builder, "builder must not be null"); |
| | |
| | | } |
| | | return builder; |
| | | } |
| | | else |
| | | { |
| | | // Set top bit. |
| | | return builder.append(idSet.size() | Long.MIN_VALUE); |
| | | } |
| | | } |
| | | |
| | | private static long decodeUndefinedSize(ByteSequence bytes) |
| | | { |
| | | // remove top bit |
| | | return bytes.length() == LONG_SIZE ? bytes.asReader().getLong() & Long.MAX_VALUE : Long.MAX_VALUE; |
| | | return builder.append((byte) 0x80); |
| | | } |
| | | } |
| | | |
| | |
| | | { |
| | | checkNotNull(key, "key must not be null"); |
| | | checkNotNull(value, "value must not be null"); |
| | | |
| | | if (value.byteAt(0) == UNDEFINED_SET) |
| | | { |
| | | return newUndefinedSetWithKey(key); |
| | | } |
| | | final ByteSequenceReader reader = value.asReader(); |
| | | if ( reader.get() == UNDEFINED_SET) { |
| | | return newUndefinedSetWithSize(key, reader.getLong()); |
| | | } else { |
| | | reader.rewind(); |
| | | return newDefinedSet(decodeRaw(reader, (int) reader.getCompactUnsigned())); |
| | | } |
| | | } |
| | | |
| | | private ByteStringBuilder append(ByteStringBuilder builder, EntryIDSet idSet) |
| | | private static ByteStringBuilder append(ByteStringBuilder builder, EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | checkNotNull(builder, "builder must not be null"); |
| | |
| | | else |
| | | { |
| | | builder.append(UNDEFINED_SET); |
| | | builder.append(idSet.size()); |
| | | } |
| | | return builder; |
| | | } |
| | | |
| | | private int getEstimatedSize(EntryIDSet idSet) |
| | | private static int getEstimatedSize(EntryIDSet idSet) |
| | | { |
| | | checkNotNull(idSet, "idSet must not be null"); |
| | | return idSet.getIDs().length * LONG_SIZE + INT_SIZE; |
| | | } |
| | | |
| | | private long[] decodeRaw(ByteSequenceReader reader, int nbEntriesToDecode) |
| | | private static long[] decodeRaw(ByteSequenceReader reader, int nbEntriesToDecode) |
| | | { |
| | | checkNotNull(reader, "reader must not be null"); |
| | | Reject.ifFalse(nbEntriesToDecode >= 0, "nbEntriesToDecode must be >= 0"); |
| | | |
| | | if ( nbEntriesToDecode == 0 ) { |
| | | return EMPTY_LONG_ARRAY; |
| | | } else { |
| | | } |
| | | final long ids[] = new long[nbEntriesToDecode]; |
| | | ids[0] = reader.getCompactUnsigned(); |
| | | for(int i = 1 ; i < nbEntriesToDecode ; i++) { |
| | |
| | | return ids; |
| | | } |
| | | } |
| | | } |
| | | |
| | | static EntryIDSet newUndefinedSet() |
| | | { |
| | | return new EntryIDSet(new UndefinedImpl(NO_KEY, Long.MAX_VALUE)); |
| | | return newUndefinedSetWithKey(NO_KEY); |
| | | } |
| | | |
| | | static EntryIDSet newUndefinedSetWithKey(ByteSequence key) |
| | | { |
| | | return newUndefinedSetWithSize(key, Long.MAX_VALUE); |
| | | } |
| | | |
| | | static EntryIDSet newUndefinedSetWithSize(ByteSequence key, long undefinedSize) |
| | | { |
| | | return new EntryIDSet(new UndefinedImpl(key, undefinedSize)); |
| | | return new EntryIDSet(new UndefinedImpl(key)); |
| | | } |
| | | |
| | | /** |
| | |
| | | |
| | | if (containsUndefinedSet) |
| | | { |
| | | return newUndefinedSetWithSize(null, count); |
| | | return newUndefinedSet(); |
| | | } |
| | | |
| | | boolean needSort = false; |
| | |
| | | { |
| | | return newDefinedSet(n1); |
| | | } |
| | | else |
| | | { |
| | | return newDefinedSet(Arrays.copyOf(n1, j)); |
| | | } |
| | | } |
| | | |
| | | private EntryIDSetImplementor concreteImpl; |
| | | |
| | |
| | | // performed by the implementation. |
| | | concreteImpl = new DefinedImpl(that.getIDs()); |
| | | } else { |
| | | concreteImpl = new UndefinedImpl(NO_KEY, that.size()); |
| | | concreteImpl = new UndefinedImpl(NO_KEY); |
| | | } |
| | | return; |
| | | } |
| | |
| | | return concreteImpl.getRange(); |
| | | } |
| | | |
| | | static long addWithoutOverflow(long a, long b) { |
| | | /** a and b must be > 0 */ |
| | | final long result = a + b; |
| | | return result >= 0 ? result : Long.MAX_VALUE; |
| | | } |
| | | |
| | | private static long[] mergeOverlappingEntryIDSet(long set1[], long set2[]) |
| | | { |
| | | final long[] a, b; |
| | |
| | | { |
| | | return Arrays.copyOf(newEntryIDs, targetIndex); |
| | | } |
| | | else |
| | | { |
| | | return newEntryIDs; |
| | | } |
| | | } |
| | | |
| | | private static int copyRemainder(long[] sourceIDSet, final long[] newEntryIDs, int offset, int remainerIndex) |
| | | { |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * Copyright 2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | | import org.forgerock.util.Reject; |
| | | import org.forgerock.util.promise.Function; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.UpdateFunction; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | |
| | | /** |
| | | * Store a counter associated to a key. Counter value is sharded amongst multiple database key to allow concurrent |
| | | * update without contention (at the price of a slower read). |
| | | */ |
| | | final class ID2Count extends AbstractDatabaseContainer |
| | | { |
| | | /** |
| | | * Must be a power of 2 @see <a href="http://en.wikipedia.org/wiki/Modulo_operation#Performance_issues">Performance |
| | | * issues</a> |
| | | */ |
| | | private static final long SHARD_COUNT = 4096; |
| | | private static final int LONG_SIZE = Long.SIZE / Byte.SIZE; |
| | | private static final EntryID TOTAL_COUNT_ENTRY_ID = new EntryID(ByteStringBuilder.COMPACTED_MAX_VALUE); |
| | | |
| | | ID2Count(TreeName name) |
| | | { |
| | | super(name); |
| | | } |
| | | |
| | | Cursor<EntryID, Long> openCursor(ReadableTransaction txn) { |
| | | return CursorTransformer.transformKeysAndValues(txn.openCursor(getName()), |
| | | new Function<ByteString, EntryID, Exception>() |
| | | { |
| | | @Override |
| | | public EntryID apply(ByteString value) throws Exception |
| | | { |
| | | return new EntryID(value.asReader().getCompactUnsigned()); |
| | | } |
| | | }, new CursorTransformer.ValueTransformer<ByteString, ByteString, Long, NeverThrowsException>() |
| | | { |
| | | @Override |
| | | public Long transform(ByteString key, ByteString value) throws NeverThrowsException |
| | | { |
| | | return value.toLong(); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | /** |
| | | * Add a value to the counter associated to the given key |
| | | * @param txn Database transaction |
| | | * @param entryID The entryID identifying to the counter |
| | | * @param delta The value to add. Can be negative to decrease counter value. |
| | | */ |
| | | void addDelta(WriteableTransaction txn, EntryID entryID, final long delta) |
| | | { |
| | | Reject.ifTrue(entryID.longValue() >= TOTAL_COUNT_ENTRY_ID.longValue(), "EntryID overflow."); |
| | | |
| | | addToCounter(txn, entryID, delta); |
| | | addToCounter(txn, TOTAL_COUNT_ENTRY_ID, delta); |
| | | } |
| | | |
| | | private void addToCounter(WriteableTransaction txn, EntryID entryID, final long delta) |
| | | { |
| | | final long bucket = (Thread.currentThread().getId() & (SHARD_COUNT - 1)); |
| | | final ByteSequence shardedKey = getKeyFromEntryIDAndBucket(entryID, bucket); |
| | | txn.update(getName(), shardedKey, new UpdateFunction() |
| | | { |
| | | @Override |
| | | public ByteSequence computeNewValue(ByteSequence oldValue) |
| | | { |
| | | final long currentValue = oldValue != null ? oldValue.asReader().getLong() : 0; |
| | | return ByteString.valueOf(currentValue + delta); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | /** |
| | | * Get the counter value for the specified key |
| | | * @param txn The database transaction |
| | | * @param entryID The entryID identifying to the counter |
| | | * @return Value of the counter. 0 if no counter is associated yet. |
| | | */ |
| | | long getCount(ReadableTransaction txn, EntryID entryID) |
| | | { |
| | | long counterValue = 0; |
| | | try(final Cursor<EntryID, Long> cursor = openCursor(txn)) { |
| | | cursor.positionToKeyOrNext(getKeyFromEntryID(entryID)); |
| | | while (cursor.isDefined() && cursor.getKey().equals(entryID)) |
| | | { |
| | | counterValue += cursor.getValue().longValue(); |
| | | cursor.next(); |
| | | } |
| | | } |
| | | |
| | | return counterValue; |
| | | } |
| | | |
| | | private static final ByteSequence getKeyFromEntryID(EntryID entryID) { |
| | | return new ByteStringBuilder(LONG_SIZE).appendCompactUnsigned(entryID.longValue()); |
| | | } |
| | | |
| | | private static final ByteSequence getKeyFromEntryIDAndBucket(EntryID entryID, long bucket) { |
| | | return new ByteStringBuilder(LONG_SIZE + LONG_SIZE).appendCompactUnsigned(entryID.longValue()) |
| | | .appendCompactUnsigned(bucket); |
| | | } |
| | | |
| | | /** |
| | | * Get the total counter value. The total counter maintain the sum of all |
| | | * the counter contained in this tree. |
| | | * @param txn The database transaction |
| | | * @return Sum of all the counter contained in this tree |
| | | */ |
| | | long getTotalCount(ReadableTransaction txn) |
| | | { |
| | | return getCount(txn, TOTAL_COUNT_ENTRY_ID); |
| | | } |
| | | |
| | | /** |
| | | * Delete the counter associated to the given key |
| | | * @param txn The database transaction |
| | | * @param entryID The entryID identifying the counter |
| | | * @return Value of the counter before it's deletion. |
| | | */ |
| | | long deleteCount(WriteableTransaction txn, EntryID entryID) |
| | | { |
| | | long counterValue = 0; |
| | | try(final Cursor<ByteString, ByteString> cursor = txn.openCursor(getName())) { |
| | | final ByteSequence encodedEntryID = getKeyFromEntryID(entryID); |
| | | if (cursor.positionToKeyOrNext(encodedEntryID)) { |
| | | while (cursor.getKey().startsWith(encodedEntryID)) |
| | | { |
| | | counterValue += cursor.getValue().asReader().getLong(); |
| | | txn.delete(getName(), cursor.getKey()); |
| | | cursor.next(); |
| | | } |
| | | } |
| | | } |
| | | addToCounter(txn, TOTAL_COUNT_ENTRY_ID, -counterValue); |
| | | |
| | | return counterValue; |
| | | } |
| | | |
| | | } |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.forgerock.util.Utils.*; |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.core.DirectoryServer.*; |
| | |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | | import org.forgerock.opendj.ldap.DecodeException; |
| | | import org.opends.server.api.CompressedSchema; |
| | | import org.opends.server.backends.pluggable.spi.Cursor; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.StorageRuntimeException; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | |
| | | return get0(id, txn.read(getName(), id.toByteString())); |
| | | } |
| | | |
| | | /** |
| | | * Check that a record entry exists in the entry database. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param id The entry ID which forms the key. |
| | | * @return True if an entry with entryID exists |
| | | * @throws DirectoryException If a problem occurs while getting the entry. |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | public boolean containsEntryID(ReadableTransaction txn, EntryID id) |
| | | { |
| | | checkNotNull(txn, "txn must not be null"); |
| | | checkNotNull(id, "id must not be null"); |
| | | try(final Cursor<ByteString, ByteString> cursor = txn.openCursor(getName())) { |
| | | return cursor.positionToKey(id.toByteString()); |
| | | } |
| | | } |
| | | |
| | | private Entry get0(EntryID id, ByteString value) throws DirectoryException |
| | | { |
| | | if (value == null) |
| | |
| | | private final ByteSequence key; |
| | | /** The index entry limit size. */ |
| | | private final int indexEntryLimitSize; |
| | | /** Set to true if a count of ids above the index entry limit should be kept. */ |
| | | private final boolean maintainCount; |
| | | |
| | | /** |
| | | * Create an import ID set managing the entry limit of the provided EntryIDSet. |
| | | * |
| | | * @param key The key associated to this ID set |
| | | * @param entryIDSet The entryIDSet that will be managed by this object |
| | | * @param limit The index entry limit or 0 if unlimited. |
| | | * @param maintainCount whether to maintain the count when size is undefined. |
| | | * @throws NullPointerException if key or entryIDSet is null |
| | | * @throws IllegalArgumentException if limit is < 0 |
| | | * @param key |
| | | * The key associated to this ID set |
| | | * @param entryIDSet |
| | | * The entryIDSet that will be managed by this object |
| | | * @param limit |
| | | * The index entry limit or 0 if unlimited. |
| | | * @throws NullPointerException |
| | | * if key or entryIDSet is null |
| | | * @throws IllegalArgumentException |
| | | * if limit is < 0 |
| | | */ |
| | | public ImportIDSet(ByteSequence key, EntryIDSet entryIDSet, int limit, boolean maintainCount) |
| | | public ImportIDSet(ByteSequence key, EntryIDSet entryIDSet, int limit) |
| | | { |
| | | checkNotNull(key, "key must not be null"); |
| | | checkNotNull(entryIDSet, "entryIDSet must not be null"); |
| | |
| | | this.entryIDSet = entryIDSet; |
| | | // FIXME: What to do if entryIDSet.size()> limit yet ? |
| | | this.indexEntryLimitSize = limit == 0 ? Integer.MAX_VALUE : limit; |
| | | this.maintainCount = maintainCount; |
| | | } |
| | | |
| | | /** |
| | |
| | | entryIDSet = newUndefinedSetWithKey(key); |
| | | } |
| | | |
| | | private void setUndefinedWithSize(final long newSize) { |
| | | entryIDSet = maintainCount ? newUndefinedSetWithSize(key, newSize) : newUndefinedSetWithKey(key); |
| | | } |
| | | |
| | | /** |
| | | * @param entryID The entry ID to add to an import ID set. |
| | | * @throws NullPointerException if entryID is null |
| | |
| | | */ |
| | | void addEntryID(long entryID) |
| | | { |
| | | Reject.ifTrue(entryID < 0, "entryID must always be positive"); |
| | | if (isDefined() && size() + 1 > indexEntryLimitSize) { |
| | | setUndefinedWithSize(size() + 1); |
| | | } else if (isDefined() || maintainCount) { |
| | | Reject.ifTrue(entryID < 0, "entryID must be positive"); |
| | | if (!isDefined()) { |
| | | return; |
| | | } |
| | | if (size() + 1 > indexEntryLimitSize) { |
| | | entryIDSet = newUndefinedSetWithKey(key); |
| | | } else { |
| | | entryIDSet.add(new EntryID(entryID)); |
| | | } |
| | | } |
| | |
| | | void remove(ImportIDSet importIdSet) |
| | | { |
| | | checkNotNull(importIdSet, "importIdSet must not be null"); |
| | | |
| | | if (!importIdSet.isDefined()) { |
| | | setUndefined(); |
| | | } else if (isDefined() || maintainCount) { |
| | | } else if (isDefined()) { |
| | | entryIDSet.removeAll(importIdSet.entryIDSet); |
| | | } |
| | | } |
| | |
| | | |
| | | if (!definedBeforeMerge || !importIdSet.isDefined() || mergedSize > indexEntryLimitSize) |
| | | { |
| | | setUndefinedWithSize(mergedSize); |
| | | entryIDSet = newUndefinedSetWithKey(key); |
| | | return definedBeforeMerge; |
| | | } |
| | | else if (isDefined() || maintainCount) |
| | | else if (isDefined()) |
| | | { |
| | | entryIDSet.addAll(importIdSet.entryIDSet); |
| | | } |
| | | return false; |
| | | } |
| | | |
| | | private static long addWithoutOverflow(long a, long b) { |
| | | /** a and b must be > 0 */ |
| | | final boolean willAdditionOverflow = (~(a ^ b) & (a ^ (a + b))) < 0; |
| | | if (willAdditionOverflow) { |
| | | return Long.MAX_VALUE; |
| | | } |
| | | return a + b; |
| | | } |
| | | |
| | | |
| | | /** |
| | | * @return The current size of an import ID set. |
| | | * @throws IllegalStateException if this set is undefined |
| | |
| | | import org.forgerock.opendj.ldap.ByteSequenceReader; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | | import org.forgerock.opendj.ldap.ResultCode; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | | import org.forgerock.util.Utils; |
| | | import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType; |
| | |
| | | |
| | | /** Used to shutdown import if an error occurs in phase one. */ |
| | | private volatile boolean isCanceled; |
| | | private volatile boolean isPhaseOneDone; |
| | | |
| | | /** Number of phase one buffers. */ |
| | | private int phaseOneBufferCount; |
| | |
| | | this.serverContext = serverContext; |
| | | this.tmpEnv = null; |
| | | this.threadCount = 1; |
| | | this.rebuildManager = new RebuildIndexManager(rebuildConfig, cfg); |
| | | this.rebuildManager = new RebuildIndexManager(rootContainer.getStorage(), rebuildConfig, cfg); |
| | | this.indexCount = rebuildManager.getIndexCount(); |
| | | this.clearedBackend = false; |
| | | this.scratchFileWriterList = |
| | |
| | | */ |
| | | } |
| | | |
| | | private File getTempDir(PluggableBackendCfg backendCfg, String tmpDirectory) |
| | | private static File getTempDir(PluggableBackendCfg backendCfg, String tmpDirectory) |
| | | { |
| | | File parentDir; |
| | | if (tmpDirectory != null) |
| | |
| | | return new File(parentDir, backendCfg.getBackendId()); |
| | | } |
| | | |
| | | private int getTotalIndexCount(PluggableBackendCfg backendCfg) |
| | | throws ConfigException |
| | | private static int getTotalIndexCount(PluggableBackendCfg backendCfg) throws ConfigException |
| | | { |
| | | int indexes = 2; // dn2id, dn2uri |
| | | for (String indexName : backendCfg.listBackendIndexes()) |
| | |
| | | } |
| | | } |
| | | |
| | | private void clearSuffix(EntryContainer entryContainer) |
| | | private static void clearSuffix(EntryContainer entryContainer) |
| | | { |
| | | entryContainer.lock(); |
| | | entryContainer.clear(); |
| | | entryContainer.unlock(); |
| | | } |
| | | |
| | | private boolean isAnyNotEqualAndAncestorOf(List<DN> dns, DN childDN) |
| | | private static boolean isAnyNotEqualAndAncestorOf(List<DN> dns, DN childDN) |
| | | { |
| | | for (DN dn : dns) |
| | | { |
| | |
| | | return true; |
| | | } |
| | | |
| | | private boolean isAnyAncestorOf(List<DN> dns, DN childDN) |
| | | private static boolean isAnyAncestorOf(List<DN> dns, DN childDN) |
| | | { |
| | | for (DN dn : dns) |
| | | { |
| | |
| | | |
| | | final long startTime = System.currentTimeMillis(); |
| | | importPhaseOne(); |
| | | isPhaseOneDone = true; |
| | | final long phaseOneFinishTime = System.currentTimeMillis(); |
| | | |
| | | if (!skipDNValidation) |
| | |
| | | indexKeyQueueMap.clear(); |
| | | } |
| | | |
| | | private void scheduleAtFixedRate(ScheduledThreadPoolExecutor timerService, Runnable task) |
| | | private static void scheduleAtFixedRate(ScheduledThreadPoolExecutor timerService, Runnable task) |
| | | { |
| | | timerService.scheduleAtFixedRate(task, TIMER_INTERVAL, TIMER_INTERVAL, TimeUnit.MILLISECONDS); |
| | | } |
| | | |
| | | private void shutdownAll(ExecutorService... executorServices) throws InterruptedException |
| | | private static void shutdownAll(ExecutorService... executorServices) throws InterruptedException |
| | | { |
| | | for (ExecutorService executorService : executorServices) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | private void clearAll(Collection<?>... cols) |
| | | private static void clearAll(Collection<?>... cols) |
| | | { |
| | | for (Collection<?> col : cols) |
| | | { |
| | |
| | | private void importPhaseTwo() throws InterruptedException, ExecutionException |
| | | { |
| | | ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor(1); |
| | | scheduleAtFixedRate(timerService, new SecondPhaseProgressTask(reader.getEntriesRead())); |
| | | scheduleAtFixedRate(timerService, new SecondPhaseProgressTask()); |
| | | try |
| | | { |
| | | processIndexFiles(); |
| | |
| | | } |
| | | } |
| | | |
| | | private <T> void getAll(List<Future<T>> futures) throws InterruptedException, ExecutionException |
| | | private static <T> void getAll(List<Future<T>> futures) throws InterruptedException, ExecutionException |
| | | { |
| | | for (Future<?> result : futures) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | void processEntry(WriteableTransaction txn, Entry entry, Suffix suffix) |
| | | throws DirectoryException, StorageRuntimeException, InterruptedException |
| | | void processEntry(WriteableTransaction txn, Entry entry, Suffix suffix) throws DirectoryException, |
| | | StorageRuntimeException, InterruptedException |
| | | { |
| | | DN entryDN = entry.getName(); |
| | | DN2ID dn2id = suffix.getDN2ID(); |
| | |
| | | { |
| | | if (indexMgr.isDN2ID()) |
| | | { |
| | | return new ImportIDSet(record.getKey(), newDefinedSet(), 1, false); |
| | | return new ImportIDSet(record.getKey(), newDefinedSet(), 1); |
| | | } |
| | | |
| | | final Index index = indexIDToIndexMap.get(record.getIndexID()); |
| | | return new ImportIDSet(record.getKey(), newDefinedSet(), index.getIndexEntryLimit(), index.getMaintainCount()); |
| | | return new ImportIDSet(record.getKey(), newDefinedSet(), index.getIndexEntryLimit()); |
| | | } |
| | | |
| | | private void addToDB(WriteableTransaction txn, int indexID, ImportIDSet insertSet, ImportIDSet deleteSet) |
| | |
| | | } |
| | | if (dnState.checkParent(txn, idSet)) |
| | | { |
| | | dnState.writeToDN2ID(txn, idSet); |
| | | dnState.writeToDN2ID(txn, idSet.getKey()); |
| | | } |
| | | } |
| | | |
| | |
| | | private final EntryContainer entryContainer; |
| | | private final TreeName dn2id; |
| | | private final TreeMap<ByteString, EntryID> parentIDMap = new TreeMap<ByteString, EntryID>(); |
| | | private final Map<ByteString, ImportIDSet> id2childTree = new TreeMap<ByteString, ImportIDSet>(); |
| | | private final Map<ByteString, ImportIDSet> id2subtreeTree = new TreeMap<ByteString, ImportIDSet>(); |
| | | private final int childLimit, subTreeLimit; |
| | | private final boolean childDoCount, subTreeDoCount; |
| | | private final Map<EntryID, AtomicLong> id2childrenCountTree = new TreeMap<EntryID, AtomicLong>(); |
| | | private ByteSequence parentDN; |
| | | private final ByteStringBuilder lastDN = new ByteStringBuilder(); |
| | | private EntryID parentID, lastID, entryID; |
| | |
| | | { |
| | | this.entryContainer = entryContainer; |
| | | dn2id = entryContainer.getDN2ID().getName(); |
| | | final Index id2c = entryContainer.getID2Children(); |
| | | childLimit = id2c.getIndexEntryLimit(); |
| | | childDoCount = id2c.getMaintainCount(); |
| | | final Index id2s = entryContainer.getID2Subtree(); |
| | | subTreeLimit = id2s.getIndexEntryLimit(); |
| | | subTreeDoCount = id2s.getMaintainCount(); |
| | | } |
| | | |
| | | private ByteSequence getParent(ByteSequence dn) |
| | |
| | | return true; |
| | | } |
| | | |
| | | private void id2child(WriteableTransaction txn, EntryID childID) throws DirectoryException |
| | | private AtomicLong getId2childrenCounter() |
| | | { |
| | | if (parentID == null) |
| | | AtomicLong counter = id2childrenCountTree.get(parentID); |
| | | if (counter == null) |
| | | { |
| | | throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, ERR_PARENT_ENTRY_IS_MISSING.get()); |
| | | counter = new AtomicLong(); |
| | | id2childrenCountTree.put(parentID, counter); |
| | | } |
| | | |
| | | getId2childtreeImportIDSet().addEntryID(childID); |
| | | if (id2childTree.size() > DN_STATE_CACHE_SIZE) |
| | | { |
| | | flushToDB(txn, id2childTree.values(), entryContainer.getID2Children(), true); |
| | | } |
| | | } |
| | | |
| | | private ImportIDSet getId2childtreeImportIDSet() |
| | | { |
| | | final ByteString parentIDBytes = parentID.toByteString(); |
| | | ImportIDSet idSet = id2childTree.get(parentIDBytes); |
| | | if (idSet == null) |
| | | { |
| | | idSet = new ImportIDSet(parentIDBytes, newDefinedSet(), childLimit, childDoCount); |
| | | id2childTree.put(parentIDBytes, idSet); |
| | | } |
| | | return idSet; |
| | | } |
| | | |
| | | private void id2SubTree(WriteableTransaction txn, EntryID childID) throws DirectoryException |
| | | { |
| | | if (parentID == null) |
| | | { |
| | | throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, ERR_PARENT_ENTRY_IS_MISSING.get()); |
| | | } |
| | | |
| | | getId2subtreeImportIDSet(parentID).addEntryID(childID); |
| | | // TODO: |
| | | // Instead of doing this, |
| | | // we can just walk to parent cache if available |
| | | for (ByteSequence dn = getParent(parentDN); dn != null; dn = getParent(dn)) |
| | | { |
| | | EntryID nodeID = getParentID(txn, dn); |
| | | if (nodeID != null) |
| | | { |
| | | getId2subtreeImportIDSet(nodeID).addEntryID(childID); |
| | | } |
| | | // else we have a missing parent. Maybe parent checking was turned off? |
| | | // Just ignore. |
| | | } |
| | | if (id2subtreeTree.size() > DN_STATE_CACHE_SIZE) |
| | | { |
| | | flushToDB(txn, id2subtreeTree.values(), entryContainer.getID2Subtree(), true); |
| | | } |
| | | } |
| | | |
| | | private EntryID getParentID(ReadableTransaction txn, ByteSequence dn) throws StorageRuntimeException |
| | | { |
| | | return bypassCacheForAppendMode() ? get(txn, dn2id, dn) : parentIDMap.get(dn); |
| | | return counter; |
| | | } |
| | | |
| | | /** |
| | |
| | | return value != null ? new EntryID(value) : null; |
| | | } |
| | | |
| | | private ImportIDSet getId2subtreeImportIDSet(EntryID entryID) |
| | | public void writeToDN2ID(WriteableTransaction txn, ByteSequence key) throws DirectoryException |
| | | { |
| | | ByteString entryIDBytes = entryID.toByteString(); |
| | | ImportIDSet idSet = id2subtreeTree.get(entryIDBytes); |
| | | if (idSet == null) |
| | | { |
| | | idSet = new ImportIDSet(entryIDBytes, newDefinedSet(), subTreeLimit, subTreeDoCount); |
| | | id2subtreeTree.put(entryIDBytes, idSet); |
| | | } |
| | | return idSet; |
| | | } |
| | | |
| | | public void writeToDN2ID(WriteableTransaction txn, ImportIDSet idSet) throws DirectoryException |
| | | { |
| | | txn.put(dn2id, idSet.getKey(), entryID.toByteString()); |
| | | txn.put(dn2id, key, entryID.toByteString()); |
| | | indexMgr.addTotDNCount(1); |
| | | if (parentDN != null) |
| | | if (parentID != null) |
| | | { |
| | | id2child(txn, entryID); |
| | | id2SubTree(txn, entryID); |
| | | incrementChildrenCounter(txn); |
| | | } |
| | | } |
| | | |
| | | public void flush(WriteableTransaction txn) |
| | | private void incrementChildrenCounter(WriteableTransaction txn) |
| | | { |
| | | flushToDB(txn, id2childTree.values(), entryContainer.getID2Children(), false); |
| | | flushToDB(txn, id2subtreeTree.values(), entryContainer.getID2Subtree(), false); |
| | | final AtomicLong counter = getId2childrenCounter(); |
| | | counter.incrementAndGet(); |
| | | if (id2childrenCountTree.size() > DN_STATE_CACHE_SIZE) |
| | | { |
| | | flush(txn); |
| | | } |
| | | } |
| | | |
| | | private void flushToDB(WriteableTransaction txn, Collection<ImportIDSet> idSets, Index index, boolean clearIDSets) |
| | | private void flush(WriteableTransaction txn) |
| | | { |
| | | for (ImportIDSet idSet : idSets) |
| | | for (Map.Entry<EntryID, AtomicLong> childrenCounter : id2childrenCountTree.entrySet()) |
| | | { |
| | | index.importPut(txn, idSet); |
| | | entryContainer.getID2ChildrenCount() |
| | | .addDelta(txn, childrenCounter.getKey(), childrenCounter.getValue().get()); |
| | | } |
| | | if (clearIDSets) |
| | | { |
| | | idSets.clear(); |
| | | } |
| | | id2childrenCountTree.clear(); |
| | | } |
| | | } |
| | | } |
| | |
| | | * @param cfg |
| | | * The local DB configuration to use. |
| | | */ |
| | | public RebuildIndexManager(RebuildConfig rebuildConfig, PluggableBackendCfg cfg) |
| | | public RebuildIndexManager(Storage storage, RebuildConfig rebuildConfig, PluggableBackendCfg cfg) |
| | | { |
| | | super(null); |
| | | super(storage); |
| | | this.rebuildConfig = rebuildConfig; |
| | | this.cfg = cfg; |
| | | } |
| | |
| | | rebuildIndexMap(txn, false); |
| | | // falls through |
| | | case DEGRADED: |
| | | if (mode == RebuildMode.ALL |
| | | || !entryContainer.getID2Children().isTrusted() |
| | | || !entryContainer.getID2Subtree().isTrusted()) |
| | | if (mode == RebuildMode.ALL) |
| | | { |
| | | dn2id = entryContainer.getDN2ID(); |
| | | } |
| | |
| | | { |
| | | // dn2uri does not have a trusted status. |
| | | entryContainer.clearDatabase(txn, entryContainer.getDN2URI()); |
| | | } |
| | | |
| | | if (!onlyDegraded |
| | | || !entryContainer.getID2Children().isTrusted() |
| | | || !entryContainer.getID2Subtree().isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(txn, entryContainer.getDN2ID()); |
| | | entryContainer.clearDatabase(txn, entryContainer.getID2Children()); |
| | | entryContainer.clearDatabase(txn, entryContainer.getID2Subtree()); |
| | | entryContainer.clearDatabase(txn, entryContainer.getID2ChildrenCount()); |
| | | } |
| | | |
| | | for (Map.Entry<IndexKey, MatchingRuleIndex> mapEntry : indexMap.entrySet()) |
| | |
| | | { |
| | | try |
| | | { |
| | | if (dn2id != null) |
| | | { |
| | | EntryContainer ec = suffix.getEntryContainer(); |
| | | ec.getID2Children().setTrusted(txn, trusted); |
| | | ec.getID2Subtree().setTrusted(txn, trusted); |
| | | } |
| | | setTrusted(txn, indexMap.values(), trusted); |
| | | for (VLVIndex vlvIndex : vlvIndexes) |
| | | { |
| | |
| | | |
| | | private void rebuildIndexesPhaseTwo() throws InterruptedException, ExecutionException |
| | | { |
| | | final Timer timer = scheduleAtFixedRate(new SecondPhaseProgressTask(entriesProcessed.get())); |
| | | final Timer timer = scheduleAtFixedRate(new SecondPhaseProgressTask()); |
| | | try |
| | | { |
| | | processIndexFiles(); |
| | |
| | | { |
| | | /** The time in milliseconds of the previous progress report. */ |
| | | private long previousTime; |
| | | private long latestCount; |
| | | |
| | | /** |
| | | * Create a new import progress task. |
| | |
| | | * @param latestCount |
| | | * The latest count of entries processed in phase one. |
| | | */ |
| | | public SecondPhaseProgressTask(long latestCount) |
| | | public SecondPhaseProgressTask() |
| | | { |
| | | previousTime = System.currentTimeMillis(); |
| | | this.latestCount = latestCount; |
| | | } |
| | | |
| | | /** The action to be performed by this timer task. */ |
| | |
| | | |
| | | int getIndexEntryLimit(); |
| | | |
| | | boolean getMaintainCount(); |
| | | |
| | | // Ignores trusted state. |
| | | void importPut(WriteableTransaction txn, ImportIDSet idsToBeAdded); |
| | | |
| | |
| | | |
| | | /** |
| | | * A simple class representing a pair of added and deleted indexed IDs. Initially both addedIDs |
| | | * and deletedIDs are {@code null} indicating that that the whole record should be deleted. This |
| | | * state is only ever used when updating the id2children and id2subtree indexes when deleting an |
| | | * entry. |
| | | * and deletedIDs are {@code null} indicating that that the whole record should be deleted. |
| | | */ |
| | | private static class BufferedIndexValues |
| | | { |
| | |
| | | vlvIndex.updateIndex(txn, bufferedVLVValues.addedSortKeys, bufferedVLVValues.deletedSortKeys); |
| | | } |
| | | } |
| | | |
| | | final Index id2children = entryContainer.getID2Children(); |
| | | flushIndex(id2children, txn, bufferedIndexes.remove(id2children)); |
| | | |
| | | final Index id2subtree = entryContainer.getID2Subtree(); |
| | | final TreeMap<ByteString, BufferedIndexValues> bufferedValues = bufferedIndexes.remove(id2subtree); |
| | | if (bufferedValues != null) |
| | | { |
| | | /* |
| | | * OPENDJ-1375: add keys in reverse order to be consistent with single |
| | | * entry processing in add/delete processing. This is necessary in order |
| | | * to avoid deadlocks. |
| | | */ |
| | | flushIndex(id2subtree, txn, bufferedValues.descendingMap()); |
| | | } |
| | | } |
| | | |
| | | void put(Index index, ByteString key, EntryID entryID) |
| | |
| | | */ |
| | | static ByteString dnToDNKey(DN dn, int prefixRDNs) |
| | | { |
| | | final ByteStringBuilder builder = new ByteStringBuilder(); |
| | | final ByteStringBuilder builder = new ByteStringBuilder(128); |
| | | final int startSize = dn.size() - prefixRDNs - 1; |
| | | for (int i = startSize; i >= 0; i--) |
| | | { |
| | |
| | | ec.sharedLock.lock(); |
| | | try |
| | | { |
| | | entryCount += ec.getEntryCount(txn); |
| | | entryCount += ec.getNumberOfEntriesInBaseDN(); |
| | | } |
| | | finally |
| | | { |
| | |
| | | |
| | | |
| | | /** |
| | | * Sets the trusted status of all of the indexes, vlvIndexes, id2children |
| | | * and id2subtree indexes. |
| | | * Sets the trusted status of all of the indexes and vlvIndexes. |
| | | * |
| | | * @param txn a non null database transaction |
| | | * @param trusted True if the indexes should be trusted or false otherwise. |
| | |
| | | */ |
| | | public void setIndexesTrusted(WriteableTransaction txn, boolean trusted) throws StorageRuntimeException |
| | | { |
| | | entryContainer.getID2Children().setTrusted(txn, trusted); |
| | | entryContainer.getID2Subtree().setTrusted(txn, trusted); |
| | | for (AttributeIndex attributeIndex : entryContainer.getAttributeIndexes()) |
| | | { |
| | | setTrusted(txn, attributeIndex.getNameToIndexes().values(), trusted); |
| | |
| | | */ |
| | | String ID2CHILDREN_INDEX_NAME = "id2children"; |
| | | /** |
| | | * The name of the index associating an entry id to the number of immediate |
| | | * children below it. |
| | | */ |
| | | String ID2CHILDREN_COUNT_NAME = "id2childrencount"; |
| | | /** |
| | | * The name of the index associating an entry id to the entry id set of all |
| | | * its subordinates, i.e. the children, grand-children, grand-grand-children, |
| | | * .... |
| | |
| | | import java.util.HashSet; |
| | | import java.util.IdentityHashMap; |
| | | import java.util.Iterator; |
| | | import java.util.LinkedList; |
| | | import java.util.List; |
| | | import java.util.Map; |
| | | import java.util.Queue; |
| | | import java.util.Set; |
| | | import java.util.Timer; |
| | | import java.util.TimerTask; |
| | |
| | | /** The verify configuration. */ |
| | | private final VerifyConfig verifyConfig; |
| | | /** The root container used for the verify job. */ |
| | | private RootContainer rootContainer; |
| | | private final RootContainer rootContainer; |
| | | |
| | | /** The number of milliseconds between job progress reports. */ |
| | | private final long progressInterval = 10000; |
| | |
| | | |
| | | /** Indicates whether the DN database is to be verified. */ |
| | | private boolean verifyDN2ID; |
| | | /** Indicates whether the children database is to be verified. */ |
| | | private boolean verifyID2Children; |
| | | /** Indicates whether the subtree database is to be verified. */ |
| | | private boolean verifyID2Subtree; |
| | | /** Indicates whether the children count database is to be verified. */ |
| | | private boolean verifyID2ChildrenCount; |
| | | |
| | | /** The entry database. */ |
| | | private ID2Entry id2entry; |
| | | /** The DN database. */ |
| | | private DN2ID dn2id; |
| | | /** The children database. */ |
| | | private Index id2c; |
| | | /** The subtree database. */ |
| | | private Index id2s; |
| | | private ID2Count id2childrenCount; |
| | | |
| | | /** A list of the attribute indexes to be verified. */ |
| | | private final ArrayList<AttributeIndex> attrIndexList = new ArrayList<AttributeIndex>(); |
| | |
| | | * |
| | | * @param verifyConfig The verify configuration. |
| | | */ |
| | | VerifyJob(VerifyConfig verifyConfig) |
| | | VerifyJob(RootContainer rootContainer, VerifyConfig verifyConfig) |
| | | { |
| | | this.rootContainer = rootContainer; |
| | | this.verifyConfig = verifyConfig; |
| | | } |
| | | |
| | |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | * @throws DirectoryException If an error occurs while verifying the backend. |
| | | */ |
| | | long verifyBackend(final RootContainer rootContainer) throws StorageRuntimeException, |
| | | long verifyBackend() throws StorageRuntimeException, |
| | | DirectoryException |
| | | { |
| | | try |
| | |
| | | @Override |
| | | public Long run(ReadableTransaction txn) throws Exception |
| | | { |
| | | return verifyBackend0(txn, rootContainer); |
| | | return verifyBackend0(txn); |
| | | } |
| | | }); |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | private long verifyBackend0(ReadableTransaction txn, RootContainer rootContainer) |
| | | throws StorageRuntimeException, DirectoryException |
| | | private long verifyBackend0(ReadableTransaction txn) throws StorageRuntimeException, DirectoryException |
| | | { |
| | | this.rootContainer = rootContainer; |
| | | EntryContainer entryContainer = |
| | | rootContainer.getEntryContainer(verifyConfig.getBaseDN()); |
| | | |
| | |
| | | if (completeList.isEmpty() && cleanList.isEmpty()) |
| | | { |
| | | verifyDN2ID = true; |
| | | if (rootContainer.getConfiguration().isSubordinateIndexesEnabled()) |
| | | { |
| | | verifyID2Children = true; |
| | | verifyID2Subtree = true; |
| | | } |
| | | verifyID2ChildrenCount = true; |
| | | attrIndexList.addAll(entryContainer.getAttributeIndexes()); |
| | | } |
| | | else |
| | |
| | | { |
| | | verifyDN2ID = true; |
| | | } |
| | | else if ("id2children".equals(lowerName)) |
| | | if ("id2childrencount".equals(lowerName)) |
| | | { |
| | | if (rootContainer.getConfiguration().isSubordinateIndexesEnabled()) |
| | | { |
| | | verifyID2Children = true; |
| | | } |
| | | else |
| | | { |
| | | LocalizableMessage msg = NOTE_JEB_SUBORDINATE_INDEXES_DISABLED |
| | | .get(rootContainer.getConfiguration().getBackendId()); |
| | | throw new StorageRuntimeException(msg.toString()); |
| | | } |
| | | } |
| | | else if ("id2subtree".equals(lowerName)) |
| | | { |
| | | if (rootContainer.getConfiguration().isSubordinateIndexesEnabled()) |
| | | { |
| | | verifyID2Subtree = true; |
| | | } |
| | | else |
| | | { |
| | | LocalizableMessage msg = NOTE_JEB_SUBORDINATE_INDEXES_DISABLED |
| | | .get(rootContainer.getConfiguration().getBackendId()); |
| | | throw new StorageRuntimeException(msg.toString()); |
| | | } |
| | | verifyID2ChildrenCount = true; |
| | | } |
| | | else if(lowerName.startsWith("vlv.")) |
| | | { |
| | |
| | | // the entry entryContainer methods. |
| | | id2entry = entryContainer.getID2Entry(); |
| | | dn2id = entryContainer.getDN2ID(); |
| | | id2c = entryContainer.getID2Children(); |
| | | id2s = entryContainer.getID2Subtree(); |
| | | id2childrenCount = entryContainer.getID2ChildrenCount(); |
| | | |
| | | // Make a note of the time we started. |
| | | long startTime = System.currentTimeMillis(); |
| | |
| | | */ |
| | | private void iterateID2Entry(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName()); |
| | | try |
| | | try(final Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName())) |
| | | { |
| | | long storedEntryCount = id2entry.getRecordCount(txn); |
| | | while (cursor.next()) |
| | |
| | | } |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | |
| | | /** |
| | |
| | | { |
| | | iterateDN2ID(txn); |
| | | } |
| | | else if (verifyID2Children) |
| | | else if (verifyID2ChildrenCount) |
| | | { |
| | | iterateID2Children(txn); |
| | | } |
| | | else if (verifyID2Subtree) |
| | | { |
| | | iterateID2Subtree(txn); |
| | | iterateID2ChildrenCount(txn); |
| | | } |
| | | else if (attrIndexList.size() > 0) |
| | | { |
| | |
| | | */ |
| | | private void iterateDN2ID(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName()); |
| | | try |
| | | final Queue<ChildrenCount> childrenCounters = new LinkedList<>(); |
| | | ChildrenCount currentNode = null; |
| | | |
| | | try(final Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName())) |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | | keyCount++; |
| | | |
| | | ByteString key = cursor.getKey(); |
| | | ByteString value = cursor.getValue(); |
| | | |
| | | EntryID entryID; |
| | | final ByteString key = cursor.getKey(); |
| | | final EntryID entryID; |
| | | try |
| | | { |
| | | entryID = new EntryID(value); |
| | | entryID = new EntryID(cursor.getValue()); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File dn2id has malformed ID for DN <%s>:%n%s%n", key, StaticUtils.bytesToHex(value)); |
| | | } |
| | | logger.trace("File dn2id has malformed ID for DN <%s>", key, e); |
| | | continue; |
| | | } |
| | | |
| | | Entry entry; |
| | | currentNode = verifyID2ChildrenCount(txn, childrenCounters, key, entryID); |
| | | |
| | | final Entry entry; |
| | | try |
| | | { |
| | | entry = id2entry.get(txn, entryID); |
| | |
| | | if (entry == null) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File dn2id has DN <%s> referencing unknown ID %d%n", key, entryID); |
| | | } |
| | | } |
| | | else if (!key.equals(dnToDNKey(entry.getName(), verifyConfig.getBaseDN().size()))) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File dn2id has DN <%s> referencing entry with wrong DN <%s>%n", key, entry.getName()); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | finally |
| | | |
| | | while ((currentNode = childrenCounters.poll()) != null) |
| | | { |
| | | cursor.close(); |
| | | verifyID2ChildrenCount(txn, currentNode); |
| | | } |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Iterate through the entries in ID2Children to perform a check for |
| | | * index cleanliness. |
| | | * |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | private void iterateID2Children(ReadableTransaction txn) throws StorageRuntimeException |
| | | private ChildrenCount verifyID2ChildrenCount(ReadableTransaction txn, final Queue<ChildrenCount> childrenCounters, |
| | | final ByteString key, final EntryID entryID) |
| | | { |
| | | Cursor<ByteString, EntryIDSet> cursor = id2c.openCursor(txn); |
| | | try |
| | | while (childrenCounters.peek() != null && !DN2ID.isChild(childrenCounters.peek().baseDN, key)) |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | | keyCount++; |
| | | |
| | | ByteString key = cursor.getKey(); |
| | | |
| | | EntryID entryID; |
| | | try |
| | | { |
| | | entryID = new EntryID(key); |
| | | // This subtree is fully processed, pop the counter of the parent DN from the stack and verify it's value |
| | | verifyID2ChildrenCount(txn, childrenCounters.remove()); |
| | | } |
| | | catch (Exception e) |
| | | if (childrenCounters.peek() != null) |
| | | { |
| | | childrenCounters.peek().numberOfChildren++; |
| | | } |
| | | final ChildrenCount node = new ChildrenCount(key, entryID); |
| | | childrenCounters.add(node); |
| | | return node; |
| | | } |
| | | |
| | | private void verifyID2ChildrenCount(ReadableTransaction txn, ChildrenCount parent) { |
| | | final long expected = parent.numberOfChildren; |
| | | final long currentValue = id2childrenCount.getCount(txn, parent.entryID); |
| | | if (expected != currentValue) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File id2children has malformed ID %s%n", StaticUtils.bytesToHex(key)); |
| | | logger.trace("File id2childrenCount has wrong number of children for DN <%s> (got %d, expecting %d)", |
| | | parent.baseDN, currentValue, expected); |
| | | } |
| | | } |
| | | |
| | | private void iterateID2ChildrenCount(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor<EntryID, Long> cursor = id2childrenCount.openCursor(txn); |
| | | if (!cursor.next()) { |
| | | return; |
| | | } |
| | | |
| | | EntryID currentEntryID = new EntryID(-1); |
| | | while(cursor.next()) { |
| | | if (cursor.getKey().equals(currentEntryID)) { |
| | | /** Sharded cursor may return the same EntryID multiple times */ |
| | | continue; |
| | | } |
| | | |
| | | EntryIDSet entryIDSet; |
| | | |
| | | try |
| | | { |
| | | entryIDSet = cursor.getValue(); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | currentEntryID = cursor.getKey(); |
| | | if (!id2entry.containsEntryID(txn, currentEntryID)) { |
| | | logger.trace("File id2ChildrenCount reference non-existing EntryID <%d>%n", currentEntryID); |
| | | errorCount++; |
| | | logger.traceException(e); |
| | | logger.trace("File id2children has malformed ID list for ID %s", entryID); |
| | | continue; |
| | | } |
| | | |
| | | updateIndexStats(entryIDSet); |
| | | |
| | | if (entryIDSet.isDefined()) |
| | | { |
| | | Entry entry; |
| | | try |
| | | { |
| | | entry = id2entry.get(txn, entryID); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | logger.traceException(e); |
| | | errorCount++; |
| | | continue; |
| | | } |
| | | |
| | | if (entry == null) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2children has unknown ID %d%n", entryID); |
| | | } |
| | | continue; |
| | | } |
| | | |
| | | for (EntryID id : entryIDSet) |
| | | { |
| | | Entry childEntry; |
| | | try |
| | | { |
| | | childEntry = id2entry.get(txn, id); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | logger.traceException(e); |
| | | errorCount++; |
| | | continue; |
| | | } |
| | | |
| | | if (childEntry == null) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2children has ID %d referencing unknown ID %d%n", entryID, id); |
| | | } |
| | | continue; |
| | | } |
| | | |
| | | if (!childEntry.getName().isDescendantOf(entry.getName()) || |
| | | childEntry.getName().size() != |
| | | entry.getName().size() + 1) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2children has ID %d with DN <%s> " + |
| | | "referencing ID %d with non-child DN <%s>%n", |
| | | entryID, entry.getName(), id, childEntry.getName()); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Iterate through the entries in ID2Subtree to perform a check for |
| | | * index cleanliness. |
| | | * |
| | | * @throws StorageRuntimeException If an error occurs in the database. |
| | | */ |
| | | private void iterateID2Subtree(ReadableTransaction txn) throws StorageRuntimeException |
| | | { |
| | | Cursor<ByteString, EntryIDSet> cursor = id2s.openCursor(txn); |
| | | try |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | | keyCount++; |
| | | |
| | | ByteString key = cursor.getKey(); |
| | | EntryID entryID; |
| | | try |
| | | { |
| | | entryID = new EntryID(key); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File id2subtree has malformed ID %s%n", StaticUtils.bytesToHex(key)); |
| | | } |
| | | continue; |
| | | } |
| | | |
| | | EntryIDSet entryIDSet; |
| | | try |
| | | { |
| | | entryIDSet = cursor.getValue(); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | errorCount++; |
| | | logger.traceException(e); |
| | | logger.trace("File id2subtree has malformed ID list for ID %s", entryID); |
| | | continue; |
| | | } |
| | | |
| | | updateIndexStats(entryIDSet); |
| | | |
| | | if (entryIDSet.isDefined()) |
| | | { |
| | | Entry entry; |
| | | try |
| | | { |
| | | entry = id2entry.get(txn, entryID); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | logger.traceException(e); |
| | | errorCount++; |
| | | continue; |
| | | } |
| | | |
| | | if (entry == null) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2subtree has unknown ID %d%n", entryID); |
| | | } |
| | | continue; |
| | | } |
| | | |
| | | for (EntryID id : entryIDSet) |
| | | { |
| | | Entry subordEntry; |
| | | try |
| | | { |
| | | subordEntry = id2entry.get(txn, id); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | logger.traceException(e); |
| | | errorCount++; |
| | | continue; |
| | | } |
| | | |
| | | if (subordEntry == null) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2subtree has ID %d referencing " + |
| | | "unknown ID %d%n", entryID, id); |
| | | } |
| | | continue; |
| | | } |
| | | |
| | | if (!subordEntry.getName().isDescendantOf(entry.getName())) |
| | | { |
| | | errorCount++; |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2subtree has ID %d with DN <%s> " + |
| | | "referencing ID %d with non-subordinate DN <%s>%n", |
| | | entryID, entry.getName(), id, subordEntry.getName()); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | |
| | |
| | | return; |
| | | } |
| | | |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(vlvIndex.getName()); |
| | | try |
| | | try(final Cursor<ByteString, ByteString> cursor = txn.openCursor(vlvIndex.getName())) |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | |
| | | |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | |
| | | /** |
| | |
| | | return; |
| | | } |
| | | |
| | | Cursor<ByteString,EntryIDSet> cursor = index.openCursor(txn); |
| | | try |
| | | try(final Cursor<ByteString,EntryIDSet> cursor = index.openCursor(txn)) |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | |
| | | } |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | |
| | | /** |
| | |
| | | { |
| | | verifyDN2ID(txn, entryID, entry); |
| | | } |
| | | if (verifyID2Children) |
| | | { |
| | | verifyID2Children(txn, entryID, entry); |
| | | } |
| | | if (verifyID2Subtree) |
| | | { |
| | | verifyID2Subtree(txn, entryID, entry); |
| | | } |
| | | verifyIndex(txn, entryID, entry); |
| | | } |
| | | |
| | |
| | | } |
| | | |
| | | /** |
| | | * Check that the ID2Children index is complete for a given entry. |
| | | * |
| | | * @param entryID The entry ID. |
| | | * @param entry The entry to be checked. |
| | | */ |
| | | private void verifyID2Children(ReadableTransaction txn, EntryID entryID, Entry entry) |
| | | { |
| | | DN dn = entry.getName(); |
| | | |
| | | DN parentDN = getParent(dn); |
| | | if (parentDN != null) |
| | | { |
| | | EntryID parentID = null; |
| | | try |
| | | { |
| | | parentID = dn2id.get(txn, parentDN); |
| | | if (parentID == null) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File dn2id is missing key %s.%n", parentDN); |
| | | } |
| | | errorCount++; |
| | | } |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | logger.trace("File dn2id has error reading key %s: %s.", parentDN, e.getMessage()); |
| | | } |
| | | errorCount++; |
| | | } |
| | | if (parentID != null) |
| | | { |
| | | try |
| | | { |
| | | ConditionResult cr = indexContainsID(id2c, txn, parentID.toByteString(), entryID); |
| | | if (cr == ConditionResult.FALSE) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2children is missing ID %d for key %d.%n", entryID, parentID); |
| | | } |
| | | errorCount++; |
| | | } |
| | | else if (cr == ConditionResult.UNDEFINED) |
| | | { |
| | | incrEntryLimitStats(id2c, parentID.toByteString()); |
| | | } |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File id2children has error reading key %d: %s.", parentID, e.getMessage()); |
| | | } |
| | | errorCount++; |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Check that the ID2Subtree index is complete for a given entry. |
| | | * |
| | | * @param entryID The entry ID. |
| | | * @param entry The entry to be checked. |
| | | */ |
| | | private void verifyID2Subtree(ReadableTransaction txn, EntryID entryID, Entry entry) |
| | | { |
| | | for (DN dn = getParent(entry.getName()); dn != null; dn = getParent(dn)) |
| | | { |
| | | EntryID id = null; |
| | | try |
| | | { |
| | | id = dn2id.get(txn, dn); |
| | | if (id == null) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File dn2id is missing key %s.%n", dn); |
| | | } |
| | | errorCount++; |
| | | } |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | logger.trace("File dn2id has error reading key %s: %s.%n", dn, e.getMessage()); |
| | | } |
| | | errorCount++; |
| | | } |
| | | if (id != null) |
| | | { |
| | | try |
| | | { |
| | | ConditionResult cr = indexContainsID(id2s, txn, id.toByteString(), entryID); |
| | | if (cr == ConditionResult.FALSE) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.trace("File id2subtree is missing ID %d for key %d.%n", entryID, id); |
| | | } |
| | | errorCount++; |
| | | } |
| | | else if (cr == ConditionResult.UNDEFINED) |
| | | { |
| | | incrEntryLimitStats(id2s, id.toByteString()); |
| | | } |
| | | } |
| | | catch (StorageRuntimeException e) |
| | | { |
| | | if (logger.isTraceEnabled()) |
| | | { |
| | | logger.traceException(e); |
| | | |
| | | logger.trace("File id2subtree has error reading key %d: %s.%n", id, e.getMessage()); |
| | | } |
| | | errorCount++; |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Construct a printable string from a raw key value. |
| | | * |
| | | * @param indexName |
| | |
| | | * The bytes of the key. |
| | | * @return A string that may be logged or printed. |
| | | */ |
| | | private String keyDump(String indexName, ByteSequence key) |
| | | private static String keyDump(String indexName, ByteSequence key) |
| | | { |
| | | StringBuilder buffer = new StringBuilder(128); |
| | | buffer.append("Index: "); |
| | |
| | | } |
| | | } |
| | | |
| | | private ConditionResult indexContainsID(Index index, ReadableTransaction txn, ByteString key, EntryID entryID) |
| | | private static ConditionResult indexContainsID(Index index, ReadableTransaction txn, ByteString key, EntryID entryID) |
| | | { |
| | | EntryIDSet entryIDSet = index.get(txn, key); |
| | | if (entryIDSet.isDefined()) |
| | |
| | | return dn.getParentDNInSuffix(); |
| | | } |
| | | |
| | | /** |
| | | * This class maintain the number of children for a given dn |
| | | */ |
| | | private static final class ChildrenCount { |
| | | private final ByteString baseDN; |
| | | private final EntryID entryID; |
| | | private long numberOfChildren; |
| | | |
| | | private ChildrenCount(ByteString dn, EntryID id) { |
| | | this.baseDN = dn; |
| | | this.entryID = id; |
| | | } |
| | | } |
| | | |
| | | /** This class reports progress of the verify job at fixed intervals. */ |
| | | private final class ProgressTask extends TimerTask |
| | | { |
| | |
| | | { |
| | | totalCount = dn2id.getRecordCount(txn); |
| | | } |
| | | else if (verifyID2Children) |
| | | else if (verifyID2ChildrenCount) |
| | | { |
| | | totalCount = id2c.getRecordCount(txn); |
| | | } |
| | | else if (verifyID2Subtree) |
| | | { |
| | | totalCount = id2s.getRecordCount(txn); |
| | | totalCount = id2childrenCount.getRecordCount(txn); |
| | | } |
| | | else if (!attrIndexList.isEmpty()) |
| | | { |
| | |
| | | } |
| | | else |
| | | { |
| | | totalCount = rootContainer.getEntryContainer(verifyConfig.getBaseDN()).getEntryCount(txn); |
| | | totalCount = rootContainer.getEntryContainer(verifyConfig.getBaseDN()).getNumberOfEntriesInBaseDN(); |
| | | } |
| | | } |
| | | |
| | |
| | | */ |
| | | package org.opends.server.backends.pluggable.spi; |
| | | |
| | | import java.io.Closeable; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | |
| | | /** |
| | | * Cursor that iterates through records in a tree. |
| | | * Sequential cursor extended with navigation methods. |
| | | * @param <K> Type of the record's key |
| | | * @param <V> Type of the record's value |
| | | */ |
| | | public interface Cursor<K,V> extends Closeable |
| | | public interface Cursor<K,V> extends SequentialCursor<K, V> |
| | | { |
| | | /** |
| | | * Positions the cursor to the provided key if it exists in the tree. |
| | |
| | | * @return {@code true} if the cursor could be positioned to the index, {@code false} otherwise |
| | | */ |
| | | boolean positionToIndex(int index); |
| | | |
| | | /** |
| | | * Moves this cursor to the next record in the tree. |
| | | * |
| | | * @return {@code true} if the cursor could move to the next record, |
| | | * {@code false} if no next record exists |
| | | */ |
| | | boolean next(); |
| | | |
| | | /** |
| | | * Returns the key of the record on which this cursor is currently positioned. |
| | | * |
| | | * @return the current record's key, |
| | | * or {@code null} if this cursor is not positioned on any record. |
| | | */ |
| | | K getKey(); |
| | | |
| | | /** |
| | | * Returns the value of the record on which this cursor is currently positioned. |
| | | * |
| | | * @return the current record's value, |
| | | * or {@code null} if this cursor is not positioned on any record. |
| | | */ |
| | | V getValue(); |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | void close(); |
| | | } |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2014-2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable.spi; |
| | | |
| | | import java.io.Closeable; |
| | | import java.util.NoSuchElementException; |
| | | |
| | | /** |
| | | * Cursor extended with navigation methods. |
| | | * @param <K> Type of the record's key |
| | | * @param <V> Type of the record's value |
| | | */ |
| | | public interface SequentialCursor<K,V> extends Closeable |
| | | { |
| | | /** |
| | | * Moves this cursor to the next record in the tree. |
| | | * |
| | | * @return {@code true} if the cursor has moved to the next record, |
| | | * {@code false} if no next record exists leaving cursor |
| | | * in undefined state. |
| | | */ |
| | | boolean next(); |
| | | |
| | | /** |
| | | * Check whether this cursor is currently pointing to valid record. |
| | | * |
| | | * @return {@code true} if the cursor is pointing to a valid entry, |
| | | * {@code false} if cursor is not pointing to a valid entry |
| | | */ |
| | | boolean isDefined(); |
| | | |
| | | /** |
| | | * Returns the key of the record on which this cursor is currently positioned. |
| | | * |
| | | * @return the current record's key. |
| | | * @throws NoSuchElementException if the cursor is not defined. |
| | | */ |
| | | K getKey() throws NoSuchElementException; |
| | | |
| | | /** |
| | | * Returns the value of the record on which this cursor is currently positioned. |
| | | * |
| | | * @return the current record's value. |
| | | * @throws NoSuchElementException if the cursor is not defined. |
| | | */ |
| | | V getValue() throws NoSuchElementException; |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | void close(); |
| | | } |
| | |
| | | */ |
| | | package org.opends.server.backends.task; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | | import static org.opends.server.util.ServerConstants.*; |
| | |
| | | return ConditionResult.valueOf(ret != 0); |
| | | } |
| | | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException { |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | return numSubordinates(baseDN, true) + 1; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | return numSubordinates(parentDN, false); |
| | | } |
| | | |
| | | private long numSubordinates(DN entryDN, boolean subtree) throws DirectoryException |
| | | { |
| | | if (entryDN == null) |
| | | { |
| | |
| | | */ |
| | | package org.opends.server.extensions; |
| | | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.messages.ConfigMessages.*; |
| | | import static org.opends.server.config.ConfigConstants.*; |
| | | import static org.opends.server.extensions.ExtensionsConstants.*; |
| | |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long numSubordinates(DN entryDN, boolean subtree) |
| | | throws DirectoryException |
| | | public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException |
| | | { |
| | | ConfigEntry baseEntry = configEntries.get(entryDN); |
| | | checkNotNull(baseDN, "baseDN must not be null"); |
| | | final ConfigEntry baseEntry = configEntries.get(baseDN); |
| | | if (baseEntry == null) |
| | | { |
| | | return -1; |
| | | } |
| | | |
| | | if(!subtree) |
| | | { |
| | | return baseEntry.getChildren().size(); |
| | | } |
| | | else |
| | | { |
| | | long count = 0; |
| | | long count = 1; |
| | | for(ConfigEntry child : baseEntry.getChildren().values()) |
| | | { |
| | | count += numSubordinates(child.getDN(), true); |
| | | count += getNumberOfEntriesInBaseDN(child.getDN()); |
| | | count ++; |
| | | } |
| | | return count; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | | @Override |
| | | public long getNumberOfChildren(DN parentDN) throws DirectoryException |
| | | { |
| | | checkNotNull(parentDN, "parentDN must not be null"); |
| | | final ConfigEntry baseEntry = configEntries.get(parentDN); |
| | | return baseEntry != null ? baseEntry.getChildren().size() : -1; |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | |
| | | |
| | | try |
| | | { |
| | | long count = backend.numSubordinates(entry.getName(), false); |
| | | long count = backend.getNumberOfChildren(entry.getName()); |
| | | if(count >= 0) |
| | | { |
| | | return Attributes.create(rule.getAttributeType(), String.valueOf(count)); |
| | |
| | | @Override |
| | | public boolean hasValue(Entry entry, VirtualAttributeRule rule) |
| | | { |
| | | Backend backend = DirectoryServer.getBackend(entry.getName()); |
| | | Backend<?> backend = DirectoryServer.getBackend(entry.getName()); |
| | | |
| | | try |
| | | { |
| | | return backend.numSubordinates(entry.getName(), false) >= 0; |
| | | return backend.getNumberOfChildren(entry.getName()) >= 0; |
| | | } |
| | | catch(DirectoryException de) |
| | | { |
| | |
| | | Backend<?> backend = DirectoryServer.getBackend(entry.getName()); |
| | | try |
| | | { |
| | | long count = backend.numSubordinates(entry.getName(), false); |
| | | long count = backend.getNumberOfChildren(entry.getName()); |
| | | return count >= 0 && Long.parseLong(value.toString()) == count; |
| | | } |
| | | catch (NumberFormatException e) |
| | |
| | | long entryCount = -1; |
| | | try |
| | | { |
| | | entryCount = backend.numSubordinates(dn, true) + 1; |
| | | entryCount = backend.getNumberOfEntriesInBaseDN(dn); |
| | | } |
| | | catch (Exception ex) |
| | | { |
| | |
| | | throw new DirectoryException(ResultCode.OTHER, message); |
| | | } |
| | | |
| | | long numberOfEntries = backend.numSubordinates(getBaseDN(), true) + 1; |
| | | long numberOfEntries = backend.getNumberOfEntriesInBaseDN(getBaseDN()); |
| | | long entryCount = Math.min(numberOfEntries, 1000); |
| | | OutputStream os; |
| | | ReplLDIFOutputStream ros = null; |
| | |
| | | throw new DirectoryException(ResultCode.OTHER, msg); |
| | | } |
| | | |
| | | return backend.numSubordinates(getBaseDN(), true) + 1; |
| | | return backend.getNumberOfEntriesInBaseDN(getBaseDN()); |
| | | } |
| | | |
| | | /** {@inheritDoc} */ |
| | |
| | | |
| | | import org.forgerock.i18n.LocalizableMessage; |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteSequenceReader; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Decodes the provided ASN.1 octet string as a DN. |
| | | * |
| | |
| | | * decode the provided ASN.1 octet |
| | | * string as a DN. |
| | | */ |
| | | public static DN decode(ByteString dnString) |
| | | public static DN decode(ByteSequence dnString) |
| | | throws DirectoryException |
| | | { |
| | | // A null or empty DN is acceptable. |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Tests the {@code numSubordinates} method. |
| | | * |
| | | * @throws Exception If an unexpected problem occurs. |
| | | */ |
| | | @Test |
| | | public void testNumSubordinates() |
| | | throws Exception |
| | | public void testNumSubordinates() throws Exception |
| | | { |
| | | Backend<?> b = getLDIFBackend(); |
| | | |
| | | assertEquals(b.numSubordinates(DN.valueOf("o=ldif"), false), 1); |
| | | assertEquals(b.numSubordinates(DN.valueOf("o=ldif"), true), 26); |
| | | assertEquals(b.numSubordinates( |
| | | DN.valueOf("uid=user.1,ou=People,o=ldif"), false), 0); |
| | | assertEquals(b.numSubordinates( |
| | | DN.valueOf("uid=user.1,ou=People,o=ldif"), true), 0); |
| | | |
| | | assertEquals(b.getNumberOfChildren(DN.valueOf("o=ldif")), 1); |
| | | assertEquals(b.getNumberOfEntriesInBaseDN(DN.valueOf("o=ldif")), 27); |
| | | assertEquals(b.getNumberOfChildren(DN.valueOf("uid=user.1,ou=People,o=ldif")), 0); |
| | | try |
| | | { |
| | | b.numSubordinates(DN.valueOf("ou=nonexistent,o=ldif"), false); |
| | | fail("Expected an exception when calling numSubordinates on a " + |
| | | "non-existent entry"); |
| | | b.getNumberOfChildren(DN.valueOf("ou=nonexistent,o=ldif")); |
| | | fail("Expected an exception when calling numSubordinates on a " + "non-existent entry"); |
| | | } |
| | | catch (DirectoryException de) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | @Test(expectedExceptions=DirectoryException.class) |
| | | public void testCannotGetNumberOfEntriesForNotBaseDN() throws Exception { |
| | | assertEquals(getLDIFBackend().getNumberOfEntriesInBaseDN(DN.valueOf("uid=user.1,ou=People,o=ldif")), 0); |
| | | } |
| | | |
| | | /** |
| | | * Tests LDIF export functionality. |
| | |
| | | public void testNumSubordinates() throws Exception |
| | | { |
| | | DN dn = DN.valueOf("dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 1); |
| | | assertEquals(backend.numSubordinates(dn, true), 13); |
| | | assertEquals(backend.getNumberOfChildren(dn), 1); |
| | | assertEquals(backend.getNumberOfEntriesInBaseDN(dn), 14); |
| | | dn = DN.valueOf("ou=People,dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 12); |
| | | assertEquals(backend.numSubordinates(dn, true), 12); |
| | | assertEquals(backend.getNumberOfChildren(dn), 12); |
| | | dn = DN.valueOf("dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), -1); |
| | | assertEquals(backend.numSubordinates(dn, true), -1); |
| | | assertEquals(backend.getNumberOfChildren(dn), -1); |
| | | dn = DN.valueOf("dc=test1,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 2); |
| | | assertEquals(backend.numSubordinates(dn, true), 2); |
| | | assertEquals(backend.getNumberOfChildren(dn), 2); |
| | | dn = DN.valueOf("uid=user.10,ou=People,dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 0); |
| | | assertEquals(backend.numSubordinates(dn, true), 0); |
| | | assertEquals(backend.getNumberOfChildren(dn), 0); |
| | | dn = DN.valueOf("uid=does not exist,ou=People,dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), -1); |
| | | assertEquals(backend.numSubordinates(dn, true), -1); |
| | | assertEquals(backend.getNumberOfChildren(dn), -1); |
| | | } |
| | | |
| | | @Test(expectedExceptions = DirectoryException.class) |
| | | public void testCannotGetNumberOfEntriesInNotBaseDN() throws Exception |
| | | { |
| | | backend.getNumberOfEntriesInBaseDN(DN.valueOf("ou=People,dc=test,dc=com")); |
| | | } |
| | | |
| | | |
| | | @Test(dependsOnMethods = "testAdd") |
| | | public void testSearchIndex() throws Exception { |
| | | Set<String> attribs = new LinkedHashSet<String>(); |
| | |
| | | assertResultsCountIs(1, debugString); |
| | | } |
| | | |
| | | private void assertResultsCountIs(int expectedCount, String debugString) |
| | | private static void assertResultsCountIs(int expectedCount, String debugString) |
| | | { |
| | | int finalStartPos = debugString.indexOf("final=") + 13; |
| | | int finalEndPos = debugString.indexOf("]", finalStartPos); |
| | |
| | | } |
| | | |
| | | /** Returns the debug string from a search result. */ |
| | | private String getDebugString(List<SearchResultEntry> result) |
| | | private static String getDebugString(List<SearchResultEntry> result) |
| | | { |
| | | return result.get(0).getAttribute("debugsearchindex").get(0).toString(); |
| | | } |
| | | |
| | | /** Returns the results of subtree search on provided connection with provided filter. */ |
| | | private List<SearchResultEntry> doSubtreeSearch(String filter, Set<String> attribs) throws Exception |
| | | private static List<SearchResultEntry> doSubtreeSearch(String filter, Set<String> attribs) throws Exception |
| | | { |
| | | final SearchRequest request = |
| | | newSearchRequest("dc=test,dc=com", SearchScope.WHOLE_SUBTREE, filter).addAttribute(attribs); |
| | |
| | | } |
| | | } |
| | | |
| | | private List<AttributeIndexer> newAttributeIndexers(AttributeType attrType, MatchingRule matchingRule) |
| | | private static List<AttributeIndexer> newAttributeIndexers(AttributeType attrType, MatchingRule matchingRule) |
| | | { |
| | | List<AttributeIndexer> indexers = new ArrayList<AttributeIndexer>(); |
| | | for (org.forgerock.opendj.ldap.spi.Indexer indexer : matchingRule.getIndexers()) |
| | |
| | | return indexers; |
| | | } |
| | | |
| | | private IndexingOptions getOptions() |
| | | private static IndexingOptions getOptions() |
| | | { |
| | | final IndexingOptions options = mock(IndexingOptions.class); |
| | | when(options.substringKeySize()).thenReturn(6); |
| | | return options; |
| | | } |
| | | |
| | | private void assertIndexContainsID(List<? extends Indexer> indexers, Entry entry, Index index, EntryID entryID) |
| | | private static void assertIndexContainsID(List<? extends Indexer> indexers, Entry entry, Index index, EntryID entryID) |
| | | { |
| | | for (Indexer indexer : indexers) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | private void assertIndexContainsID(List<? extends Indexer> indexers, Entry entry, |
| | | Index index, EntryID entryID, ConditionResult expected) |
| | | private static void assertIndexContainsID(List<? extends Indexer> indexers, Entry entry, Index index, |
| | | EntryID entryID, ConditionResult expected) |
| | | { |
| | | for (Indexer indexer : indexers) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | private void assertIndexContainsID(Set<ByteString> addKeys, Index index, |
| | | EntryID entryID, ConditionResult expected) |
| | | private static void assertIndexContainsID(Set<ByteString> addKeys, Index index, EntryID entryID, |
| | | ConditionResult expected) |
| | | { |
| | | DatabaseEntry key = new DatabaseEntry(); |
| | | for (ByteString keyBytes : addKeys) |
| | |
| | | assertEquals(resultCode, 0); |
| | | } |
| | | |
| | | private boolean findContainer(List<DatabaseContainer> databases, String lowercaseName) |
| | | private static boolean findContainer(List<DatabaseContainer> databases, String lowercaseName) |
| | | { |
| | | for (DatabaseContainer dc : databases) |
| | | { |
| | |
| | | public void testNumSubordinatesIndexEntryLimitExceeded() throws Exception |
| | | { |
| | | DN dn = DN.valueOf("dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 1); |
| | | assertEquals(backend.numSubordinates(dn, true), 14); |
| | | assertEquals(backend.getNumberOfChildren(dn), 1); |
| | | assertEquals(backend.getNumberOfEntriesInBaseDN(dn), 15); |
| | | |
| | | // 1 entry was deleted and 2 added for a total of 13 |
| | | dn = DN.valueOf("ou=People,dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 13); |
| | | assertEquals(backend.numSubordinates(dn, true), 13); |
| | | assertEquals(backend.getNumberOfChildren(dn), 13); |
| | | dn = DN.valueOf("dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), -1); |
| | | assertEquals(backend.numSubordinates(dn, true), -1); |
| | | assertEquals(backend.getNumberOfChildren(dn), -1); |
| | | dn = DN.valueOf("dc=test1,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 2); |
| | | assertEquals(backend.numSubordinates(dn, true), 2); |
| | | assertEquals(backend.getNumberOfChildren(dn), 2); |
| | | dn = DN.valueOf("uid=user.10,ou=People,dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), 0); |
| | | assertEquals(backend.numSubordinates(dn, true), 0); |
| | | assertEquals(backend.getNumberOfChildren(dn), 0); |
| | | dn = DN.valueOf("uid=does not exist,ou=People,dc=test,dc=com"); |
| | | assertEquals(backend.numSubordinates(dn, false), -1); |
| | | assertEquals(backend.numSubordinates(dn, true), -1); |
| | | assertEquals(backend.getNumberOfChildren(dn), -1); |
| | | } |
| | | |
| | | |
| | |
| | | when(backendCfg.getBaseDN()).thenReturn(newSortedSet(baseDN)); |
| | | when(backendCfg.listBackendIndexes()).thenReturn(new String[0]); |
| | | when(backendCfg.listBackendVLVIndexes()).thenReturn(new String[] { SORT_ORDER_1, SORT_ORDER_2 }); |
| | | when(backendCfg.isSubordinateIndexesEnabled()).thenReturn(true); |
| | | |
| | | when(backendCfg.getDBDirectory()).thenReturn(BACKEND_NAME); |
| | | when(backendCfg.getDBDirectoryPermissions()).thenReturn("755"); |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.assertj.core.api.Assertions.*; |
| | | import static org.mockito.Mockito.*; |
| | | |
| | | import java.util.ArrayList; |
| | | import java.util.List; |
| | | import java.util.concurrent.TimeUnit; |
| | | |
| | | import org.forgerock.opendj.config.server.ConfigException; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.forgerock.util.promise.PromiseImpl; |
| | | import org.opends.server.DirectoryServerTestCase; |
| | | import org.opends.server.TestCaseUtils; |
| | | import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType; |
| | | import org.opends.server.admin.std.server.BackendIndexCfg; |
| | | import org.opends.server.admin.std.server.PersistitBackendCfg; |
| | | import org.opends.server.backends.persistit.PersistItStorage; |
| | | import org.opends.server.backends.pluggable.spi.ReadOperation; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.SequentialCursor; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteOperation; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.core.MemoryQuota; |
| | | import org.opends.server.core.ServerContext; |
| | | import org.opends.server.extensions.DiskSpaceMonitor; |
| | | import org.opends.server.types.DN; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.testng.annotations.AfterClass; |
| | | import org.testng.annotations.AfterMethod; |
| | | import org.testng.annotations.BeforeClass; |
| | | import org.testng.annotations.BeforeMethod; |
| | | import org.testng.annotations.Test; |
| | | |
| | | @Test(groups = { "precommit", "pluggablebackend" }, sequential = true) |
| | | public class DN2IDTest extends DirectoryServerTestCase |
| | | { |
| | | private final TreeName dn2IDTreeName = new TreeName("base-dn", "index-id"); |
| | | private DN baseDN; |
| | | private DN2ID dn2ID; |
| | | private PersistItStorage storage; |
| | | |
| | | @BeforeClass |
| | | public void startFakeServer() throws Exception |
| | | { |
| | | TestCaseUtils.startFakeServer(); |
| | | } |
| | | |
| | | @AfterClass |
| | | public void stopFakeServer() throws Exception |
| | | { |
| | | TestCaseUtils.shutdownFakeServer(); |
| | | } |
| | | |
| | | @BeforeMethod |
| | | public void setUp() throws Exception |
| | | { |
| | | ServerContext serverContext = mock(ServerContext.class); |
| | | when(serverContext.getMemoryQuota()).thenReturn(new MemoryQuota()); |
| | | when(serverContext.getDiskSpaceMonitor()).thenReturn(mock(DiskSpaceMonitor.class)); |
| | | |
| | | storage = new PersistItStorage(createBackendCfg(), serverContext); |
| | | try(final org.opends.server.backends.pluggable.spi.Importer importer = storage.startImport()) { |
| | | importer.createTree(dn2IDTreeName); |
| | | } |
| | | |
| | | storage.open(); |
| | | |
| | | baseDN = dn("dc=example, dc=com"); |
| | | dn2ID = new DN2ID(dn2IDTreeName, baseDN); |
| | | } |
| | | |
| | | @AfterMethod |
| | | public void tearDown() |
| | | { |
| | | storage.close(); |
| | | storage.removeStorageFiles(); |
| | | } |
| | | |
| | | private void populate() throws DirectoryException, Exception |
| | | { |
| | | final String[] dns = |
| | | { |
| | | "dc=example,dc=com", |
| | | "ou=Devices,dc=example,dc=com", |
| | | "cn=dev0,ou=Devices,dc=example,dc=com", |
| | | "ou=People,dc=example,dc=com", |
| | | "cn=foo,ou=People,dc=example,dc=com", |
| | | "cn=barbar,ou=People,dc=example,dc=com", |
| | | "cn=foofoo,ou=People,dc=example,dc=com", |
| | | "cn=bar,ou=People,dc=example,dc=com", |
| | | "cn=dev0,cn=bar,ou=People,dc=example,dc=com", |
| | | "cn=dev1,cn=bar,ou=People,dc=example,dc=com" |
| | | }; |
| | | |
| | | for (int i = 0; i < dns.length; i++) |
| | | { |
| | | put(dn(dns[i]), i + 1); |
| | | } |
| | | } |
| | | |
| | | @Test |
| | | public void testCanAddDN() throws Exception |
| | | { |
| | | populate(); |
| | | |
| | | assertThat(get("dc=example,dc=com")).isEqualTo(id(1)); |
| | | assertThat(get("ou=People,dc=example,dc=com")).isEqualTo(id(4)); |
| | | assertThat(get("cn=dev1,cn=bar,ou=People,dc=example,dc=com")).isEqualTo(id(10)); |
| | | } |
| | | |
| | | @Test |
| | | public void testGetNonExistingDNReturnNull() throws Exception |
| | | { |
| | | assertThat(get("dc=non,dc=existing")).isNull(); |
| | | } |
| | | |
| | | @Test |
| | | public void testCanRemove() throws Exception |
| | | { |
| | | populate(); |
| | | |
| | | assertThat(get("ou=People,dc=example,dc=com")).isNotNull(); |
| | | assertThat(remove("ou=People,dc=example,dc=com")).isTrue(); |
| | | assertThat(get("ou=People,dc=example,dc=com")).isNull(); |
| | | } |
| | | |
| | | @Test |
| | | public void testRemoveNonExistingEntry() throws Exception |
| | | { |
| | | assertThat(remove("dc=non,dc=existing")).isFalse(); |
| | | } |
| | | |
| | | @Test |
| | | public void testTraverseChildren() throws Exception |
| | | { |
| | | populate(); |
| | | assertThat(traverseChildren("ou=People,dc=example,dc=com")) |
| | | .containsExactly( |
| | | get("cn=bar,ou=People,dc=example,dc=com"), |
| | | get("cn=barbar,ou=People,dc=example,dc=com"), |
| | | get("cn=foo,ou=People,dc=example,dc=com"), |
| | | get("cn=foofoo,ou=People,dc=example,dc=com")); |
| | | } |
| | | |
| | | @Test |
| | | public void testTraverseSubordinates() throws Exception |
| | | { |
| | | populate(); |
| | | assertThat(traverseSubordinates("ou=People,dc=example,dc=com")) |
| | | .containsExactly( |
| | | get("cn=bar,ou=People,dc=example,dc=com"), |
| | | get("cn=dev0,cn=bar,ou=People,dc=example,dc=com"), |
| | | get("cn=dev1,cn=bar,ou=People,dc=example,dc=com"), |
| | | get("cn=barbar,ou=People,dc=example,dc=com"), |
| | | get("cn=foo,ou=People,dc=example,dc=com"), |
| | | get("cn=foofoo,ou=People,dc=example,dc=com")); |
| | | } |
| | | |
| | | private EntryID get(final String dn) throws Exception |
| | | { |
| | | return storage.read(new ReadOperation<EntryID>() |
| | | { |
| | | @Override |
| | | public EntryID run(ReadableTransaction txn) throws Exception |
| | | { |
| | | return dn2ID.get(txn, dn(dn)); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private List<EntryID> traverseChildren(final String dn) throws Exception |
| | | { |
| | | return storage.read(new ReadOperation<List<EntryID>>() |
| | | { |
| | | @Override |
| | | public List<EntryID> run(ReadableTransaction txn) throws Exception |
| | | { |
| | | try (final SequentialCursor<Void, EntryID> cursor = dn2ID.openChildrenCursor(txn, dn(dn))) |
| | | { |
| | | return getAllIDs(cursor); |
| | | } |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private List<EntryID> traverseSubordinates(final String dn) throws Exception |
| | | { |
| | | return storage.read(new ReadOperation<List<EntryID>>() |
| | | { |
| | | @Override |
| | | public List<EntryID> run(ReadableTransaction txn) throws Exception |
| | | { |
| | | try (final SequentialCursor<Void, EntryID> cursor = dn2ID.openSubordinatesCursor(txn, dn(dn))) |
| | | { |
| | | return getAllIDs(cursor); |
| | | } |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private static <K, V> List<V> getAllIDs(SequentialCursor<K, V> cursor) { |
| | | final List<V> values = new ArrayList<>(); |
| | | while(cursor.next()) { |
| | | values.add(cursor.getValue()); |
| | | } |
| | | return values; |
| | | } |
| | | |
| | | private void put(final DN dn, final long id) throws Exception |
| | | { |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | dn2ID.put(txn, dn, new EntryID(id)); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private boolean remove(final String dn) throws Exception |
| | | { |
| | | final PromiseImpl<Boolean, NeverThrowsException> p = PromiseImpl.create(); |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | p.handleResult(dn2ID.remove(txn, dn(dn))); |
| | | } |
| | | }); |
| | | return p.get(10, TimeUnit.SECONDS); |
| | | } |
| | | |
| | | private static DN dn(String dn) throws DirectoryException |
| | | { |
| | | return DN.valueOf(dn); |
| | | } |
| | | |
| | | private static EntryID id(long id) |
| | | { |
| | | return new EntryID(id); |
| | | } |
| | | |
| | | private static PersistitBackendCfg createBackendCfg() throws ConfigException, DirectoryException |
| | | { |
| | | String homeDirName = "pdb_test"; |
| | | PersistitBackendCfg backendCfg = mock(PersistitBackendCfg.class); |
| | | |
| | | when(backendCfg.getBackendId()).thenReturn("persTest" + homeDirName); |
| | | when(backendCfg.getDBDirectory()).thenReturn(homeDirName); |
| | | when(backendCfg.getDBDirectoryPermissions()).thenReturn("755"); |
| | | when(backendCfg.getDBCacheSize()).thenReturn(0L); |
| | | when(backendCfg.getDBCachePercent()).thenReturn(20); |
| | | when(backendCfg.getBaseDN()).thenReturn(TestCaseUtils.newSortedSet(DN.valueOf("dc=test,dc=com"))); |
| | | when(backendCfg.dn()).thenReturn(DN.valueOf("dc=test,dc=com")); |
| | | when(backendCfg.listBackendIndexes()).thenReturn(new String[] { "sn" }); |
| | | when(backendCfg.listBackendVLVIndexes()).thenReturn(new String[0]); |
| | | |
| | | BackendIndexCfg indexCfg = mock(BackendIndexCfg.class); |
| | | when(indexCfg.getIndexType()).thenReturn(TestCaseUtils.newSortedSet(IndexType.PRESENCE, IndexType.EQUALITY)); |
| | | when(indexCfg.getAttribute()).thenReturn(DirectoryServer.getAttributeType("sn")); |
| | | when(backendCfg.getBackendIndex("sn")).thenReturn(indexCfg); |
| | | |
| | | return backendCfg; |
| | | } |
| | | |
| | | } |
| | |
| | | @Test(groups = { "precommit", "pluggablebackend", "unit" }, sequential=true) |
| | | public class EntryIDSetTest extends DirectoryServerTestCase |
| | | { |
| | | private static final int UNDEFINED_INITIAL_SIZE = 10; |
| | | |
| | | private final static ByteString KEY = ByteString.valueOf("test"); |
| | | |
| | | @Test(expectedExceptions = NullPointerException.class) |
| | |
| | | assertThat(codec.decode(KEY, string).isDefined()).isFalse(); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(Long.MAX_VALUE); |
| | | |
| | | string = codec.encode(newUndefinedSetWithSize(ByteString.valueOf("none"), 1234)); |
| | | string = codec.encode(newUndefinedSetWithKey(ByteString.valueOf("none"))); |
| | | assertThat(codec.decode(KEY, string).isDefined()).isFalse(); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(1234); |
| | | assertThat(codec.decode(KEY, string).size()).isEqualTo(Long.MAX_VALUE); |
| | | } |
| | | |
| | | @Test(enabled = false, dataProvider = "codec") |
| | |
| | | @Test(expectedExceptions = NullPointerException.class) |
| | | public void testUndefinedCannotCreateWithNull() |
| | | { |
| | | newUndefinedSetWithSize(null, 1); |
| | | newUndefinedSetWithKey(null); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedAdd() |
| | | public void testUndefinedAddDoesNothing() |
| | | { |
| | | EntryIDSet undefined = newUndefinedWithInitialSize(); |
| | | |
| | | final EntryIDSet undefined = newUndefinedSet(); |
| | | assertThat(undefined.add(new EntryID(4))).isTrue(); |
| | | assertThat(undefined.size()).isEqualTo(UNDEFINED_INITIAL_SIZE + 1); |
| | | assertThat(undefined.size()).isEqualTo(Long.MAX_VALUE); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedAddAll() |
| | | public void testUndefinedAddAllDoesNothing() |
| | | { |
| | | EntryIDSet undefined = newUndefinedWithInitialSize(); |
| | | final EntryIDSet undefined = newUndefinedSet(); |
| | | |
| | | undefined.addAll(newDefinedSet()); |
| | | assertThat(newUndefinedWithInitialSize().size()).isEqualTo(UNDEFINED_INITIAL_SIZE); |
| | | assertThat(undefined.size()).isEqualTo(Long.MAX_VALUE); |
| | | |
| | | undefined.addAll(newDefinedSet(2, 4, 6)); |
| | | assertThat(undefined.size()).isEqualTo(UNDEFINED_INITIAL_SIZE + 3); |
| | | assertThat(undefined.size()).isEqualTo(Long.MAX_VALUE); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedRemove() |
| | | public void testUndefinedRemoveDoesNothing() |
| | | { |
| | | EntryIDSet undefined = newUndefinedWithInitialSize(); |
| | | |
| | | final EntryIDSet undefined = newUndefinedSet(); |
| | | assertThat(undefined.remove(new EntryID(4))).isTrue(); |
| | | assertThat(undefined.size()).isEqualTo(UNDEFINED_INITIAL_SIZE - 1); |
| | | assertThat(undefined.size()).isEqualTo(Long.MAX_VALUE); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedRemoveUnderflow() |
| | | public void testUndefinedDeleteAllDoesNothing() |
| | | { |
| | | EntryIDSet undefined = newUndefinedSetWithSize(ByteString.valueOf("test"), 0); |
| | | |
| | | assertThat(undefined.remove(new EntryID(4))).isTrue(); |
| | | assertThat(undefined.size()).isEqualTo(0); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedDeleteAll() |
| | | { |
| | | EntryIDSet undefined = newUndefinedWithInitialSize(); |
| | | |
| | | final EntryIDSet undefined = newUndefinedSet(); |
| | | undefined.removeAll(newDefinedSet(20, 21, 22)); |
| | | assertThat(undefined.size()).isEqualTo(UNDEFINED_INITIAL_SIZE - 3); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedDeleteAllUnderflow() |
| | | { |
| | | EntryIDSet undefined = newUndefinedSetWithSize(ByteString.valueOf("test"), 0); |
| | | |
| | | undefined.removeAll(newDefinedSet(20, 21, 22)); |
| | | assertThat(undefined.size()).isEqualTo(0); |
| | | assertThat(undefined.size()).isEqualTo(Long.MAX_VALUE); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedContain() |
| | | { |
| | | assertThat(newUndefinedWithInitialSize().contains(new EntryID(4))).isTrue(); |
| | | assertThat(newUndefinedSet().contains(new EntryID(4))).isTrue(); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedIterator() |
| | | { |
| | | assertThat(newUndefinedWithInitialSize().iterator().hasNext()).isFalse(); |
| | | assertThat(newUndefinedSet().iterator().hasNext()).isFalse(); |
| | | } |
| | | |
| | | @Test |
| | | public void testUndefinedIteratorWithBegin() |
| | | { |
| | | assertThat(newUndefinedWithInitialSize().iterator(new EntryID(8)).hasNext()).isFalse(); |
| | | assertThat(newUndefinedSet().iterator(new EntryID(8)).hasNext()).isFalse(); |
| | | } |
| | | |
| | | @Test |
| | |
| | | assertThat(newUndefinedSet().isDefined()).isFalse(); |
| | | assertThat(newUndefinedSetWithKey(KEY).isDefined()).isFalse(); |
| | | assertThat(newUndefinedSetWithKey(KEY).size()).isEqualTo(Long.MAX_VALUE); |
| | | |
| | | assertThat(newUndefinedSetWithSize(KEY, 42).isDefined()).isFalse(); |
| | | assertThat(newUndefinedSetWithSize(KEY, 42).size()).isEqualTo(42); |
| | | } |
| | | |
| | | @Test |
| | |
| | | assertIdsEquals(retained, 1, 3, 5, 7, 9); |
| | | } |
| | | |
| | | private static EntryIDSet newUndefinedWithInitialSize() |
| | | { |
| | | return newUndefinedSetWithSize(ByteString.valueOf("test"), UNDEFINED_INITIAL_SIZE); |
| | | } |
| | | |
| | | @DataProvider(name = "codecs") |
| | | public static Object[][] codecs() { |
| | | return new Object[][] { { CODEC_V1 }, { CODEC_V2 } }; |
| New file |
| | |
| | | /* |
| | | * CDDL HEADER START |
| | | * |
| | | * The contents of this file are subject to the terms of the |
| | | * Common Development and Distribution License, Version 1.0 only |
| | | * (the "License"). You may not use this file except in compliance |
| | | * with the License. |
| | | * |
| | | * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt |
| | | * or http://forgerock.org/license/CDDLv1.0.html. |
| | | * See the License for the specific language governing permissions |
| | | * and limitations under the License. |
| | | * |
| | | * When distributing Covered Code, include this CDDL HEADER in each |
| | | * file and include the License file at legal-notices/CDDLv1_0.txt. |
| | | * If applicable, add the following below this CDDL HEADER, with the |
| | | * fields enclosed by brackets "[]" replaced with your own identifying |
| | | * information: |
| | | * Portions Copyright [yyyy] [name of copyright owner] |
| | | * |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Copyright 2015 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.assertj.core.api.Assertions.*; |
| | | import static org.mockito.Mockito.*; |
| | | |
| | | import java.util.Random; |
| | | import java.util.concurrent.Callable; |
| | | import java.util.concurrent.ExecutorService; |
| | | import java.util.concurrent.Executors; |
| | | import java.util.concurrent.TimeUnit; |
| | | |
| | | import org.forgerock.opendj.config.server.ConfigException; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.util.promise.NeverThrowsException; |
| | | import org.forgerock.util.promise.PromiseImpl; |
| | | import org.opends.server.DirectoryServerTestCase; |
| | | import org.opends.server.TestCaseUtils; |
| | | import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType; |
| | | import org.opends.server.admin.std.server.BackendIndexCfg; |
| | | import org.opends.server.admin.std.server.PersistitBackendCfg; |
| | | import org.opends.server.backends.persistit.PersistItStorage; |
| | | import org.opends.server.backends.pluggable.spi.ReadOperation; |
| | | import org.opends.server.backends.pluggable.spi.ReadableTransaction; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteOperation; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.core.MemoryQuota; |
| | | import org.opends.server.core.ServerContext; |
| | | import org.opends.server.extensions.DiskSpaceMonitor; |
| | | import org.opends.server.types.DN; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.testng.annotations.AfterClass; |
| | | import org.testng.annotations.AfterMethod; |
| | | import org.testng.annotations.BeforeClass; |
| | | import org.testng.annotations.BeforeMethod; |
| | | import org.testng.annotations.Test; |
| | | |
| | | @Test(groups = { "precommit", "pluggablebackend" }, sequential = true) |
| | | public class ID2CountTest extends DirectoryServerTestCase |
| | | { |
| | | private final TreeName id2CountTreeName = new TreeName("base-dn", "index-id"); |
| | | private ExecutorService parallelExecutor; |
| | | private ID2Count id2Count; |
| | | private PersistItStorage storage; |
| | | |
| | | @BeforeClass |
| | | public void startFakeServer() throws Exception { |
| | | TestCaseUtils.startFakeServer(); |
| | | } |
| | | |
| | | @AfterClass |
| | | public void stopFakeServer() throws Exception { |
| | | TestCaseUtils.shutdownFakeServer(); |
| | | } |
| | | |
| | | @BeforeMethod |
| | | public void setUp() throws Exception |
| | | { |
| | | ServerContext serverContext = mock(ServerContext.class); |
| | | when(serverContext.getMemoryQuota()).thenReturn(new MemoryQuota()); |
| | | when(serverContext.getDiskSpaceMonitor()).thenReturn(mock(DiskSpaceMonitor.class)); |
| | | |
| | | storage = new PersistItStorage(createBackendCfg(), serverContext); |
| | | org.opends.server.backends.pluggable.spi.Importer importer = storage.startImport(); |
| | | importer.createTree(id2CountTreeName); |
| | | importer.close(); |
| | | |
| | | storage.open(); |
| | | |
| | | id2Count = new ID2Count(id2CountTreeName); |
| | | |
| | | parallelExecutor = Executors.newFixedThreadPool(32); |
| | | } |
| | | |
| | | @AfterMethod |
| | | public void tearDown() { |
| | | storage.close(); |
| | | storage.removeStorageFiles(); |
| | | } |
| | | |
| | | @Test |
| | | public void testConcurrentAddDelta() throws Exception |
| | | { |
| | | final long expected = stressCounter(8192, id(1), parallelExecutor); |
| | | waitExecutorTermination(); |
| | | |
| | | assertThat(getCounter(id(1))).isEqualTo(expected); |
| | | assertThat(getTotalCounter()).isEqualTo(expected); |
| | | } |
| | | |
| | | @Test |
| | | public void testConcurrentTotalCounter() throws Exception |
| | | { |
| | | long totalExpected = 0; |
| | | for(int i = 0 ; i < 64 ; i++) { |
| | | totalExpected += stressCounter(128, id(i), parallelExecutor); |
| | | } |
| | | waitExecutorTermination(); |
| | | |
| | | assertThat(getTotalCounter()).isEqualTo(totalExpected); |
| | | } |
| | | |
| | | @Test |
| | | public void testDeleteCounterDecrementTotalCounter() throws Exception |
| | | { |
| | | addDelta(id(0), 1024); |
| | | addDelta(id(1), 1024); |
| | | addDelta(id(2), 1024); |
| | | addDelta(id(3), 1024); |
| | | assertThat(getTotalCounter()).isEqualTo(4096); |
| | | |
| | | assertThat(deleteCount(id(0))).isEqualTo(1024); |
| | | assertThat(getTotalCounter()).isEqualTo(3072); |
| | | |
| | | assertThat(deleteCount(id(1))).isEqualTo(1024); |
| | | assertThat(deleteCount(id(2))).isEqualTo(1024); |
| | | assertThat(deleteCount(id(3))).isEqualTo(1024); |
| | | assertThat(getTotalCounter()).isEqualTo(0); |
| | | } |
| | | |
| | | @Test |
| | | public void testGetCounterNonExistingKey() throws Exception |
| | | { |
| | | assertThat(getCounter(id(987654))).isEqualTo(0); |
| | | } |
| | | |
| | | private void waitExecutorTermination() throws InterruptedException |
| | | { |
| | | parallelExecutor.shutdown(); |
| | | parallelExecutor.awaitTermination(30, TimeUnit.SECONDS); |
| | | } |
| | | |
| | | private long stressCounter(final int numIterations, final EntryID key, final ExecutorService exec) |
| | | { |
| | | final Random r = new Random(); |
| | | long expected = 0; |
| | | for(int i = 0 ; i < numIterations ; i++) { |
| | | final long delta = r.nextLong(); |
| | | expected += delta; |
| | | |
| | | exec.submit(new Callable<Void>() |
| | | { |
| | | @Override |
| | | public Void call() throws Exception |
| | | { |
| | | addDelta(key, delta); |
| | | return null; |
| | | } |
| | | }); |
| | | } |
| | | return expected; |
| | | } |
| | | |
| | | private long deleteCount(final EntryID key) throws Exception { |
| | | final PromiseImpl<Long, NeverThrowsException> l = PromiseImpl.create(); |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | l.handleResult(id2Count.deleteCount(txn, key)); |
| | | } |
| | | }); |
| | | return l.get(); |
| | | } |
| | | |
| | | private void addDelta(final EntryID key, final long delta) throws Exception { |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | id2Count.addDelta(txn, key, delta); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private long getCounter(final EntryID key) throws Exception { |
| | | return storage.read(new ReadOperation<Long>() |
| | | { |
| | | @Override |
| | | public Long run(ReadableTransaction txn) throws Exception |
| | | { |
| | | return id2Count.getCount(txn, key); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | private long getTotalCounter() throws Exception { |
| | | return storage.read(new ReadOperation<Long>() |
| | | { |
| | | @Override |
| | | public Long run(ReadableTransaction txn) throws Exception |
| | | { |
| | | return id2Count.getTotalCount(txn); |
| | | } |
| | | }); |
| | | } |
| | | |
| | | public static EntryID id(long id) { |
| | | return new EntryID(id); |
| | | } |
| | | |
| | | private PersistitBackendCfg createBackendCfg() throws ConfigException, DirectoryException |
| | | { |
| | | String homeDirName = "pdb_test"; |
| | | PersistitBackendCfg backendCfg = mock(PersistitBackendCfg.class); |
| | | |
| | | when(backendCfg.getBackendId()).thenReturn("persTest" + homeDirName); |
| | | when(backendCfg.getDBDirectory()).thenReturn(homeDirName); |
| | | when(backendCfg.getDBDirectoryPermissions()).thenReturn("755"); |
| | | when(backendCfg.getDBCacheSize()).thenReturn(0L); |
| | | when(backendCfg.getDBCachePercent()).thenReturn(20); |
| | | when(backendCfg.getBaseDN()).thenReturn(TestCaseUtils.newSortedSet(DN.valueOf("dc=test,dc=com"))); |
| | | when(backendCfg.dn()).thenReturn(DN.valueOf("dc=test,dc=com")); |
| | | when(backendCfg.listBackendIndexes()).thenReturn(new String[] { "sn" }); |
| | | when(backendCfg.listBackendVLVIndexes()).thenReturn(new String[0]); |
| | | |
| | | BackendIndexCfg indexCfg = mock(BackendIndexCfg.class); |
| | | when(indexCfg.getIndexType()).thenReturn(TestCaseUtils.newSortedSet(IndexType.PRESENCE, IndexType.EQUALITY)); |
| | | when(indexCfg.getAttribute()).thenReturn(DirectoryServer.getAttributeType("sn")); |
| | | when(backendCfg.getBackendIndex("sn")).thenReturn(indexCfg); |
| | | |
| | | return backendCfg; |
| | | } |
| | | |
| | | } |
| | |
| | | when(backendCfg.getBaseDN()).thenReturn(newSortedSet(testBaseDN)); |
| | | when(backendCfg.listBackendIndexes()).thenReturn(backendIndexes); |
| | | when(backendCfg.listBackendVLVIndexes()).thenReturn(backendVlvIndexes); |
| | | when(backendCfg.isSubordinateIndexesEnabled()).thenReturn(true); |
| | | |
| | | BackendIndexCfg indexCfg = mock(BackendIndexCfg.class); |
| | | when(indexCfg.getIndexType()).thenReturn(newSortedSet(IndexType.PRESENCE, IndexType.EQUALITY)); |
| | |
| | | assertEquals(backend.hasSubordinates(DN.valueOf("dc=a")), ConditionResult.UNDEFINED, |
| | | "Subordinates query on unknown baseDN should return UNDEFINED."); |
| | | |
| | | assertEquals(backend.numSubordinates(testBaseDN, false), 1); |
| | | assertEquals(backend.numSubordinates(testBaseDN, true), getTotalNumberOfLDIFEntries() - 1, "Wrong DIT count."); |
| | | assertEquals(backend.getNumberOfChildren(testBaseDN), 1); |
| | | assertEquals(backend.getNumberOfEntriesInBaseDN(testBaseDN), getTotalNumberOfLDIFEntries(), "Wrong DIT count."); |
| | | assertEquals(backend.hasSubordinates(searchDN), ConditionResult.FALSE, |
| | | "Leaf entry should not have any subordinates."); |
| | | } |
| | |
| | | |
| | | backend.openBackend(); |
| | | assertEquals(backend.getEntryCount(), ldifNumberOfEntries, "Not enough entries in DIT."); |
| | | /** +1 for the testBaseDN itself */ |
| | | assertEquals(backend.getNumberOfEntriesInBaseDN(testBaseDN), ldifNumberOfEntries, "Not enough entries in DIT."); |
| | | assertEquals(backend.getNumberOfChildren(testBaseDN), 1, "Not enough entries in DIT."); |
| | | /** -2 for baseDn and People entry */ |
| | | assertEquals(backend.getNumberOfChildren(testBaseDN.child(DN.valueOf("ou=People"))), ldifNumberOfEntries - 2, "Not enough entries in DIT."); |
| | | } |
| | | |
| | | @Test(dependsOnMethods = {"testImportLDIF"}) |
| | |
| | | import org.opends.server.extensions.DiskSpaceMonitor; |
| | | import org.opends.server.types.DN; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.testng.annotations.AfterClass; |
| | | import org.testng.annotations.AfterMethod; |
| | | import org.testng.annotations.BeforeClass; |
| | | import org.testng.annotations.BeforeMethod; |
| | |
| | | |
    /**
     * Starts the directory server and the in-process fake server before any test in this class runs.
     * <p>
     * The real server is started first; the fake server presumably depends on it being up —
     * NOTE(review): confirm ordering requirement against {@code TestCaseUtils}.
     *
     * @throws Exception
     *           If either server fails to start.
     */
    @BeforeClass
    public void startServer() throws Exception {
        TestCaseUtils.startServer();
        TestCaseUtils.startFakeServer();
    }

    /**
     * Shuts down the fake server started by {@link #startServer()} once all tests in this class
     * have completed. The real server is deliberately left running for other test classes.
     *
     * @throws Exception
     *           If the fake server cannot be shut down cleanly.
     */
    @AfterClass
    public void stopServer() throws Exception {
        TestCaseUtils.shutdownFakeServer();
    }
| | | |
| | | @BeforeMethod |
| | |
| | | |
| | | ServerContext serverContext = mock(ServerContext.class); |
| | | when(serverContext.getMemoryQuota()).thenReturn(new MemoryQuota()); |
| | | when(serverContext.getDiskSpaceMonitor()).thenReturn(new DiskSpaceMonitor()); |
| | | when(serverContext.getDiskSpaceMonitor()).thenReturn(mock(DiskSpaceMonitor.class)); |
| | | |
| | | storage = new PersistItStorage(createBackendCfg(), serverContext); |
| | | org.opends.server.backends.pluggable.spi.Importer importer = storage.startImport(); |
| | |
| | | when(backendCfg.getDBDirectoryPermissions()).thenReturn("755"); |
| | | when(backendCfg.getDBCacheSize()).thenReturn(0L); |
| | | when(backendCfg.getDBCachePercent()).thenReturn(20); |
| | | when(backendCfg.isSubordinateIndexesEnabled()).thenReturn(true); |
| | | when(backendCfg.getBaseDN()).thenReturn(TestCaseUtils.newSortedSet(DN.valueOf("dc=test,dc=com"))); |
| | | when(backendCfg.dn()).thenReturn(DN.valueOf("dc=test,dc=com")); |
| | | when(backendCfg.listBackendIndexes()).thenReturn(new String[] { "sn" }); |
| | |
| | | assertThat(actual).containsAll(asList(expected)); |
| | | } |
| | | |
| | | public static void assertIsEmpty(EntryIDSet actual) |
| | | { |
| | | assertIdsEquals(actual); |
| | | } |
| | | |
| | | public static void assertIdsEquals(EntryIDSet actual, long... expected) |
| | | { |
| | | // needed is undefined EntryIDSet" => "needed since undefined EntryIDSet |
| | | assertThat(actual.isDefined()); |
| | | assertIdsEquals(actual.iterator(), expected); |
| | | } |
| | | |
| | |
| | | |
| | | TaskBackend taskBackend = |
| | | (TaskBackend) DirectoryServer.getBackend(DN.valueOf("cn=tasks")); |
| | | long tasksCountBefore = taskBackend.numSubordinates(DN.valueOf( |
| | | "cn=Scheduled Tasks,cn=tasks"), true); |
| | | long tasksCountBefore = taskBackend.getNumberOfEntriesInBaseDN(DN.valueOf("cn=Scheduled Tasks,cn=tasks")); |
| | | |
| | | assertTrue(addRecurringTask(taskID, taskSchedule)); |
| | | |
| | | // Make sure recurring task iteration got scheduled. |
| | | long tasksCountAfter = taskBackend.numSubordinates(DN.valueOf( |
| | | "cn=Scheduled Tasks,cn=tasks"), true); |
| | | long tasksCountAfter = taskBackend.getNumberOfEntriesInBaseDN(DN.valueOf("cn=Scheduled Tasks,cn=tasks")); |
| | | assertEquals(tasksCountAfter, tasksCountBefore + 1); |
| | | |
| | | // Perform a modification to update a non-state attribute. |
| | |
| | | assertFalse(DirectoryServer.entryExists(DN.valueOf(taskDN))); |
| | | |
| | | // Make sure recurring task iteration got canceled and removed. |
| | | tasksCountAfter = taskBackend.numSubordinates(DN.valueOf( |
| | | "cn=Scheduled Tasks,cn=tasks"), true); |
| | | tasksCountAfter = taskBackend.getNumberOfEntriesInBaseDN(DN.valueOf("cn=Scheduled Tasks,cn=tasks")); |
| | | assertEquals(tasksCountAfter, tasksCountBefore); |
| | | } |
| | | |