From 8196da2152209c8c628611aeeb4adae32079921e Mon Sep 17 00:00:00 2001
From: Jean-Noël Rouvignac <jean-noel.rouvignac@forgerock.com>
Date: Wed, 25 Nov 2015 15:09:53 +0000
Subject: [PATCH] OPENDJ-2337 Remove old JE local-db backend code and JE changelog once we are happy with the new implementations
---
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VLVIndexPanel.java | 44
opendj-server-legacy/resource/schema/02-config.ldif | 65 -
opendj-server-legacy/src/test/java/org/opends/server/tools/ArgumentParserToolsTestCase.java | 11
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTableModel.java | 47
opendj-server-legacy/tests/unit-tests-testng/resource/config-changes.ldif | 273 ++--
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewVLVIndexPanel.java | 29
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/DBEnvironmentMonitoringPanel.java | 13
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/VLVIndexDescriptor.java | 58 -
opendj-server-legacy/src/test/java/org/opends/server/monitors/BackendMonitorTestCase.java | 22
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewIndexPanel.java | 28
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/IndexPanel.java | 74
opendj-server-legacy/src/test/java/org/opends/server/TestCaseUtils.java | 67 -
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromDirContext.java | 115 -
opendj-server-legacy/src/main/java/org/opends/server/tools/BackendCreationHelper.java | 40
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VerifyIndexPanel.java | 18
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractIndexPanel.java | 48
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractVLVIndexPanel.java | 54
opendj-server-legacy/src/main/java/org/opends/server/tools/BackendTypeHelper.java | 21
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/RebuildIndexPanel.java | 12
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteBaseDNAndBackendTask.java | 32
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/BackendDescriptor.java | 4
/dev/null | 1648 -------------------------------
opendj-server-legacy/src/main/java/org/forgerock/opendj/adapter/server3x/Converters.java | 30
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTypeDescriptor.java | 125 --
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromFile.java | 113 -
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewBaseDNPanel.java | 41
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/StatusGenericPanel.java | 9
opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteIndexTask.java | 30
opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java | 75 -
29 files changed, 349 insertions(+), 2,797 deletions(-)
diff --git a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBBackendConfiguration.xml b/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBBackendConfiguration.xml
deleted file mode 100644
index b965677..0000000
--- a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBBackendConfiguration.xml
+++ /dev/null
@@ -1,1093 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ! CDDL HEADER START
- !
- ! The contents of this file are subject to the terms of the
- ! Common Development and Distribution License, Version 1.0 only
- ! (the "License"). You may not use this file except in compliance
- ! with the License.
- !
- ! You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- ! or http://forgerock.org/license/CDDLv1.0.html.
- ! See the License for the specific language governing permissions
- ! and limitations under the License.
- !
- ! When distributing Covered Code, include this CDDL HEADER in each
- ! file and include the License file at legal-notices/CDDLv1_0.txt.
- ! If applicable, add the following below this CDDL HEADER, with the
- ! fields enclosed by brackets "[]" replaced with your own identifying
- ! information:
- ! Portions Copyright [yyyy] [name of copyright owner]
- !
- ! CDDL HEADER END
- !
- !
- ! Copyright 2007-2010 Sun Microsystems, Inc.
- ! Portions Copyright 2010-2015 ForgeRock AS.
- ! -->
-<adm:managed-object name="local-db-backend"
- plural-name="local-db-backends" package="org.forgerock.opendj.server.config"
- extends="backend" xmlns:adm="http://opendj.forgerock.org/admin"
- xmlns:ldap="http://opendj.forgerock.org/admin-ldap"
- xmlns:cli="http://opendj.forgerock.org/admin-cli">
- <adm:synopsis>
- The
- <adm:user-friendly-name />
- uses the Berkeley DB Java Edition to store user-provided data in a local
- repository.
- </adm:synopsis>
- <adm:description>
- It is the traditional "directory server" backend and is similar to
- the backends provided by the Sun Java System Directory Server. The
- <adm:user-friendly-name />
- stores the entries in an encoded form and also provides indexes that
- can be used to quickly locate target entries based on different
- kinds of criteria.
- </adm:description>
- <adm:constraint>
- <adm:synopsis>
- The properties db-txn-no-sync and db-txn-write-no-sync are
- mutually exclusive and cannot be both set at the same time.
- </adm:synopsis>
- <adm:condition>
- <adm:implies>
- <adm:contains property="enabled" value="true" />
- <adm:not>
- <adm:and>
- <adm:contains property="db-txn-no-sync" value="true" />
- <adm:contains property="db-txn-write-no-sync" value="true" />
- </adm:and>
- </adm:not>
- </adm:implies>
- </adm:condition>
- </adm:constraint>
- <adm:profile name="ldap">
- <ldap:object-class>
- <ldap:name>ds-cfg-local-db-backend</ldap:name>
- <ldap:superior>ds-cfg-backend</ldap:superior>
- </ldap:object-class>
- </adm:profile>
- <adm:relation name="local-db-index">
- <adm:one-to-many naming-property="attribute">
- <adm:default-managed-object name="aci">
- <adm:property name="index-type">
- <adm:value>presence</adm:value>
- </adm:property>
- <adm:property name="attribute">
- <adm:value>aci</adm:value>
- </adm:property>
- </adm:default-managed-object>
- <adm:default-managed-object name="entryUUID">
- <adm:property name="index-type">
- <adm:value>equality</adm:value>
- </adm:property>
- <adm:property name="attribute">
- <adm:value>entryUUID</adm:value>
- </adm:property>
- </adm:default-managed-object>
- <adm:default-managed-object name="objectClass">
- <adm:property name="index-type">
- <adm:value>equality</adm:value>
- </adm:property>
- <adm:property name="attribute">
- <adm:value>objectClass</adm:value>
- </adm:property>
- </adm:default-managed-object>
- <adm:default-managed-object name="ds-sync-hist">
- <adm:property name="index-type">
- <adm:value>ordering</adm:value>
- </adm:property>
- <adm:property name="attribute">
- <adm:value>ds-sync-hist</adm:value>
- </adm:property>
- </adm:default-managed-object>
- <adm:default-managed-object name="ds-sync-conflict">
- <adm:property name="index-type">
- <adm:value>equality</adm:value>
- </adm:property>
- <adm:property name="attribute">
- <adm:value>ds-sync-conflict</adm:value>
- </adm:property>
- </adm:default-managed-object>
- </adm:one-to-many>
- <adm:profile name="ldap">
- <ldap:rdn-sequence>cn=Index</ldap:rdn-sequence>
- </adm:profile>
- <adm:profile name="cli">
- <cli:relation>
- <cli:default-property name="index-type" />
- <cli:default-property name="index-entry-limit" />
- <cli:default-property name="index-extensible-matching-rule" />
- </cli:relation>
- </adm:profile>
- </adm:relation>
- <adm:relation name="local-db-vlv-index">
- <adm:one-to-many naming-property="name" />
- <adm:profile name="ldap">
- <ldap:rdn-sequence>cn=VLV Index</ldap:rdn-sequence>
- </adm:profile>
- <adm:profile name="cli">
- <cli:relation>
- <cli:default-property name="base-dn" />
- <cli:default-property name="scope" />
- <cli:default-property name="filter" />
- <cli:default-property name="sort-order" />
- </cli:relation>
- </adm:profile>
- </adm:relation>
- <adm:property-override name="java-class" advanced="true">
- <adm:default-behavior>
- <adm:defined>
- <adm:value>
- org.opends.server.backends.jeb.BackendImpl
- </adm:value>
- </adm:defined>
- </adm:default-behavior>
- </adm:property-override>
- <adm:property-override name="writability-mode">
- <adm:default-behavior>
- <adm:defined>
- <adm:value>enabled</adm:value>
- </adm:defined>
- </adm:default-behavior>
- </adm:property-override>
- <adm:property name="db-directory" mandatory="true">
- <adm:TODO>Default this to the db/backend-id</adm:TODO>
- <adm:synopsis>
- Specifies the path to the filesystem directory that is used
- to hold the Berkeley DB Java Edition database files containing the
- data for this backend.
- </adm:synopsis>
- <adm:description>
- The path may be either an absolute path or a path relative to the
- directory containing the base of the <adm:product-name /> directory server
- installation. The path may be any valid directory path in which
- the server has appropriate permissions to read and write files and
- has sufficient space to hold the database contents.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>db</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:string />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-directory</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="compact-encoding">
- <adm:synopsis>
- Indicates whether the backend should use a compact form when
- encoding entries by compressing the attribute descriptions and
- object class sets.
- </adm:synopsis>
- <adm:description>
- Note that this property applies only to the entries themselves and
- does not impact the index data.
- </adm:description>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- Changes to this setting take effect only for writes that
- occur after the change is made. It is not retroactively
- applied to existing data.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>true</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-compact-encoding</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="entries-compressed" advanced="true">
- <adm:synopsis>
- Indicates whether the backend should attempt to compress entries
- before storing them in the database.
- </adm:synopsis>
- <adm:description>
- Note that this property applies only to the entries themselves and
- does not impact the index data. Further, the effectiveness of the
- compression is based on the type of data contained in the
- entry.
- </adm:description>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- Changes to this setting take effect only for writes that
- occur after the change is made. It is not retroactively
- applied to existing data.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>false</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-entries-compressed</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="import-queue-size" advanced="true">
- <adm:synopsis>
- This parameter has been deprecated in OpenDS 2.1 and will be removed
- in <adm:product-name /> 3.0. It is only being kept for migration ease and is ignored
- in OpenDS versions after 2.0.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- This parameter has been deprecated in OpenDS 2.1 and will be removed
- in <adm:product-name /> 3.0. It is only being kept for migration ease and is ignored
- in OpenDS versions after 2.0.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>100</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-import-queue-size</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="import-thread-count" advanced="true">
- <adm:synopsis>
- This parameter has been deprecated in OpenDS 2.1 and will be removed
- in <adm:product-name /> 3.0. It is only being kept for migration ease and is ignored
- in OpenDS versions after 2.0.
- </adm:synopsis>
- <adm:description>
- This parameter has been deprecated in OpenDS 2.1 and will be removed
- in <adm:product-name /> 3.0. It is only being kept for migration ease and is ignored
- in OpenDS versions after 2.0.
- </adm:description>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- Changes do not take effect for any import that may already
- be in progress.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>8</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-import-thread-count</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="index-entry-limit">
- <adm:synopsis>
- Specifies the maximum number of entries that is allowed to
- match a given index key before that particular index key is no
- longer maintained.
- </adm:synopsis>
- <adm:description>
- This property is analogous to the ALL IDs threshold in the Sun
- Java System Directory Server. Note that this is the default limit
- for the backend, and it may be overridden on a per-attribute
- basis.A value of 0 means there is no limit.
- </adm:description>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- If any index keys have already reached this limit, indexes
- need to be rebuilt before they are allowed to use the
- new limit.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>4000</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="0" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-index-entry-limit</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-directory-permissions" advanced="true">
- <adm:synopsis>
- Specifies the permissions that should be applied to the directory
- containing the server database files.
- </adm:synopsis>
- <adm:description>
- They should be expressed as three-digit octal values, which is the
- traditional representation for UNIX file permissions. The three
- digits represent the permissions that are available for the
- directory's owner, group members, and other users (in that order),
- and each digit is the octal representation of the read, write, and
- execute bits. Note that this only impacts permissions on the
- database directory and not on the files written into that
- directory. On UNIX systems, the user's umask controls
- permissions given to the database files.
- </adm:description>
- <adm:requires-admin-action>
- <adm:server-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>700</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:string>
- <adm:pattern>
- <adm:regex>^7[0-7][0-7]$</adm:regex>
- <adm:usage>MODE</adm:usage>
- <adm:synopsis>
- Any octal value between 700 and 777 (the owner must always
- have read, write, and execute permissions on the directory).
- </adm:synopsis>
- </adm:pattern>
- </adm:string>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-directory-permissions</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="preload-time-limit" advanced="true">
- <adm:synopsis>
- Specifies the length of time that the backend is allowed to
- spend "pre-loading" data when it is initialized.
- </adm:synopsis>
- <adm:description>
- The pre-load process is used to pre-populate the database
- cache, so that it can be more quickly available when the server is
- processing requests. A duration of zero means there is no
- pre-load.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>0s</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:duration base-unit="ms" lower-limit="0" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-preload-time-limit</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-cache-percent">
- <adm:synopsis>
- Specifies the percentage of JVM memory to allocate to the database cache.
- </adm:synopsis>
- <adm:description>
- Specifies the percentage of memory available to the JVM that
- should be used for caching database contents. Note that this is
- only used if the value of the db-cache-size property is set to
- "0 MB". Otherwise, the value of that property is used instead
- to control the cache size configuration.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>50</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" upper-limit="90" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-cache-percent</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-cache-size">
- <adm:synopsis>
- The amount of JVM memory to allocate to the database cache.
- </adm:synopsis>
- <adm:description>
- Specifies the amount of memory that should be used for caching
- database contents. A value of "0 MB" indicates that the
- db-cache-percent property should be used instead to specify the
- cache size.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>0 MB</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:size lower-limit="0 MB" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-cache-size</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-cleaner-min-utilization" advanced="true">
- <adm:synopsis>
- Specifies the occupancy percentage for "live" data in this backend's
- database.
- </adm:synopsis>
- <adm:description>
- When the amount of "live" data in the database drops below this value,
- cleaners will act to increase the occupancy percentage by compacting
- the database.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>50</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="0" upper-limit="90" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-cleaner-min-utilization</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-run-cleaner" advanced="true">
- <adm:synopsis>
- Indicates whether the cleaner threads should be enabled to compact
- the database.
- </adm:synopsis>
- <adm:description>
- The cleaner threads are used to periodically compact the
- database when it reaches a percentage of occupancy lower than the
- amount specified by the db-cleaner-min-utilization property. They identify
- database files with a low percentage of live data, and relocate their
- remaining live data to the end of the log.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>true</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-run-cleaner</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-evictor-lru-only" advanced="true">
- <adm:synopsis>
- Indicates whether the database should evict existing data from the
- cache based on an LRU policy (where the least recently used
- information will be evicted first).
- </adm:synopsis>
- <adm:description>
- If set to "false", then the eviction keeps internal nodes of the underlying
- Btree in the cache over leaf nodes, even if the leaf nodes have
- been accessed more recently. This may be a better configuration
- for databases in which only a very small portion of the data is
- cached.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>false</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-evictor-lru-only</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-evictor-nodes-per-scan" advanced="true">
- <adm:synopsis>
- Specifies the number of Btree nodes that should be evicted from
- the cache in a single pass if it is determined that it is
- necessary to free existing data in order to make room for new
- information.
- </adm:synopsis>
- <adm:description>
- Changes to this property do not take effect until the backend is
- restarted. It is recommended that you also change this property
- when you set db-evictor-lru-only to false. This setting controls
- the number of Btree nodes that are considered, or sampled, each
- time a node is evicted. A setting of 10 often produces good
- results, but this may vary from application to application. The
- larger the nodes per scan, the more accurate the algorithm.
- However, don't set it too high. When considering larger numbers of
- nodes for each eviction, the evictor may delay the completion of a
- given database operation, which impacts the response time of the
- application thread. In JE 4.1 and later, setting this value too high
- in an application that is largely CPU bound can reduce the
- effectiveness of cache eviction. It's best to start with the default
- value, and increase it gradually to see if it is beneficial for your
- application.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>10</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" upper-limit="1000" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-evictor-nodes-per-scan</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-evictor-core-threads" advanced="true">
- <adm:synopsis>
- Specifies the core number of threads in the eviction thread pool.
- </adm:synopsis>
- <adm:description>
- Specifies the core number of threads in the eviction thread pool.
- These threads help keep memory usage within cache bounds,
- offloading work from application threads. db-evictor-core-threads,
- db-evictor-max-threads and db-evictor-keep-alive are used to configure
- the core, max and keepalive attributes for the eviction thread pool.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>1</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="0" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-evictor-core-threads</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-evictor-max-threads" advanced="true">
- <adm:synopsis>
- Specifies the maximum number of threads in the eviction thread pool.
- </adm:synopsis>
- <adm:description>
- Specifies the maximum number of threads in the eviction thread pool.
- These threads help keep memory usage within cache bounds,
- offloading work from application threads. db-evictor-core-threads,
- db-evictor-max-threads and db-evictor-keep-alive are used to configure
- the core, max and keepalive attributes for the eviction thread pool.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>10</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-evictor-max-threads</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-evictor-keep-alive" advanced="true">
- <adm:synopsis>
- The duration that excess threads in the eviction thread pool will
- stay idle. After this period, idle threads will terminate.
- </adm:synopsis>
- <adm:description>
- The duration that excess threads in the eviction thread pool will
- stay idle. After this period, idle threads will terminate.
- db-evictor-core-threads, db-evictor-max-threads and
- db-evictor-keep-alive are used to configure the core, max and
- keepalive attributes for the eviction thread pool.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>600s</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:duration base-unit="s" lower-limit="1" upper-limit="86400" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-evictor-keep-alive</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-log-file-max" advanced="true">
- <adm:synopsis>
- Specifies the maximum size for a database log file.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>100mb</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:size lower-limit="1mb" upper-limit="4gib" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-log-file-max</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-log-filecache-size" advanced="true">
- <adm:synopsis>
- Specifies the size of the file handle cache.
- </adm:synopsis>
- <adm:description>
- The file handle cache is used to keep as much opened log files
- as possible. When the cache is smaller than the number of logs,
- the database needs to close some handles and open log files it needs,
- resulting in less optimal performances. Ideally, the size of the cache
- should be higher than the number of files contained in the database.
- Make sure the OS number of open files per process is also tuned
- appropriately.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>100</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="3" upper-limit="2147483647" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-log-filecache-size</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-logging-file-handler-on" advanced="true">
- <adm:synopsis>
- Indicates whether the database should maintain a je.info file in
- the same directory as the database log directory.
- </adm:synopsis>
- <adm:description>
- This file contains information about the internal processing
- performed by the underlying database.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>true</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-logging-file-handler-on</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-logging-level" advanced="true">
- <adm:TODO>Use an enumeration</adm:TODO>
- <adm:synopsis>
- Specifies the log level that should be used by the database
- when it is writing information into the je.info file.
- </adm:synopsis>
- <adm:description>
- The database trace logging level is (in increasing order of
- verbosity) chosen from: OFF, SEVERE, WARNING, INFO, CONFIG, FINE,
- FINER, FINEST, ALL.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>CONFIG</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:string />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-logging-level</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-checkpointer-bytes-interval" advanced="true">
- <adm:synopsis>
- Specifies the maximum number of bytes that may be written to the
- database before it is forced to perform a checkpoint.
- </adm:synopsis>
- <adm:description>
- This can be used to bound the recovery time that may be required
- if the database environment is opened without having been properly
- closed. If this property is set to a non-zero value, the
- checkpointer wakeup interval is not used. To use time-based
- checkpointing, set this property to zero.
- </adm:description>
- <adm:requires-admin-action>
- <adm:server-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>500mb</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:size lower-limit="0b" upper-limit="9223372036854775807b" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-checkpointer-bytes-interval</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-checkpointer-wakeup-interval"
- advanced="true">
- <adm:synopsis>
- Specifies the maximum length of time that may pass between
- checkpoints.
- </adm:synopsis>
- <adm:description>
- Note that this is only used if the value of the checkpointer
- bytes interval is zero.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>30s</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:duration base-unit="s" lower-limit="1" upper-limit="4294" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-checkpointer-wakeup-interval</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-num-lock-tables" advanced="true">
- <adm:synopsis>
- Specifies the number of lock tables that are used by the underlying database.
- </adm:synopsis>
- <adm:description>
- This can be particularly important to help improve scalability by
- avoiding contention on systems with large numbers of CPUs. The
- value of this configuration property should be set to a prime
- number that is less than or equal to the number of worker threads
- configured for use in the server.
- </adm:description>
- <adm:requires-admin-action>
- <adm:component-restart />
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:alias>
- <adm:synopsis>
- Let the server decide.
- </adm:synopsis>
- </adm:alias>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" upper-limit="32767" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-num-lock-tables</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-num-cleaner-threads" advanced="true">
- <adm:synopsis>
- Specifies the number of threads that the backend should maintain
- to keep the database log files at or near the desired utilization.
- </adm:synopsis>
- <adm:description>
- In environments with high write throughput, multiple cleaner
- threads may be required to maintain the desired utilization.
- </adm:description>
- <adm:default-behavior>
- <adm:alias>
- <adm:synopsis>
- Let the server decide.
- </adm:synopsis>
- </adm:alias>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-num-cleaner-threads</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-txn-no-sync" advanced="true">
- <adm:synopsis>
- Indicates whether database writes should be primarily written to
- an internal buffer but not immediately written to disk.
- </adm:synopsis>
- <adm:description>
- Setting the value of this configuration attribute to "true" may
- improve write performance but could cause the most
- recent changes to be lost if the <adm:product-name /> directory server or the
- underlying JVM exits abnormally, or if an OS or hardware failure
- occurs (a behavior similar to running with transaction durability
- disabled in the Sun Java System Directory Server).
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>false</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-txn-no-sync</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="db-txn-write-no-sync" advanced="true">
- <adm:synopsis>
- Indicates whether the database should synchronously flush data as
- it is written to disk.
- </adm:synopsis>
- <adm:description>
- If this value is set to "false", then all data written to disk
- is synchronously flushed to persistent storage and thereby
- providing full durability. If it is set to "true", then data may
- be cached for a period of time by the underlying operating system
- before actually being written to disk. This may improve
- performance, but could cause the most recent
- changes to be lost in the event of an underlying OS or hardware
- failure (but not in the case that the <adm:product-name /> directory server or
- the JVM exits abnormally).
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>true</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-db-txn-write-no-sync</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="disk-low-threshold" advanced="true">
- <adm:synopsis>
- Low disk threshold to limit database updates
- </adm:synopsis>
- <adm:description>
- Specifies the "low" free space on the disk. When the available
- free space on the disk used by this database instance falls below the
- value specified, protocol updates on this database are permitted only
- by a user with the BYPASS_LOCKDOWN privilege.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>200 megabytes</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:size lower-limit="0" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-disk-low-threshold</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="disk-full-threshold" advanced="true">
- <adm:synopsis>
- Full disk threshold to limit database updates
- </adm:synopsis>
- <adm:description>
- When the available free space on the disk used by this database
- instance falls below the value specified, no updates
- are permitted and the server returns an UNWILLING_TO_PERFORM error.
- Updates are allowed again as soon as free space rises above the
- threshold.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>100 megabytes</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:size lower-limit="0" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-disk-full-threshold</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="je-property" advanced="true"
- multi-valued="true">
- <adm:synopsis>
- Specifies the database and environment properties for the Berkeley
- DB Java Edition database serving the data for this backend.
- </adm:synopsis>
- <adm:description>
- Any Berkeley DB Java Edition property can be specified using the
- following form: property-name=property-value. Refer to <adm:product-name />
- documentation for further information on related properties, their
- implications, and range values. The definitive identification of
- all the property parameters is available in the example.properties
- file of Berkeley DB Java Edition distribution.
- </adm:description>
- <adm:default-behavior>
- <adm:undefined />
- </adm:default-behavior>
- <adm:syntax>
- <adm:string />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-je-property</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="index-filter-analyzer-enabled" advanced="true">
- <adm:synopsis>
- Indicates whether to gather statistical information about the search
- filters processed by the directory server while evaluating the usage of
- indexes.
- </adm:synopsis>
- <adm:description>
- Analyzing indexes requires gathering search filter usage patterns from
- user requests, especially for values as specified in the filters and
- subsequently looking the status of those values into the index files.
- When a search requests is processed, internal or user generated, a
- first phase uses indexes to find potential entries to be returned.
- Depending on the search filter, if the index of one of the specified
- attributes matches too many entries (exceeds the index entry limit),
- the search becomes non-indexed. In any case, all entries thus
- gathered (or the entire DIT) are matched against the filter for
- actually returning the search result.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>false</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-index-filter-analyzer-enabled</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="index-filter-analyzer-max-filters" advanced="true">
- <adm:synopsis>
- The maximum number of search filter statistics to keep.
- </adm:synopsis>
- <adm:description>
- When the maximum number of search filter is reached, the least used one
- will be deleted.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>25</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="1" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-index-filter-analyzer-max-filters</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="subordinate-indexes-enabled" advanced="true">
- <adm:synopsis>
- Indicates whether id2children and id2subtree indexes should be used for
- this backend. These indexes are used for constraining filtered searches
- to the search request's scope as well as for generating values for the
- hasSubordinates and numSubordinates virtual attributes.
- </adm:synopsis>
- <adm:description>
- Subordinate indexing is enabled by default and should only be disabled
- for specialized use cases. A typical use case is where the backend is
- to be subjected to heavy add/delete load beneath the same parent entry
- such as when used as a session database. Disabling the subordinate
- indexes means that the numSubordinates and hasSubordinates virtual
- attributes will not be supported.
- </adm:description>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>true</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:boolean />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-subordinate-indexes-enabled</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
-</adm:managed-object>
diff --git a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBIndexConfiguration.xml b/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBIndexConfiguration.xml
deleted file mode 100644
index 05228f9..0000000
--- a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBIndexConfiguration.xml
+++ /dev/null
@@ -1,231 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ! CDDL HEADER START
- !
- ! The contents of this file are subject to the terms of the
- ! Common Development and Distribution License, Version 1.0 only
- ! (the "License"). You may not use this file except in compliance
- ! with the License.
- !
- ! You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- ! or http://forgerock.org/license/CDDLv1.0.html.
- ! See the License for the specific language governing permissions
- ! and limitations under the License.
- !
- ! When distributing Covered Code, include this CDDL HEADER in each
- ! file and include the License file at legal-notices/CDDLv1_0.txt.
- ! If applicable, add the following below this CDDL HEADER, with the
- ! fields enclosed by brackets "[]" replaced with your own identifying
- ! information:
- ! Portions Copyright [yyyy] [name of copyright owner]
- !
- ! CDDL HEADER END
- !
- !
- ! Copyright 2007-2009 Sun Microsystems, Inc.
- ! -->
-<adm:managed-object name="local-db-index" plural-name="local-db-indexes"
- package="org.forgerock.opendj.server.config"
- xmlns:adm="http://opendj.forgerock.org/admin"
- xmlns:ldap="http://opendj.forgerock.org/admin-ldap">
- <adm:synopsis>
- <adm:user-friendly-plural-name />
- are used to store information that makes it possible to locate
- entries very quickly when processing search operations.
- </adm:synopsis>
- <adm:description>
- Indexing is performed on a per-attribute level and different types
- of indexing may be performed for different kinds of attributes, based
- on how they are expected to be accessed during search operations.
- </adm:description>
- <adm:tag name="database" />
- <adm:profile name="ldap">
- <ldap:object-class>
- <ldap:name>ds-cfg-local-db-index</ldap:name>
- <ldap:superior>top</ldap:superior>
- </ldap:object-class>
- </adm:profile>
- <adm:property name="attribute" mandatory="true" read-only="true">
- <adm:synopsis>
- Specifies the name of the attribute for which the index is to
- be maintained.
- </adm:synopsis>
- <adm:syntax>
- <adm:attribute-type />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-attribute</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="index-entry-limit">
- <adm:synopsis>
- Specifies the maximum number of entries that are allowed
- to match a given index key before that particular index key is no
- longer maintained.
- </adm:synopsis>
- <adm:description>
- This is analogous to the ALL IDs threshold in the Sun Java System
- Directory Server. If this is specified, its value overrides the JE
- backend-wide configuration. For no limit, use 0 for the value.
- </adm:description>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- If any index keys have already reached this limit, indexes
- must be rebuilt before they will be allowed to use the
- new limit.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:inherited>
- <adm:relative property-name="index-entry-limit" offset="1"
- managed-object-name="local-db-backend" />
- </adm:inherited>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="0" upper-limit="2147483647">
- <adm:unit-synopsis>Number of entries</adm:unit-synopsis>
- </adm:integer>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-index-entry-limit</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="index-type" mandatory="true"
- multi-valued="true">
- <adm:synopsis>
- Specifies the type(s) of indexing that should be performed
- for the associated attribute.
- </adm:synopsis>
- <adm:description>
- For equality, presence, and substring index types, the associated
- attribute type must have a corresponding matching rule.
- </adm:description>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- If any new index types are added for an attribute, and
- values for that attribute already exist in the
- database, the index must be rebuilt before it
- will be accurate.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:syntax>
- <adm:enumeration>
- <adm:value name="equality">
- <adm:synopsis>
- This index type is used to improve the efficiency
- of searches using equality search filters.
- </adm:synopsis>
- </adm:value>
- <adm:value name="ordering">
- <adm:synopsis>
- This index type is used to improve the efficiency
- of searches using "greater than or equal to" or "less then
- or equal to" search filters.
- </adm:synopsis>
- </adm:value>
- <adm:value name="presence">
- <adm:synopsis>
- This index type is used to improve the efficiency
- of searches using the presence search filters.
- </adm:synopsis>
- </adm:value>
- <adm:value name="substring">
- <adm:synopsis>
- This index type is used to improve the efficiency
- of searches using substring search filters.
- </adm:synopsis>
- </adm:value>
- <adm:value name="approximate">
- <adm:synopsis>
- This index type is used to improve the efficiency
- of searches using approximate matching search filters.
- </adm:synopsis>
- </adm:value>
- <adm:value name="extensible">
- <adm:synopsis>
- This index type is used to improve the efficiency
- of searches using extensible matching search filters.
- </adm:synopsis>
- </adm:value>
- </adm:enumeration>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-index-type</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="substring-length" advanced="true">
- <adm:synopsis>
- The length of substrings in a substring index.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- The index must be rebuilt before it will reflect the
- new value.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>6</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer lower-limit="3" />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-substring-length</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="index-extensible-matching-rule" multi-valued="true">
- <adm:synopsis>
- The extensible matching rule in an extensible index.
- </adm:synopsis>
- <adm:description>
- An extensible matching rule must be specified using either LOCALE or OID of the matching rule.
- </adm:description>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- The index must be rebuilt before it will reflect the
- new value.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:alias>
- <adm:synopsis>
- No extensible matching rules will be indexed.
- </adm:synopsis>
- </adm:alias>
- </adm:default-behavior>
- <adm:syntax>
- <adm:string>
- <adm:pattern>
- <adm:regex>([a-z][a-z](-[A-Z][A-Z]){0,2}(.(([a-z]{2,3})|\\d))?)|(^\\d.((\\d)+.)+\\d$)</adm:regex>
- <adm:usage>LOCALE | OID</adm:usage>
- <adm:synopsis>
- A Locale or an OID.
- </adm:synopsis>
- </adm:pattern>
- </adm:string>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-index-extensible-matching-rule</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
-</adm:managed-object>
diff --git a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBVLVIndexConfiguration.xml b/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBVLVIndexConfiguration.xml
deleted file mode 100644
index 829b1fa..0000000
--- a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/LocalDBVLVIndexConfiguration.xml
+++ /dev/null
@@ -1,231 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ! CDDL HEADER START
- !
- ! The contents of this file are subject to the terms of the
- ! Common Development and Distribution License, Version 1.0 only
- ! (the "License"). You may not use this file except in compliance
- ! with the License.
- !
- ! You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- ! or http://forgerock.org/license/CDDLv1.0.html.
- ! See the License for the specific language governing permissions
- ! and limitations under the License.
- !
- ! When distributing Covered Code, include this CDDL HEADER in each
- ! file and include the License file at legal-notices/CDDLv1_0.txt.
- ! If applicable, add the following below this CDDL HEADER, with the
- ! fields enclosed by brackets "[]" replaced with your own identifying
- ! information:
- ! Portions Copyright [yyyy] [name of copyright owner]
- !
- ! CDDL HEADER END
- !
- !
- ! Copyright 2007-2008 Sun Microsystems, Inc.
- ! -->
-<adm:managed-object name="local-db-vlv-index"
- plural-name="local-db-vlv-indexes"
- package="org.forgerock.opendj.server.config"
- xmlns:adm="http://opendj.forgerock.org/admin"
- xmlns:ldap="http://opendj.forgerock.org/admin-ldap">
- <adm:synopsis>
- <adm:user-friendly-plural-name />
- are used to store information about a specific search request that
- makes it possible to efficiently process them using the VLV control.
- </adm:synopsis>
- <adm:description>
- A VLV index effectively notifies the server that a virtual list
- view, with specific query and sort parameters, will be performed.
- This index also allows the server to collect and maintain the
- information required to make using the virtual list view faster.
- </adm:description>
- <adm:tag name="database" />
- <adm:profile name="ldap">
- <ldap:object-class>
- <ldap:name>ds-cfg-local-db-vlv-index</ldap:name>
- <ldap:superior>top</ldap:superior>
- </ldap:object-class>
- </adm:profile>
- <adm:property name="base-dn" mandatory="true">
- <adm:synopsis>
- Specifies the base DN used in the search query that is being
- indexed.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- The index must be rebuilt after modifying this
- property.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:syntax>
- <adm:dn />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-base-dn</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="scope" mandatory="true">
- <adm:synopsis>
- Specifies the LDAP scope of the query that is being indexed.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- The index must be rebuilt after modifying this
- property.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:syntax>
- <adm:enumeration>
- <adm:value name="base-object">
- <adm:synopsis>Search the base object only.</adm:synopsis>
- </adm:value>
- <adm:value name="single-level">
- <adm:synopsis>
- Search the immediate children of the base object but do not
- include any of their descendants or the base object itself.
- </adm:synopsis>
- </adm:value>
- <adm:value name="subordinate-subtree">
- <adm:synopsis>
- Search the entire subtree below the base object but do not
- include the base object itself.
- </adm:synopsis>
- </adm:value>
- <adm:value name="whole-subtree">
- <adm:synopsis>
- Search the base object and the entire subtree below the base
- object.
- </adm:synopsis>
- </adm:value>
- </adm:enumeration>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-scope</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="filter" mandatory="true">
- <adm:synopsis>
- Specifies the LDAP filter used in the query that is being indexed.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- The index must be rebuilt after modifying this
- property.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:syntax>
- <adm:string>
- <adm:pattern>
- <adm:regex>.*</adm:regex>
- <adm:usage>STRING</adm:usage>
- <adm:synopsis>
- A valid LDAP search filter.
- </adm:synopsis>
- </adm:pattern>
- </adm:string>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-filter</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="sort-order" mandatory="true">
- <adm:synopsis>
- Specifies the names of the attributes that are used to sort the
- entries for the query being indexed.
- </adm:synopsis>
- <adm:description>
- Multiple attributes can be used to determine the sort order by
- listing the attribute names from highest to lowest precedence.
- Optionally, + or - can be prefixed to the attribute name to sort
- the attribute in ascending order or descending order respectively.
- </adm:description>
- <adm:requires-admin-action>
- <adm:other>
- <adm:synopsis>
- The index must be rebuilt after modifying this
- property.
- </adm:synopsis>
- </adm:other>
- </adm:requires-admin-action>
- <adm:syntax>
- <adm:string>
- <adm:pattern>
- <adm:regex>.*</adm:regex>
- <adm:usage>STRING</adm:usage>
- <adm:synopsis>
- Valid attribute types defined in the schema, separated by a
- space and optionally prefixed by + or -.
- </adm:synopsis>
- </adm:pattern>
- </adm:string>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-sort-order</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="name" mandatory="true" read-only="true">
- <adm:synopsis>
- Specifies a unique name for this VLV index.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- The VLV index name cannot be altered after the index is created.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:syntax>
- <adm:string />
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-name</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
- <adm:property name="max-block-size" read-only="true"
- advanced="true">
- <adm:synopsis>
- Specifies the number of entry IDs to store in a single sorted
- set before it must be split.
- </adm:synopsis>
- <adm:requires-admin-action>
- <adm:none>
- <adm:synopsis>
- The blocks are resized lazily the next time the index is
- modified.
- </adm:synopsis>
- </adm:none>
- </adm:requires-admin-action>
- <adm:default-behavior>
- <adm:defined>
- <adm:value>4000</adm:value>
- </adm:defined>
- </adm:default-behavior>
- <adm:syntax>
- <adm:integer>
- <adm:unit-synopsis>Number of entry IDs</adm:unit-synopsis>
- </adm:integer>
- </adm:syntax>
- <adm:profile name="ldap">
- <ldap:attribute>
- <ldap:name>ds-cfg-max-block-size</ldap:name>
- </ldap:attribute>
- </adm:profile>
- </adm:property>
-</adm:managed-object>
diff --git a/opendj-server-legacy/resource/schema/02-config.ldif b/opendj-server-legacy/resource/schema/02-config.ldif
index e04673b..10063c4 100644
--- a/opendj-server-legacy/resource/schema/02-config.ldif
+++ b/opendj-server-legacy/resource/schema/02-config.ldif
@@ -2164,12 +2164,6 @@
EQUALITY caseIgnoreMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
X-ORIGIN 'OpenDS Directory Server' )
-attributeTypes: ( 1.3.6.1.4.1.26027.1.1.372
- NAME 'ds-cfg-max-block-size'
- EQUALITY integerMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
- SINGLE-VALUE
- X-ORIGIN 'OpenDS Directory Server' )
attributeTypes: ( 1.3.6.1.4.1.26027.1.1.373
NAME 'ds-cfg-state-update-failure-policy'
EQUALITY caseIgnoreMatch
@@ -3883,54 +3877,6 @@
ds-cfg-enabled $
ds-cfg-writability-mode )
X-ORIGIN 'OpenDS Directory Server' )
-objectClasses: ( 1.3.6.1.4.1.26027.1.2.6
- NAME 'ds-cfg-local-db-backend'
- SUP ds-cfg-backend
- STRUCTURAL
- MUST ds-cfg-db-directory
- MAY ( ds-cfg-index-entry-limit $
- ds-cfg-preload-time-limit $
- ds-cfg-import-thread-count $
- ds-cfg-import-queue-size $
- ds-cfg-entries-compressed $
- ds-cfg-db-directory-permissions $
- ds-cfg-db-cache-percent $
- ds-cfg-db-cache-size $
- ds-cfg-db-txn-no-sync $
- ds-cfg-db-txn-write-no-sync $
- ds-cfg-db-run-cleaner $
- ds-cfg-db-cleaner-min-utilization $
- ds-cfg-db-evictor-lru-only $
- ds-cfg-db-evictor-core-threads $
- ds-cfg-db-evictor-max-threads $
- ds-cfg-db-evictor-keep-alive $
- ds-cfg-db-evictor-nodes-per-scan $
- ds-cfg-db-log-file-max $
- ds-cfg-db-log-filecache-size $
- ds-cfg-db-logging-file-handler-on $
- ds-cfg-db-logging-level $
- ds-cfg-db-checkpointer-bytes-interval $
- ds-cfg-db-checkpointer-wakeup-interval $
- ds-cfg-db-num-lock-tables $
- ds-cfg-db-num-cleaner-threads $
- ds-cfg-compact-encoding $
- ds-cfg-je-property $
- ds-cfg-disk-full-threshold $
- ds-cfg-disk-low-threshold $
- ds-cfg-index-filter-analyzer-enabled $
- ds-cfg-index-filter-analyzer-max-filters $
- ds-cfg-subordinate-indexes-enabled )
- X-ORIGIN 'OpenDS Directory Server' )
-objectClasses: ( 1.3.6.1.4.1.26027.1.2.7
- NAME 'ds-cfg-local-db-index'
- SUP top
- STRUCTURAL
- MUST ( ds-cfg-attribute $
- ds-cfg-index-type )
- MAY ( ds-cfg-index-entry-limit $
- ds-cfg-substring-length $
- ds-cfg-index-extensible-matching-rule )
- X-ORIGIN 'OpenDS Directory Server' )
objectClasses: ( 1.3.6.1.4.1.26027.1.2.8
NAME 'ds-cfg-schema-backend'
SUP ds-cfg-backend
@@ -5047,17 +4993,6 @@
STRUCTURAL
MAY ds-cfg-strip-syntax-min-upper-bound
X-ORIGIN 'OpenDS Directory Server' )
-objectClasses: ( 1.3.6.1.4.1.26027.1.2.114
- NAME 'ds-cfg-local-db-vlv-index'
- SUP top
- STRUCTURAL
- MUST ( ds-cfg-base-dn $
- ds-cfg-scope $
- ds-cfg-filter $
- ds-cfg-sort-order $
- ds-cfg-name )
- MAY ds-cfg-max-block-size
- X-ORIGIN 'OpenDS Directory Server' )
objectClasses: ( 1.3.6.1.4.1.26027.1.2.115
NAME 'ds-cfg-smtp-alert-handler'
SUP ds-cfg-alert-handler
diff --git a/opendj-server-legacy/src/main/java/org/forgerock/opendj/adapter/server3x/Converters.java b/opendj-server-legacy/src/main/java/org/forgerock/opendj/adapter/server3x/Converters.java
index e6b1daa..1f48829 100644
--- a/opendj-server-legacy/src/main/java/org/forgerock/opendj/adapter/server3x/Converters.java
+++ b/opendj-server-legacy/src/main/java/org/forgerock/opendj/adapter/server3x/Converters.java
@@ -60,9 +60,7 @@
import org.forgerock.opendj.ldap.responses.Result;
import org.forgerock.opendj.ldap.responses.SearchResultEntry;
import org.forgerock.opendj.server.config.meta.BackendVLVIndexCfgDefn;
-import org.forgerock.opendj.server.config.meta.LocalDBVLVIndexCfgDefn;
import org.forgerock.util.Reject;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.BindOperation;
import org.opends.server.core.CompareOperation;
import org.opends.server.core.ExtendedOperation;
@@ -783,32 +781,4 @@
throw new IllegalArgumentException("Impossible to convert the unknown scope: " + scope);
}
}
-
- /**
- * Converts from OpenDJ server
- * {@link org.opends.server.admin.std.meta.LocalDBVLVIndexCfgDefn.Scope} to
- * {@link org.forgerock.opendj.server.config.meta.LocalDBVLVIndexCfgDefn.Scope}.
- *
- * @param scope
- * The scope value.
- * @return The converted scope value.
- */
- @RemoveOnceLocalDBBackendIsPluggable
- public static LocalDBVLVIndexCfgDefn.Scope from(
- final org.opends.server.admin.std.meta.LocalDBVLVIndexCfgDefn.Scope scope) {
- Reject.ifNull(scope, "Provided scope to convert is null");
- switch (scope) {
- case BASE_OBJECT:
- return LocalDBVLVIndexCfgDefn.Scope.BASE_OBJECT;
- case SINGLE_LEVEL:
- return LocalDBVLVIndexCfgDefn.Scope.SINGLE_LEVEL;
- case SUBORDINATE_SUBTREE:
- return LocalDBVLVIndexCfgDefn.Scope.SUBORDINATE_SUBTREE;
- case WHOLE_SUBTREE:
- return LocalDBVLVIndexCfgDefn.Scope.WHOLE_SUBTREE;
- default:
- throw new IllegalArgumentException("Impossible to convert the unknown scope: " + scope);
- }
- }
-
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/BackendDescriptor.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/BackendDescriptor.java
index c90efda..e0a94a8 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/BackendDescriptor.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/BackendDescriptor.java
@@ -32,7 +32,6 @@
import java.util.TreeSet;
import org.opends.admin.ads.ADSContext;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
/** The class that describes the backend configuration. */
public class BackendDescriptor
@@ -53,9 +52,6 @@
{
/** The backend is a backup backend. */
BACKUP,
- /** The backend is a local backend. */
- @RemoveOnceLocalDBBackendIsPluggable
- LOCAL_DB,
/** The backend is a LDIF backend. */
LDIF,
/** The backend is a memory backend. */
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTableModel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTableModel.java
index 0e7500c..b3d452a 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTableModel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTableModel.java
@@ -43,7 +43,7 @@
private static final long serialVersionUID = 6979651281772979301L;
- /** {@inheritDoc} */
+ @Override
protected String[] getColumnNames()
{
return new String[] {
@@ -63,6 +63,7 @@
* are equivalent in terms of sorting and -1 if the second descriptor must
* be put before the first descriptor.
*/
+ @Override
public int compare(AbstractIndexDescriptor index1,
AbstractIndexDescriptor index2)
{
@@ -91,7 +92,7 @@
return result;
}
- /** {@inheritDoc} */
+ @Override
protected String[] getLine(AbstractIndexDescriptor index)
{
IndexDescriptor i = (IndexDescriptor)index;
@@ -144,32 +145,11 @@
StringBuilder sb = new StringBuilder();
for (IndexTypeDescriptor type : index.getTypes())
{
- LocalizableMessage v;
- switch (type)
- {
- case SUBSTRING:
- v = INFO_CTRL_PANEL_INDEX_SUBSTRING.get();
- break;
- case ORDERING:
- v = INFO_CTRL_PANEL_INDEX_ORDERING.get();
- break;
- case PRESENCE:
- v = INFO_CTRL_PANEL_INDEX_PRESENCE.get();
- break;
- case EQUALITY:
- v = INFO_CTRL_PANEL_INDEX_EQUALITY.get();
- break;
- case APPROXIMATE:
- v = INFO_CTRL_PANEL_INDEX_APPROXIMATE.get();
- break;
- default:
- throw new RuntimeException("Unknown index type: "+type);
- }
if (sb.length() > 0)
{
sb.append(", ");
}
- sb.append(v);
+ sb.append(getIndexName(type));
}
if (sb.length() == 0)
{
@@ -177,4 +157,23 @@
}
return sb.toString();
}
+
+ private LocalizableMessage getIndexName(IndexTypeDescriptor type)
+ {
+ switch (type)
+ {
+ case SUBSTRING:
+ return INFO_CTRL_PANEL_INDEX_SUBSTRING.get();
+ case ORDERING:
+ return INFO_CTRL_PANEL_INDEX_ORDERING.get();
+ case PRESENCE:
+ return INFO_CTRL_PANEL_INDEX_PRESENCE.get();
+ case EQUALITY:
+ return INFO_CTRL_PANEL_INDEX_EQUALITY.get();
+ case APPROXIMATE:
+ return INFO_CTRL_PANEL_INDEX_APPROXIMATE.get();
+ default:
+ throw new RuntimeException("Unknown index type: "+type);
+ }
+ }
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTypeDescriptor.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTypeDescriptor.java
index cb4128c..64685cb 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTypeDescriptor.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/IndexTypeDescriptor.java
@@ -30,77 +30,64 @@
import java.util.Set;
import org.opends.server.admin.std.meta.BackendIndexCfgDefn;
-import org.opends.server.admin.std.meta.LocalDBIndexCfgDefn;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
+import org.opends.server.util.RemoveOnceNewConfigFrameworkIsUsed;
/**
* Defines the set of values for the index type and provides adaptors to convert
* from/to corresponding configuration classes.
*/
-@RemoveOnceLocalDBBackendIsPluggable
+@RemoveOnceNewConfigFrameworkIsUsed
public enum IndexTypeDescriptor
{
/**
* This index type is used to improve the efficiency of searches using
* approximate matching search filters.
*/
- APPROXIMATE(LocalDBIndexCfgDefn.IndexType.APPROXIMATE, BackendIndexCfgDefn.IndexType.APPROXIMATE,
- org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType.APPROXIMATE,
+ APPROXIMATE(BackendIndexCfgDefn.IndexType.APPROXIMATE,
org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType.APPROXIMATE),
/**
* This index type is used to improve the efficiency of searches using
* equality search filters.
*/
- EQUALITY(LocalDBIndexCfgDefn.IndexType.EQUALITY, BackendIndexCfgDefn.IndexType.EQUALITY,
- org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType.EQUALITY,
+ EQUALITY(BackendIndexCfgDefn.IndexType.EQUALITY,
org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType.EQUALITY),
/**
* This index type is used to improve the efficiency of searches using
* extensible matching search filters.
*/
- EXTENSIBLE(LocalDBIndexCfgDefn.IndexType.EXTENSIBLE, BackendIndexCfgDefn.IndexType.EXTENSIBLE,
- org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType.EXTENSIBLE,
+ EXTENSIBLE(BackendIndexCfgDefn.IndexType.EXTENSIBLE,
org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType.EXTENSIBLE),
/**
* This index type is used to improve the efficiency of searches using
* "greater than or equal to" or "less then or equal to" search filters.
*/
- ORDERING(LocalDBIndexCfgDefn.IndexType.ORDERING, BackendIndexCfgDefn.IndexType.ORDERING,
- org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType.ORDERING,
+ ORDERING(BackendIndexCfgDefn.IndexType.ORDERING,
org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType.ORDERING),
/**
* This index type is used to improve the efficiency of searches using the
* presence search filters.
*/
- PRESENCE(LocalDBIndexCfgDefn.IndexType.PRESENCE, BackendIndexCfgDefn.IndexType.PRESENCE,
- org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType.PRESENCE,
+ PRESENCE(BackendIndexCfgDefn.IndexType.PRESENCE,
org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType.PRESENCE),
/**
* This index type is used to improve the efficiency of searches using
* substring search filters.
*/
- SUBSTRING(LocalDBIndexCfgDefn.IndexType.SUBSTRING, BackendIndexCfgDefn.IndexType.SUBSTRING,
- org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType.SUBSTRING,
+ SUBSTRING(BackendIndexCfgDefn.IndexType.SUBSTRING,
org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType.SUBSTRING);
- private final LocalDBIndexCfgDefn.IndexType oldConfigLocalDBIndexType;
private final BackendIndexCfgDefn.IndexType oldConfigBackendIndexType;
- private final org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType localDBIndexType;
private final org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType backendIndexType;
- private IndexTypeDescriptor(final LocalDBIndexCfgDefn.IndexType oldConfigLocalDBIndexType,
- final BackendIndexCfgDefn.IndexType oldConfigBackendIndexType,
- final org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType localDBIndexType,
+ private IndexTypeDescriptor(final BackendIndexCfgDefn.IndexType oldConfigBackendIndexType,
final org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType backendIndexType)
{
- this.oldConfigLocalDBIndexType = oldConfigLocalDBIndexType;
this.oldConfigBackendIndexType = oldConfigBackendIndexType;
- this.localDBIndexType = localDBIndexType;
this.backendIndexType = backendIndexType;
}
@@ -116,18 +103,6 @@
return oldConfigBackendIndexType;
}
- /**
- * Convert the index type to the equivalent
- * {@code LocalDBIndexCfgDefn.IndexType}.
- *
- * @return The index type to the equivalent
- * {@code LocalDBIndexCfgDefn.IndexType}
- */
- public LocalDBIndexCfgDefn.IndexType toLocalDBIndexType()
- {
- return oldConfigLocalDBIndexType;
- }
-
private static IndexTypeDescriptor fromBackendIndexType(final BackendIndexCfgDefn.IndexType indexType)
{
switch (indexType)
@@ -149,27 +124,6 @@
}
}
- private static IndexTypeDescriptor fromLocalDBIndexType(final LocalDBIndexCfgDefn.IndexType indexType)
- {
- switch (indexType)
- {
- case APPROXIMATE:
- return APPROXIMATE;
- case EQUALITY:
- return EQUALITY;
- case EXTENSIBLE:
- return EXTENSIBLE;
- case ORDERING:
- return ORDERING;
- case PRESENCE:
- return PRESENCE;
- case SUBSTRING:
- return SUBSTRING;
- default:
- throw new IllegalArgumentException("No IndexTypeDescriptor corresponding to: " + indexType);
- }
- }
-
/**
* Convert the provided {@code Set<BackendIndexCfgDefn.IndexType>} to a
* {@code Set<IndexTypeDescriptor>}.
@@ -190,25 +144,6 @@
}
/**
- * Convert the provided {@code Set<LocalDBIndexCfgDefn.IndexType} to a
- * {@code Set<IndexTypeDescriptor>}.
- *
- * @param indexTypes
- * A set of {@code Set<LocalDBIndexCfgDefn.IndexType>}
- * @return A set of {@code Set<IndexTypeDescriptor>} corresponding to the
- * provided {@code Set<LocalDBIndexCfgDefn.IndexType>}
- */
- public static Set<IndexTypeDescriptor> fromLocalDBIndexTypes(final Set<LocalDBIndexCfgDefn.IndexType> indexTypes)
- {
- final Set<IndexTypeDescriptor> indexTypeDescriptors = new LinkedHashSet<>();
- for (final LocalDBIndexCfgDefn.IndexType indexType : indexTypes)
- {
- indexTypeDescriptors.add(fromLocalDBIndexType(indexType));
- }
- return indexTypeDescriptors;
- }
-
- /**
* Convert the provided {@code Set<IndexTypeDescriptor>} to a
* {@code Set<BackendIndexCfgDefn.IndexType>}.
*
@@ -230,48 +165,6 @@
/**
* Convert the provided {@code Set<IndexTypeDescriptor>} to a
- * {@code Set<LocalDBIndexCfgDefn.IndexType>}.
- *
- * @param indexTypeDescriptors
- * A set of {@code Set<IndexTypeDescriptor>}
- * @return A set of {@code Set<LocalDBIndexCfgDefn.IndexType>} corresponding
- * to the provided {@code Set<IndexTypeDescriptor>}
- */
- public static Set<LocalDBIndexCfgDefn.IndexType> toLocalDBIndexTypes(
- final Set<IndexTypeDescriptor> indexTypeDescriptors)
- {
- final Set<LocalDBIndexCfgDefn.IndexType> indexTypes = new LinkedHashSet<>();
- for (final IndexTypeDescriptor indexTypeDescriptor : indexTypeDescriptors)
- {
- indexTypes.add(indexTypeDescriptor.toLocalDBIndexType());
- }
- return indexTypes;
- }
-
- /**
- * Convert the provided {@code Set<IndexTypeDescriptor>} to a
- * {@code Set<org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType>}.
- *
- * @param indexTypeDescriptors
- * A set of {@code Set<IndexTypeDescriptor>}
- * @return A set of
- * {@code Set<org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType>}
- * corresponding to the provided {@code Set<IndexTypeDescriptor>}
- */
- @RemoveOnceLocalDBBackendIsPluggable
- public static Set<org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType> toNewConfigLocalDBIndexTypes(
- final Set<IndexTypeDescriptor> indexTypeDescriptors)
- {
- Set<org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn.IndexType> newConfigIndexTypes = new HashSet<>();
- for (IndexTypeDescriptor indexType : indexTypeDescriptors)
- {
- newConfigIndexTypes.add(indexType.localDBIndexType);
- }
- return newConfigIndexTypes;
- }
-
- /**
- * Convert the provided {@code Set<IndexTypeDescriptor>} to a
* {@code Set<org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType>}.
*
* @param indexTypeDescriptors
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/VLVIndexDescriptor.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/VLVIndexDescriptor.java
index 438854d..34ec927 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/VLVIndexDescriptor.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/datamodel/VLVIndexDescriptor.java
@@ -32,8 +32,6 @@
import org.forgerock.opendj.ldap.SearchScope;
import org.opends.server.admin.std.meta.BackendVLVIndexCfgDefn;
-import org.opends.server.admin.std.meta.LocalDBVLVIndexCfgDefn;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.types.DN;
/**
@@ -201,34 +199,6 @@
}
/**
- * Returns the equivalent {@code LocalDBVLVIndexCfgDefn.Scope} to the provided
- * search scope.
- *
- * @param scope
- * The {@code SearchScope} to convert.
- * @return the equivalent {@code LocalDBVLVIndexCfgDefn.Scope} to the provided
- * search scope.
- */
- @RemoveOnceLocalDBBackendIsPluggable
- public static LocalDBVLVIndexCfgDefn.Scope getLocalDBVLVIndexScope(final SearchScope scope)
- {
- switch (scope.asEnum())
- {
- case BASE_OBJECT:
- return LocalDBVLVIndexCfgDefn.Scope.BASE_OBJECT;
- case SINGLE_LEVEL:
- return LocalDBVLVIndexCfgDefn.Scope.SINGLE_LEVEL;
- case SUBORDINATES:
- return LocalDBVLVIndexCfgDefn.Scope.SUBORDINATE_SUBTREE;
- case WHOLE_SUBTREE:
- return LocalDBVLVIndexCfgDefn.Scope.WHOLE_SUBTREE;
- case UNKNOWN:
- default:
- throw new IllegalArgumentException("Unsupported SearchScope: " + scope);
- }
- }
-
- /**
* Convert the provided {@code BackendVLVIndexCfgDefn.Scope} to
* {@code SearchScope}.
*
@@ -253,32 +223,4 @@
throw new IllegalArgumentException("Unsupported BackendVLVIndexCfgDefn.Scope: " + scope);
}
}
-
- /**
- * Convert the provided {@code LocalDBVLVIndexCfgDefn.Scope} to
- * {@code SearchScope}.
- *
- * @param scope
- * The scope to convert.
- * @return the provided {@code LocalDBVLVIndexCfgDefn.Scope} to
- * {@code SearchScope}
- */
- @RemoveOnceLocalDBBackendIsPluggable
- public static SearchScope toSearchScope(final LocalDBVLVIndexCfgDefn.Scope scope)
- {
- switch (scope)
- {
- case BASE_OBJECT:
- return SearchScope.BASE_OBJECT;
- case SINGLE_LEVEL:
- return SearchScope.SINGLE_LEVEL;
- case SUBORDINATE_SUBTREE:
- return SearchScope.SUBORDINATES;
- case WHOLE_SUBTREE:
- return SearchScope.WHOLE_SUBTREE;
- default:
- throw new IllegalArgumentException("Unsupported LocalDBVLVIndexCfgDefn.Scope: " + scope);
- }
- }
-
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteBaseDNAndBackendTask.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteBaseDNAndBackendTask.java
index 77a6c07..ff500c3 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteBaseDNAndBackendTask.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteBaseDNAndBackendTask.java
@@ -24,7 +24,6 @@
* Copyright 2008-2009 Sun Microsystems, Inc.
* Portions Copyright 2014-2015 ForgeRock AS
*/
-
package org.opends.guitools.controlpanel.task;
import static org.opends.messages.AdminToolMessages.*;
@@ -58,7 +57,7 @@
import org.opends.server.admin.client.ldap.JNDIDirContextAdaptor;
import org.opends.server.admin.client.ldap.LDAPManagementContext;
import org.opends.server.admin.server.ServerManagementContext;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
+import org.opends.server.admin.std.client.PluggableBackendCfgClient;
import org.opends.server.admin.std.client.ReplicationDomainCfgClient;
import org.opends.server.admin.std.client.ReplicationSynchronizationProviderCfgClient;
import org.opends.server.admin.std.client.RootCfgClient;
@@ -129,19 +128,19 @@
this.backendsToDelete.addAll(backendsToDelete);
}
- /** {@inheritDoc} */
+ @Override
public Type getType()
{
return !baseDNsToDelete.isEmpty() ? Type.DELETE_BASEDN : Type.DELETE_BACKEND;
}
- /** {@inheritDoc} */
+ @Override
public Set<String> getBackends()
{
return backendSet;
}
- /** {@inheritDoc} */
+ @Override
public LocalizableMessage getTaskDescription()
{
StringBuilder sb = new StringBuilder();
@@ -198,7 +197,7 @@
return LocalizableMessage.raw(sb.toString());
}
- /** {@inheritDoc} */
+ @Override
public boolean canLaunch(Task taskToBeLaunched,
Collection<LocalizableMessage> incompatibilityReasons)
{
@@ -251,6 +250,7 @@
{
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml("<br><br>");
@@ -268,6 +268,7 @@
{
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
List<String> args =
@@ -281,6 +282,7 @@
}
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
if (baseDNs.size() == 1)
@@ -318,6 +320,7 @@
final int fNumberDeleted = numberDeleted;
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().getProgressBar().setIndeterminate(false);
@@ -334,6 +337,7 @@
{
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml("<br><br>");
@@ -349,6 +353,7 @@
{
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
List<String> args =
@@ -362,6 +367,7 @@
}
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml(
@@ -383,6 +389,7 @@
final int fNumberDeleted = numberDeleted;
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().getProgressBar().setIndeterminate(false);
@@ -468,8 +475,8 @@
ManagementContext mCtx = LDAPManagementContext.createFromContext(
JNDIDirContextAdaptor.adapt(ctx));
RootCfgClient root = mCtx.getRootConfiguration();
- LocalDBBackendCfgClient backend =
- (LocalDBBackendCfgClient)root.getBackend(
+ PluggableBackendCfgClient backend =
+ (PluggableBackendCfgClient)root.getBackend(
baseDNs.iterator().next().getBackend().getBackendID());
SortedSet<DN> oldBaseDNs = backend.getBaseDN();
SortedSet<DN> newBaseDNs = new TreeSet<>(oldBaseDNs);
@@ -513,13 +520,13 @@
root.commit();
}
- /** {@inheritDoc} */
+ @Override
protected String getCommandLinePath()
{
return null;
}
- /** {@inheritDoc} */
+ @Override
protected ArrayList<String> getCommandLineArguments()
{
return new ArrayList<>();
@@ -539,7 +546,7 @@
return null;
}
- /** {@inheritDoc} */
+ @Override
public void runTask()
{
state = State.RUNNING;
@@ -693,6 +700,7 @@
{
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
List<String> args =
@@ -707,6 +715,7 @@
}
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml(
@@ -718,6 +727,7 @@
}
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml(
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteIndexTask.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteIndexTask.java
index c9decc4..e6545d2 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteIndexTask.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/task/DeleteIndexTask.java
@@ -51,10 +51,8 @@
import org.opends.server.admin.client.ldap.JNDIDirContextAdaptor;
import org.opends.server.admin.client.ldap.LDAPManagementContext;
import org.opends.server.admin.std.client.BackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
import org.opends.server.admin.std.client.RootCfgClient;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.DN;
import org.opends.server.types.OpenDsException;
@@ -295,14 +293,7 @@
final RootCfgClient root = mCtx.getRootConfiguration();
final BackendCfgClient backend = root.getBackend(index.getBackend().getBackendID());
- if (backend instanceof LocalDBBackendCfgClient)
- {
- removeLocalDBIndex((LocalDBBackendCfgClient) backend, index);
- }
- else
- {
- removeBackendIndex((PluggableBackendCfgClient) backend, index);
- }
+ removeBackendIndex((PluggableBackendCfgClient) backend, index);
backend.commit();
}
@@ -320,21 +311,6 @@
}
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void removeLocalDBIndex(final LocalDBBackendCfgClient backend, final AbstractIndexDescriptor index)
- throws OpenDsException
- {
- final String indexName = index.getName();
- if (isVLVIndex(index))
- {
- backend.removeLocalDBVLVIndex(indexName);
- }
- else
- {
- backend.removeLocalDBIndex(indexName);
- }
- }
-
@Override
protected String getCommandLinePath()
{
@@ -405,11 +381,11 @@
final List<String> args = new ArrayList<>();
if (isVLVIndex(index))
{
- args.add("delete-local-db-vlv-index");
+ args.add("delete-backend-vlv-index");
}
else
{
- args.add("delete-local-db-index");
+ args.add("delete-backend-index");
}
args.add("--backend-name");
args.add(index.getBackend().getBackendID());
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractIndexPanel.java
index eb012b7..3514e2b 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractIndexPanel.java
@@ -51,20 +51,15 @@
import org.forgerock.opendj.config.PropertyException;
import org.forgerock.opendj.config.client.ManagementContext;
import org.forgerock.opendj.config.client.ldap.LDAPManagementContext;
-import org.forgerock.opendj.server.config.client.BackendCfgClient;
import org.forgerock.opendj.server.config.client.BackendIndexCfgClient;
-import org.forgerock.opendj.server.config.client.LocalDBBackendCfgClient;
-import org.forgerock.opendj.server.config.client.LocalDBIndexCfgClient;
import org.forgerock.opendj.server.config.client.PluggableBackendCfgClient;
import org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn;
-import org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn;
import org.opends.guitools.controlpanel.datamodel.IndexDescriptor;
import org.opends.guitools.controlpanel.datamodel.IndexTypeDescriptor;
import org.opends.guitools.controlpanel.ui.components.TitlePanel;
import org.opends.guitools.controlpanel.ui.renderer.CustomListCellRenderer;
import org.opends.guitools.controlpanel.util.Utilities;
import org.opends.quicksetup.Installation;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.config.ConfigException;
import org.opends.server.types.AttributeType;
import org.opends.server.types.OpenDsException;
@@ -84,10 +79,10 @@
/** Minimum value for entry limit. */
static final int MIN_ENTRY_LIMIT =
- LocalDBIndexCfgDefn.getInstance().getIndexEntryLimitPropertyDefinition().getLowerLimit();
+ BackendIndexCfgDefn.getInstance().getIndexEntryLimitPropertyDefinition().getLowerLimit();
/** Maximum value for entry limit. */
static final int MAX_ENTRY_LIMIT =
- LocalDBIndexCfgDefn.getInstance().getIndexEntryLimitPropertyDefinition().getUpperLimit();
+ BackendIndexCfgDefn.getInstance().getIndexEntryLimitPropertyDefinition().getUpperLimit();
/** LocalizableMessage to be displayed to indicate that an index is not configurable. */
static final LocalizableMessage NON_CONFIGURABLE_INDEX = INFO_CTRL_PANEL_NON_CONFIGURABLE_INDEX_LABEL.get();
@@ -392,17 +387,9 @@
final LDAPProfile ldapProfile = LDAPProfile.getInstance();
try (ManagementContext context = LDAPManagementContext.newLDIFManagementContext(configFile, ldapProfile))
{
- final BackendCfgClient backend = context.getRootConfiguration().getBackend(backendName);
- if (backend instanceof LocalDBBackendCfgClient)
- {
- updateLocalDBIndexOffline(
- (LocalDBBackendCfgClient) backend, indexToModify, attributeName, indexTypes, indexEntryLimit);
- }
- else
- {
- updateBackendIndexOnline(
- (PluggableBackendCfgClient) backend, indexToModify, attributeName, indexTypes, indexEntryLimit);
- }
+ final PluggableBackendCfgClient backend =
+ (PluggableBackendCfgClient) context.getRootConfiguration().getBackend(backendName);
+ updateBackendIndexOnline(backend, indexToModify, attributeName, indexTypes, indexEntryLimit);
}
catch (final Exception e)
{
@@ -432,29 +419,4 @@
index.commit();
Utilities.throwFirstFrom(exceptions);
}
-
- @RemoveOnceLocalDBBackendIsPluggable
- private void updateLocalDBIndexOffline(final LocalDBBackendCfgClient backend, final IndexDescriptor indexToModify,
- final String attributeName, final Set<IndexTypeDescriptor> indexTypes, final int indexEntryLimit)
- throws Exception
- {
- final boolean isCreation = indexToModify == null;
- final List<PropertyException> exceptions = new ArrayList<>();
- final LocalDBIndexCfgClient index = isCreation
- ? backend.createLocalDBIndex(LocalDBIndexCfgDefn.getInstance(), attributeName, exceptions)
- : backend.getLocalDBIndex(attributeName);
-
- if (isCreation || indexTypes.equals(indexToModify.getTypes()))
- {
- index.setIndexType(IndexTypeDescriptor.toNewConfigLocalDBIndexTypes(indexTypes));
- }
-
- if (indexEntryLimit != index.getIndexEntryLimit())
- {
- index.setIndexEntryLimit(indexEntryLimit);
- }
- index.commit();
- Utilities.throwFirstFrom(exceptions);
- }
-
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractVLVIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractVLVIndexPanel.java
index 01978cd..3aa77c5 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractVLVIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/AbstractVLVIndexPanel.java
@@ -69,13 +69,9 @@
import org.forgerock.opendj.config.client.ldap.LDAPManagementContext;
import org.forgerock.opendj.ldap.DN;
import org.forgerock.opendj.ldap.SearchScope;
-import org.forgerock.opendj.server.config.client.BackendCfgClient;
import org.forgerock.opendj.server.config.client.BackendVLVIndexCfgClient;
-import org.forgerock.opendj.server.config.client.LocalDBBackendCfgClient;
-import org.forgerock.opendj.server.config.client.LocalDBVLVIndexCfgClient;
import org.forgerock.opendj.server.config.client.PluggableBackendCfgClient;
import org.forgerock.opendj.server.config.meta.BackendVLVIndexCfgDefn;
-import org.forgerock.opendj.server.config.meta.LocalDBVLVIndexCfgDefn;
import org.opends.guitools.controlpanel.datamodel.BackendDescriptor;
import org.opends.guitools.controlpanel.datamodel.BaseDNDescriptor;
import org.opends.guitools.controlpanel.datamodel.CategorizedComboBoxElement;
@@ -91,7 +87,6 @@
import org.opends.guitools.controlpanel.util.LowerCaseComparator;
import org.opends.guitools.controlpanel.util.Utilities;
import org.opends.quicksetup.Installation;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.config.ConfigException;
import org.opends.server.protocols.ldap.LDAPFilter;
import org.opends.server.types.AttributeType;
@@ -1106,17 +1101,9 @@
final LDAPProfile ldapProfile = LDAPProfile.getInstance();
try (ManagementContext context = LDAPManagementContext.newLDIFManagementContext(configFile, ldapProfile))
{
- final BackendCfgClient backend = context.getRootConfiguration().getBackend(backendName);
- if (backend instanceof LocalDBBackendCfgClient)
- {
- updateLocalDBVLVIndexOffline((LocalDBBackendCfgClient) backend, vlvIndexName, indexToModify, baseDN, filter,
- searchScope, sortOrder);
- }
- else
- {
- updateVLVBackendIndexOnline((PluggableBackendCfgClient) backend, vlvIndexName, indexToModify, baseDN, filter,
- searchScope, sortOrder);
- }
+ final PluggableBackendCfgClient backend =
+ (PluggableBackendCfgClient) context.getRootConfiguration().getBackend(backendName);
+ updateVLVBackendIndexOnline(backend, vlvIndexName, indexToModify, baseDN, filter, searchScope, sortOrder);
}
catch (final Exception e)
{
@@ -1156,39 +1143,4 @@
index.commit();
Utilities.throwFirstFrom(exceptions);
}
-
- @RemoveOnceLocalDBBackendIsPluggable
- private void updateLocalDBVLVIndexOffline(final LocalDBBackendCfgClient backend, final String vlvIndexName,
- final VLVIndexDescriptor indexToModify, final DN baseDN, final String filter, final SearchScope searchScope,
- final List<VLVSortOrder> sortOrder) throws Exception
- {
- final boolean isCreation = indexToModify == null;
- final List<PropertyException> exceptions = new ArrayList<>();
- final LocalDBVLVIndexCfgClient index =
- isCreation ? backend.createLocalDBVLVIndex(LocalDBVLVIndexCfgDefn.getInstance(), vlvIndexName, exceptions)
- : backend.getLocalDBVLVIndex(vlvIndexName);
-
- if (isCreation || !indexToModify.getBaseDN().equals(baseDN))
- {
- index.setBaseDN(baseDN);
- }
-
- if (isCreation || !indexToModify.getFilter().equals(filter))
- {
- index.setFilter(filter);
- }
-
- if (isCreation || !indexToModify.getScope().equals(searchScope))
- {
- index.setScope(Converters.from(VLVIndexDescriptor.getLocalDBVLVIndexScope(searchScope)));
- }
-
- if (isCreation || !indexToModify.getSortOrder().equals(sortOrder))
- {
- index.setSortOrder(getSortOrderStringValue(sortOrder));
- }
- index.commit();
- Utilities.throwFirstFrom(exceptions);
- }
-
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/DBEnvironmentMonitoringPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/DBEnvironmentMonitoringPanel.java
index d42081b..6badb35 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/DBEnvironmentMonitoringPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/DBEnvironmentMonitoringPanel.java
@@ -47,8 +47,7 @@
import javax.swing.table.DefaultTableCellRenderer;
import org.opends.guitools.controlpanel.datamodel.BackendDescriptor;
-import org.opends.guitools.controlpanel.datamodel.
- DBEnvironmentMonitoringTableModel;
+import org.opends.guitools.controlpanel.datamodel.DBEnvironmentMonitoringTableModel;
import org.opends.guitools.controlpanel.datamodel.ServerDescriptor;
import org.opends.guitools.controlpanel.util.Utilities;
import org.opends.server.util.ServerConstants;
@@ -73,16 +72,14 @@
private MonitoringAttributesViewPanel<String> operationViewPanel;
private GenericDialog operationViewDlg;
- /**
- * Default constructor.
- */
+ /** Default constructor. */
public DBEnvironmentMonitoringPanel()
{
super();
createLayout();
}
- /** {@inheritDoc} */
+ @Override
public Component getPreferredFocusComponent()
{
return table;
@@ -114,6 +111,7 @@
Utilities.createButton(INFO_CTRL_PANEL_OPERATIONS_VIEW.get());
showOperations.addActionListener(new ActionListener()
{
+ @Override
public void actionPerformed(ActionEvent ev)
{
operationViewClicked();
@@ -173,8 +171,7 @@
{
for (BackendDescriptor backend : server.getBackends())
{
- if (backend.getType() == BackendDescriptor.Type.LOCAL_DB
- || backend.getType() == BackendDescriptor.Type.PLUGGABLE)
+ if (backend.getType() == BackendDescriptor.Type.PLUGGABLE)
{
dbBackends.add(backend);
if (updateAttributes)
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/IndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/IndexPanel.java
index 95e5b45..8ace834 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/IndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/IndexPanel.java
@@ -24,7 +24,6 @@
* Copyright 2008-2009 Sun Microsystems, Inc.
* Portions Copyright 2014-2015 ForgeRock AS
*/
-
package org.opends.guitools.controlpanel.ui;
import static org.opends.messages.AdminToolMessages.*;
@@ -56,7 +55,6 @@
import javax.swing.event.DocumentListener;
import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.opends.guitools.controlpanel.datamodel.AbstractIndexDescriptor;
import org.opends.guitools.controlpanel.datamodel.ControlPanelInfo;
import org.opends.guitools.controlpanel.datamodel.IndexDescriptor;
@@ -73,10 +71,7 @@
import org.opends.server.admin.client.ldap.LDAPManagementContext;
import org.opends.server.admin.std.client.BackendCfgClient;
import org.opends.server.admin.std.client.BackendIndexCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBIndexCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.AttributeType;
import org.opends.server.types.DN;
@@ -89,7 +84,6 @@
public class IndexPanel extends AbstractIndexPanel
{
private static final long serialVersionUID = 1439500626486823366L;
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
private IndexDescriptor index;
private ScrollPaneBorderListener scrollListener;
@@ -98,9 +92,7 @@
private ModifyIndexTask newModifyTask;
- /**
- * Default constructor.
- */
+ /** Default constructor. */
public IndexPanel()
{
super();
@@ -156,7 +148,7 @@
buttonPanel.add(deleteIndex, gbc);
deleteIndex.addActionListener(new ActionListener()
{
- /** {@inheritDoc} */
+ @Override
public void actionPerformed(ActionEvent ev)
{
deleteIndex();
@@ -172,6 +164,7 @@
buttonPanel.add(saveChanges, gbc);
saveChanges.addActionListener(new ActionListener()
{
+ @Override
public void actionPerformed(ActionEvent ev)
{
saveIndex(false);
@@ -180,16 +173,19 @@
entryLimit.getDocument().addDocumentListener(new DocumentListener()
{
+ @Override
public void insertUpdate(DocumentEvent ev)
{
checkSaveButton();
}
+ @Override
public void changedUpdate(DocumentEvent ev)
{
checkSaveButton();
}
+ @Override
public void removeUpdate(DocumentEvent ev)
{
checkSaveButton();
@@ -198,6 +194,7 @@
ActionListener listener = new ActionListener()
{
+ @Override
public void actionPerformed(ActionEvent ev)
{
checkSaveButton();
@@ -209,19 +206,19 @@
}
}
- /** {@inheritDoc} */
+ @Override
public LocalizableMessage getTitle()
{
return INFO_CTRL_PANEL_INDEX_PANEL_TITLE.get();
}
- /** {@inheritDoc} */
+ @Override
public Component getPreferredFocusComponent()
{
return entryLimit;
}
- /** {@inheritDoc} */
+ @Override
public void configurationChanged(ConfigurationChangeEvent ev)
{
final ServerDescriptor desc = ev.getNewDescriptor();
@@ -231,6 +228,7 @@
INFO_CTRL_PANEL_CANNOT_CONNECT_TO_REMOTE_DETAILS.get(desc.getHostname()));
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
checkSaveButton();
@@ -239,7 +237,7 @@
});
}
- /** {@inheritDoc} */
+ @Override
public void okClicked()
{
}
@@ -429,9 +427,9 @@
JComponent[] comps = {entryLimit, lType, typesPanel, lEntryLimit};
- for (int i = 0; i < comps.length; i++)
+ for (JComponent comp : comps)
{
- comps[i].setVisible(!index.isDatabaseIndex());
+ comp.setVisible(!index.isDatabaseIndex());
}
AttributeType attr = index.getAttributeType();
@@ -516,26 +514,26 @@
indexToModify = index;
}
- /** {@inheritDoc} */
+ @Override
public Type getType()
{
return Type.MODIFY_INDEX;
}
- /** {@inheritDoc} */
+ @Override
public Set<String> getBackends()
{
return backendSet;
}
- /** {@inheritDoc} */
+ @Override
public LocalizableMessage getTaskDescription()
{
return INFO_CTRL_PANEL_MODIFY_INDEX_TASK_DESCRIPTION.get(attributeName,
backendName);
}
- /** {@inheritDoc} */
+ @Override
public boolean canLaunch(Task taskToBeLaunched, Collection<LocalizableMessage> incompatibilityReasons)
{
boolean canLaunch = true;
@@ -582,6 +580,7 @@
{
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
StringBuilder sb = new StringBuilder();
@@ -597,6 +596,7 @@
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml(
@@ -617,6 +617,7 @@
SwingUtilities.invokeLater(new Runnable()
{
+ @Override
public void run()
{
getProgressDialog().appendProgressHtml(
@@ -646,11 +647,6 @@
{
final ManagementContext mCtx = LDAPManagementContext.createFromContext(JNDIDirContextAdaptor.adapt(ctx));
final BackendCfgClient backend = mCtx.getRootConfiguration().getBackend(backendName);
- if (backend instanceof LocalDBBackendCfgClient)
- {
- modifyLocalDBIndexOnline((LocalDBBackendCfgClient) backend);
- return;
- }
modifyBackendIndexOnline((PluggableBackendCfgClient) backend);
}
@@ -669,29 +665,13 @@
index.commit();
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void modifyLocalDBIndexOnline(final LocalDBBackendCfgClient backend) throws OpenDsException
- {
- final LocalDBIndexCfgClient index = backend.getLocalDBIndex(attributeName);
- if (!indexTypes.equals(indexToModify.getTypes()))
- {
- index.setIndexType(IndexTypeDescriptor.toLocalDBIndexTypes(indexTypes));
- }
-
- if (entryLimitValue != index.getIndexEntryLimit())
- {
- index.setIndexEntryLimit(entryLimitValue);
- }
- index.commit();
- }
-
- /** {@inheritDoc} */
+ @Override
protected String getCommandLinePath()
{
return null;
}
- /** {@inheritDoc} */
+ @Override
protected ArrayList<String> getCommandLineArguments()
{
return new ArrayList<>();
@@ -716,7 +696,7 @@
}
}
- /** {@inheritDoc} */
+ @Override
public void runTask()
{
state = State.RUNNING;
@@ -740,7 +720,7 @@
}
}
- /** {@inheritDoc} */
+ @Override
public void postOperation()
{
if (lastException == null && state == State.FINISHED_SUCCESSFULLY)
@@ -752,7 +732,7 @@
private List<String> getDSConfigCommandLineArguments()
{
List<String> args = new ArrayList<>();
- args.add("set-local-db-index-prop");
+ args.add("set-backend-index-prop");
args.add("--backend-name");
args.add(backendName);
@@ -787,7 +767,7 @@
for (IndexTypeDescriptor newType : toAdd)
{
args.add("--add");
- args.add("index-type:" + newType.toLocalDBIndexType());
+ args.add("index-type:" + newType.toBackendIndexType());
}
}
if (entryLimitValue != indexToModify.getEntryLimit())
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewBaseDNPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewBaseDNPanel.java
index 68e9879..32d5f32 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewBaseDNPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewBaseDNPanel.java
@@ -90,15 +90,11 @@
import org.opends.server.admin.client.ldap.LDAPManagementContext;
import org.opends.server.admin.std.client.BackendCfgClient;
import org.opends.server.admin.std.client.BackendIndexCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBIndexCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
import org.opends.server.admin.std.client.RootCfgClient;
import org.opends.server.admin.std.meta.BackendCfgDefn;
import org.opends.server.admin.std.meta.BackendIndexCfgDefn;
import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType;
-import org.opends.server.admin.std.meta.LocalDBIndexCfgDefn;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.extensions.ConfigFileHandler;
import org.opends.server.tools.BackendCreationHelper;
@@ -1048,36 +1044,10 @@
final String backendName = getBackendName();
displayCreateAdditionalIndexesDsConfigCmdLine();
final RootCfgClient root = getRootConfigurationClient();
- if (isLocalDBBackend())
- {
- addJEDefaultIndexes((LocalDBBackendCfgClient) root.getBackend(backendName));
- }
- else
- {
- addBackendDefaultIndexes((PluggableBackendCfgClient) root.getBackend(backendName));
- }
+ addBackendDefaultIndexes((PluggableBackendCfgClient) root.getBackend(backendName));
displayCreateAdditionalIndexesDone();
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void addJEDefaultIndexes(final LocalDBBackendCfgClient jeBackendCfgClient) throws AdminException
- {
- for (DefaultIndex defaultIndex : BackendCreationHelper.DEFAULT_INDEXES)
- {
- final LocalDBIndexCfgClient jeIndex =
- jeBackendCfgClient.createLocalDBIndex(LocalDBIndexCfgDefn.getInstance(), defaultIndex.getName(), null);
-
- final List<LocalDBIndexCfgDefn.IndexType> indexTypes = new LinkedList<>();
- indexTypes.add(LocalDBIndexCfgDefn.IndexType.EQUALITY);
- if (defaultIndex.shouldCreateSubstringIndex())
- {
- indexTypes.add(LocalDBIndexCfgDefn.IndexType.SUBSTRING);
- }
- jeIndex.setIndexType(indexTypes);
- jeIndex.commit();
- }
- }
-
private void addBackendDefaultIndexes(PluggableBackendCfgClient backendCfgClient) throws AdminException
{
for (DefaultIndex defaultIndex : BackendCreationHelper.DEFAULT_INDEXES)
@@ -1171,7 +1141,7 @@
private List<String> getCreateIndexCommandLineArguments(final DefaultIndex defaultIndex)
{
final List<String> args = new ArrayList<>();
- args.add(isLocalDBBackend() ? "create-local-db-index" : "create-backend-index");
+ args.add("create-backend-index");
args.add("--backend-name");
args.add(getBackendName());
args.add("--type");
@@ -1205,13 +1175,6 @@
});
}
- @RemoveOnceLocalDBBackendIsPluggable
- private boolean isLocalDBBackend()
- {
- return getSelectedBackendType().getBackend()
- instanceof org.forgerock.opendj.server.config.meta.LocalDBBackendCfgDefn;
- }
-
/**
* Creates the data in the new base DN.
*
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewIndexPanel.java
index 2ac6454..8c39258 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewIndexPanel.java
@@ -63,12 +63,8 @@
import org.opends.server.admin.client.ldap.LDAPManagementContext;
import org.opends.server.admin.std.client.BackendCfgClient;
import org.opends.server.admin.std.client.BackendIndexCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBIndexCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
import org.opends.server.admin.std.meta.BackendIndexCfgDefn;
-import org.opends.server.admin.std.meta.LocalDBIndexCfgDefn;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.AttributeType;
import org.opends.server.types.DN;
@@ -479,11 +475,6 @@
{
final ManagementContext mCtx = LDAPManagementContext.createFromContext(JNDIDirContextAdaptor.adapt(ctx));
final BackendCfgClient backend = mCtx.getRootConfiguration().getBackend(backendName.getText());
- if (backend instanceof LocalDBBackendCfgClient)
- {
- createLocalDBIndexOnline((LocalDBBackendCfgClient) backend);
- return;
- }
createBackendIndexOnline((PluggableBackendCfgClient) backend);
}
@@ -501,21 +492,6 @@
Utilities.throwFirstFrom(exceptions);
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void createLocalDBIndexOnline(final LocalDBBackendCfgClient backend) throws OpenDsException
- {
- final List<PropertyException> exceptions = new ArrayList<>();
- final LocalDBIndexCfgClient index = backend.createLocalDBIndex(
- LocalDBIndexCfgDefn.getInstance(), attributeName, exceptions);
- index.setIndexType(IndexTypeDescriptor.toLocalDBIndexTypes(indexTypes));
- if (entryLimitValue != index.getIndexEntryLimit())
- {
- index.setIndexEntryLimit(entryLimitValue);
- }
- index.commit();
- Utilities.throwFirstFrom(exceptions);
- }
-
@Override
protected String getCommandLinePath()
{
@@ -578,7 +554,7 @@
private ArrayList<String> getDSConfigCommandLineArguments()
{
ArrayList<String> args = new ArrayList<>();
- args.add("create-local-db-index");
+ args.add("create-backend-index");
args.add("--backend-name");
args.add(backendName.getText());
args.add("--type");
@@ -590,7 +566,7 @@
for (IndexTypeDescriptor type : indexTypes)
{
args.add("--set");
- args.add("index-type:" + type.toLocalDBIndexType());
+ args.add("index-type:" + type.toBackendIndexType());
}
args.add("--set");
args.add("index-entry-limit:" + entryLimitValue);
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewVLVIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewVLVIndexPanel.java
index 10bf54e..bd3e309 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewVLVIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/NewVLVIndexPanel.java
@@ -59,12 +59,8 @@
import org.opends.server.admin.client.ldap.LDAPManagementContext;
import org.opends.server.admin.std.client.BackendCfgClient;
import org.opends.server.admin.std.client.BackendVLVIndexCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBVLVIndexCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
import org.opends.server.admin.std.meta.BackendVLVIndexCfgDefn;
-import org.opends.server.admin.std.meta.LocalDBVLVIndexCfgDefn;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.DN;
import org.opends.server.types.OpenDsException;
@@ -305,12 +301,6 @@
{
final ManagementContext mCtx = LDAPManagementContext.createFromContext(JNDIDirContextAdaptor.adapt(ctx));
final BackendCfgClient backend = mCtx.getRootConfiguration().getBackend(backendName.getText());
-
- if (backend instanceof LocalDBBackendCfgClient)
- {
- createLocalDBVLVIndexOnline((LocalDBBackendCfgClient) backend);
- return;
- }
createBackendVLVIndexOnline((PluggableBackendCfgClient) backend);
}
@@ -328,21 +318,6 @@
Utilities.throwFirstFrom(exceptions);
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void createLocalDBVLVIndexOnline(final LocalDBBackendCfgClient backend) throws OpenDsException
- {
- final List<PropertyException> exceptions = new ArrayList<>();
- final LocalDBVLVIndexCfgClient index =
- backend.createLocalDBVLVIndex(LocalDBVLVIndexCfgDefn.getInstance(), name.getText(), exceptions);
-
- index.setFilter(filterValue);
- index.setSortOrder(sortOrderStringValue);
- index.setBaseDN(DN.valueOf(getBaseDN()));
- index.setScope(VLVIndexDescriptor.getLocalDBVLVIndexScope(getScope()));
- index.commit();
- Utilities.throwFirstFrom(exceptions);
- }
-
@Override
protected String getCommandLinePath()
{
@@ -408,7 +383,7 @@
private List<String> getDSConfigCommandLineArguments()
{
final List<String> args = new ArrayList<>();
- args.add("create-local-db-vlv-index");
+ args.add("create-backend-vlv-index");
args.add("--backend-name");
args.add(backendID);
args.add("--type");
@@ -424,7 +399,7 @@
args.add("filter:" + filterValue);
args.add("--set");
- args.add("scope:" + VLVIndexDescriptor.getLocalDBVLVIndexScope(searchScope));
+ args.add("scope:" + VLVIndexDescriptor.getBackendVLVIndexScope(searchScope));
args.add("--set");
args.add("sort-order:" + sortOrderStringValue);
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/RebuildIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/RebuildIndexPanel.java
index 1c703fe..12ca744 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/RebuildIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/RebuildIndexPanel.java
@@ -87,14 +87,12 @@
createLayout();
}
- /** {@inheritDoc} */
@Override
public void indexModified(final IndexModifiedEvent ev)
{
refreshContents(getInfo().getServerDescriptor());
}
- /** {@inheritDoc} */
@Override
public void backendIndexesModified(final IndexModifiedEvent ev)
{
@@ -168,7 +166,6 @@
addBottomGlue(gbc);
}
- /** {@inheritDoc} */
@Override
public void setInfo(final ControlPanelInfo info)
{
@@ -179,21 +176,18 @@
info.addIndexModifiedListener(this);
}
- /** {@inheritDoc} */
@Override
public LocalizableMessage getTitle()
{
return INFO_CTRL_PANEL_REBUILD_INDEXES_TITLE.get();
}
- /** {@inheritDoc} */
@Override
public Component getPreferredFocusComponent()
{
return baseDNs;
}
- /** {@inheritDoc} */
@Override
public void configurationChanged(final ConfigurationChangeEvent ev)
{
@@ -269,7 +263,6 @@
return true;
}
- /** {@inheritDoc} */
@Override
public void cancelClicked()
{
@@ -278,7 +271,6 @@
super.cancelClicked();
}
- /** {@inheritDoc} */
@Override
public void okClicked()
{
@@ -346,12 +338,10 @@
}
}
- /** {@inheritDoc} */
@Override
protected boolean displayBackend(final BackendDescriptor backend)
{
- return !backend.isConfigBackend() && (backend.getType() == BackendDescriptor.Type.LOCAL_DB
- || backend.getType() == BackendDescriptor.Type.PLUGGABLE);
+ return !backend.isConfigBackend() && backend.getType() == BackendDescriptor.Type.PLUGGABLE;
}
private String getSelectedBaseDN()
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/StatusGenericPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/StatusGenericPanel.java
index 15f0f47..369dbd8 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/StatusGenericPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/StatusGenericPanel.java
@@ -26,6 +26,9 @@
*/
package org.opends.guitools.controlpanel.ui;
+import static org.opends.guitools.controlpanel.ui.ControlCenterMainPane.*;
+import static org.opends.messages.AdminToolMessages.*;
+
import java.awt.CardLayout;
import java.awt.Color;
import java.awt.Component;
@@ -107,9 +110,6 @@
import org.opends.server.util.ServerConstants;
import org.opends.server.util.StaticUtils;
-import static org.opends.guitools.controlpanel.ui.ControlCenterMainPane.*;
-import static org.opends.messages.AdminToolMessages.*;
-
/**
* An abstract class that contains a number of methods that are shared by all
* the inheriting classes. In general a StatusGenericPanel is contained in a
@@ -1434,8 +1434,7 @@
Set<String> dns = new HashSet<>();
for (BackendDescriptor backend : desc.getBackends())
{
- if (backend.getType() == BackendDescriptor.Type.LOCAL_DB
- || backend.getType() == BackendDescriptor.Type.PLUGGABLE)
+ if (backend.getType() == BackendDescriptor.Type.PLUGGABLE)
{
for (BaseDNDescriptor baseDN : backend.getBaseDns())
{
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VLVIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VLVIndexPanel.java
index 3bdec8d..36ef0cf 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VLVIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VLVIndexPanel.java
@@ -77,13 +77,9 @@
import org.opends.server.admin.client.ManagementContext;
import org.opends.server.admin.client.ldap.JNDIDirContextAdaptor;
import org.opends.server.admin.client.ldap.LDAPManagementContext;
-import org.opends.server.admin.std.client.BackendCfgClient;
import org.opends.server.admin.std.client.BackendVLVIndexCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBVLVIndexCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
import org.opends.server.admin.std.client.RootCfgClient;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.DN;
import org.opends.server.types.OpenDsException;
@@ -701,14 +697,7 @@
{
final ManagementContext mCtx = LDAPManagementContext.createFromContext(JNDIDirContextAdaptor.adapt(ctx));
final RootCfgClient root = mCtx.getRootConfiguration();
- final BackendCfgClient backend = root.getBackend(backendID);
-
- if (backend instanceof LocalDBBackendCfgClient)
- {
- modifyLocalDBVLVIndexOnline((LocalDBBackendCfgClient) backend);
- return;
- }
- modifyBackendVLVIndexOnline((PluggableBackendCfgClient) backend);
+ modifyBackendVLVIndexOnline((PluggableBackendCfgClient) root.getBackend(backendID));
}
private void modifyBackendVLVIndexOnline(final PluggableBackendCfgClient backend) throws OpenDsException
@@ -737,33 +726,6 @@
index.commit();
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void modifyLocalDBVLVIndexOnline(final LocalDBBackendCfgClient backend) throws OpenDsException
- {
- final LocalDBVLVIndexCfgClient index = backend.getLocalDBVLVIndex(indexName);
- final DN b = DN.valueOf(baseDN);
- if (!indexToModify.getBaseDN().equals(b))
- {
- index.setBaseDN(b);
- }
-
- if (!indexToModify.getFilter().equals(filterValue))
- {
- index.setFilter(filterValue);
- }
-
- if (indexToModify.getScope() != searchScope)
- {
- index.setScope(VLVIndexDescriptor.getLocalDBVLVIndexScope(searchScope));
- }
-
- if (!indexToModify.getSortOrder().equals(sortOrder))
- {
- index.setSortOrder(sortOrderStringValue);
- }
- index.commit();
- }
-
@Override
protected String getCommandLinePath()
{
@@ -818,7 +780,7 @@
private List<String> getDSConfigCommandLineArguments()
{
final List<String> args = new ArrayList<>();
- args.add("set-local-db-vlv-index-prop");
+ args.add("set-backend-vlv-index-prop");
args.add("--backend-name");
args.add(backendID);
@@ -842,7 +804,7 @@
if (indexToModify.getScope() != searchScope)
{
args.add("--set");
- args.add("scope:" + VLVIndexDescriptor.getLocalDBVLVIndexScope(searchScope));
+ args.add("scope:" + VLVIndexDescriptor.getBackendVLVIndexScope(searchScope));
}
if (!indexToModify.getFilter().equals(filterValue))
{
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VerifyIndexPanel.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VerifyIndexPanel.java
index 19a1906..21bfa57 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VerifyIndexPanel.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/ui/VerifyIndexPanel.java
@@ -98,7 +98,6 @@
createLayout();
}
- /** {@inheritDoc} */
@Override
public void setInfo(ControlPanelInfo info)
{
@@ -109,14 +108,12 @@
info.addIndexModifiedListener(this);
}
- /** {@inheritDoc} */
@Override
public void indexModified(IndexModifiedEvent ev)
{
refreshContents(getInfo().getServerDescriptor());
}
- /** {@inheritDoc} */
@Override
public void backendIndexesModified(IndexModifiedEvent ev)
{
@@ -242,21 +239,18 @@
listener.itemStateChanged(null);
}
- /** {@inheritDoc} */
@Override
public LocalizableMessage getTitle()
{
return INFO_CTRL_PANEL_VERIFY_INDEXES_PANEL_TITLE.get();
}
- /** {@inheritDoc} */
@Override
public Component getPreferredFocusComponent()
{
return baseDNs;
}
- /** {@inheritDoc} */
@Override
public void configurationChanged(ConfigurationChangeEvent ev)
{
@@ -305,7 +299,6 @@
});
}
- /** {@inheritDoc} */
@Override
public void cancelClicked()
{
@@ -315,7 +308,6 @@
super.cancelClicked();
}
- /** {@inheritDoc} */
@Override
public void okClicked()
{
@@ -388,12 +380,10 @@
}
}
- /** {@inheritDoc} */
@Override
protected boolean displayBackend(BackendDescriptor backend)
{
- return !backend.isConfigBackend() && (backend.getType() == BackendDescriptor.Type.LOCAL_DB
- || backend.getType() == BackendDescriptor.Type.PLUGGABLE);
+ return !backend.isConfigBackend() && backend.getType() == BackendDescriptor.Type.PLUGGABLE;
}
private String getSelectedBaseDN()
@@ -479,21 +469,18 @@
this.baseDN = getSelectedBaseDN();
}
- /** {@inheritDoc} */
@Override
public Type getType()
{
return Type.VERIFY_INDEXES;
}
- /** {@inheritDoc} */
@Override
public LocalizableMessage getTaskDescription()
{
return INFO_CTRL_PANEL_VERIFY_INDEX_TASK_DESCRIPTION.get(baseDN);
}
- /** {@inheritDoc} */
@Override
public boolean canLaunch(Task taskToBeLaunched, Collection<LocalizableMessage> incompatibilityReasons)
{
@@ -517,7 +504,6 @@
return canLaunch;
}
- /** {@inheritDoc} */
@Override
public void runTask()
{
@@ -538,7 +524,6 @@
}
}
- /** {@inheritDoc} */
@Override
protected List<String> getCommandLineArguments()
{
@@ -577,7 +562,6 @@
return index.getName();
}
- /** {@inheritDoc} */
@Override
protected String getCommandLinePath()
{
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromDirContext.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromDirContext.java
index 6127f13..e9c9ec3 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromDirContext.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromDirContext.java
@@ -82,9 +82,6 @@
import org.opends.server.admin.std.client.LDAPConnectionHandlerCfgClient;
import org.opends.server.admin.std.client.LDIFBackendCfgClient;
import org.opends.server.admin.std.client.LDIFConnectionHandlerCfgClient;
-import org.opends.server.admin.std.client.LocalDBBackendCfgClient;
-import org.opends.server.admin.std.client.LocalDBIndexCfgClient;
-import org.opends.server.admin.std.client.LocalDBVLVIndexCfgClient;
import org.opends.server.admin.std.client.MemoryBackendCfgClient;
import org.opends.server.admin.std.client.MonitorBackendCfgClient;
import org.opends.server.admin.std.client.PluggableBackendCfgClient;
@@ -96,7 +93,6 @@
import org.opends.server.admin.std.client.RootDNUserCfgClient;
import org.opends.server.admin.std.client.SNMPConnectionHandlerCfgClient;
import org.opends.server.admin.std.client.TaskBackendCfgClient;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.config.ConfigConstants;
import org.opends.server.core.DirectoryServer;
import org.opends.server.tools.tasks.TaskEntry;
@@ -417,41 +413,11 @@
}
Set<IndexDescriptor> indexes = new HashSet<>();
Set<VLVIndexDescriptor> vlvIndexes = new HashSet<>();
- BackendDescriptor.Type type;
- if (backend instanceof LocalDBBackendCfgClient)
+ BackendDescriptor.Type type = getBackendType(backend);
+ if (type == BackendDescriptor.Type.PLUGGABLE)
{
- type = BackendDescriptor.Type.LOCAL_DB;
- refreshLocalDBBackendConfig(indexes, vlvIndexes, backend, errors);
- }
- else if (backend instanceof PluggableBackendCfgClient)
- {
- type = BackendDescriptor.Type.PLUGGABLE;
refreshBackendConfig(indexes, vlvIndexes, backend, errors);
}
- else if (backend instanceof LDIFBackendCfgClient)
- {
- type = BackendDescriptor.Type.LDIF;
- }
- else if (backend instanceof MemoryBackendCfgClient)
- {
- type = BackendDescriptor.Type.MEMORY;
- }
- else if (backend instanceof BackupBackendCfgClient)
- {
- type = BackendDescriptor.Type.BACKUP;
- }
- else if (backend instanceof MonitorBackendCfgClient)
- {
- type = BackendDescriptor.Type.MONITOR;
- }
- else if (backend instanceof TaskBackendCfgClient)
- {
- type = BackendDescriptor.Type.TASK;
- }
- else
- {
- type = BackendDescriptor.Type.OTHER;
- }
BackendDescriptor desc = new BackendDescriptor(
backend.getBackendId(), baseDNs, indexes, vlvIndexes, -1, backend.isEnabled(), type);
@@ -476,6 +442,38 @@
}
}
+ private BackendDescriptor.Type getBackendType(BackendCfgClient backend)
+ {
+ if (backend instanceof PluggableBackendCfgClient)
+ {
+ return BackendDescriptor.Type.PLUGGABLE;
+ }
+ else if (backend instanceof LDIFBackendCfgClient)
+ {
+ return BackendDescriptor.Type.LDIF;
+ }
+ else if (backend instanceof MemoryBackendCfgClient)
+ {
+ return BackendDescriptor.Type.MEMORY;
+ }
+ else if (backend instanceof BackupBackendCfgClient)
+ {
+ return BackendDescriptor.Type.BACKUP;
+ }
+ else if (backend instanceof MonitorBackendCfgClient)
+ {
+ return BackendDescriptor.Type.MONITOR;
+ }
+ else if (backend instanceof TaskBackendCfgClient)
+ {
+ return BackendDescriptor.Type.TASK;
+ }
+ else
+ {
+ return BackendDescriptor.Type.OTHER;
+ }
+ }
+
private void refreshBackendConfig(final Set<IndexDescriptor> indexes,
final Set<VLVIndexDescriptor> vlvIndexes, final BackendCfgClient backend, final List<OpenDsException> errors)
{
@@ -525,51 +523,6 @@
}
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void refreshLocalDBBackendConfig(final Set<IndexDescriptor> indexes,
- final Set<VLVIndexDescriptor> vlvIndexes, final BackendCfgClient backend, final List<OpenDsException> errors)
- {
- LocalDBBackendCfgClient localDBBackend = (LocalDBBackendCfgClient)backend;
- try
- {
- for (String indexName : localDBBackend.listLocalDBIndexes())
- {
- LocalDBIndexCfgClient index = localDBBackend.getLocalDBIndex(indexName);
- indexes.add(new IndexDescriptor(
- index.getAttribute().getNameOrOID(), index.getAttribute(),
- null, IndexTypeDescriptor.fromLocalDBIndexTypes(index.getIndexType()), index.getIndexEntryLimit()));
- }
- }
- catch (OpenDsException oe)
- {
- errors.add(oe);
- }
-
- indexes.add(new IndexDescriptor(DN2ID_INDEX_NAME));
- if (localDBBackend.isSubordinateIndexesEnabled())
- {
- indexes.add(new IndexDescriptor(ID2CHILDREN_INDEX_NAME));
- indexes.add(new IndexDescriptor(ID2SUBTREE_INDEX_NAME));
- }
-
- try
- {
- for (String vlvIndexName : localDBBackend.listLocalDBVLVIndexes())
- {
- LocalDBVLVIndexCfgClient index = localDBBackend.getLocalDBVLVIndex(vlvIndexName);
- String s = index.getSortOrder();
- List<VLVSortOrder> sortOrder = getVLVSortOrder(s);
- vlvIndexes.add(new VLVIndexDescriptor(
- index.getName(), null, index.getBaseDN(), VLVIndexDescriptor.toSearchScope(index.getScope()),
- index.getFilter(), sortOrder));
- }
- }
- catch (OpenDsException oe)
- {
- errors.add(oe);
- }
- }
-
private boolean readIfReplicationIsSecure(final RootCfgClient root, final List<OpenDsException> errors)
{
try
diff --git a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromFile.java b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromFile.java
index 93d4cd2..86c0641 100644
--- a/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromFile.java
+++ b/opendj-server-legacy/src/main/java/org/opends/guitools/controlpanel/util/ConfigFromFile.java
@@ -64,9 +64,6 @@
import org.opends.server.admin.std.server.LDAPConnectionHandlerCfg;
import org.opends.server.admin.std.server.LDIFBackendCfg;
import org.opends.server.admin.std.server.LDIFConnectionHandlerCfg;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
-import org.opends.server.admin.std.server.LocalDBIndexCfg;
-import org.opends.server.admin.std.server.LocalDBVLVIndexCfg;
import org.opends.server.admin.std.server.MemoryBackendCfg;
import org.opends.server.admin.std.server.MonitorBackendCfg;
import org.opends.server.admin.std.server.PluggableBackendCfg;
@@ -78,7 +75,6 @@
import org.opends.server.admin.std.server.RootDNUserCfg;
import org.opends.server.admin.std.server.SNMPConnectionHandlerCfg;
import org.opends.server.admin.std.server.TaskBackendCfg;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.DN;
import org.opends.server.types.OpenDsException;
@@ -229,41 +225,12 @@
}
final Set<IndexDescriptor> indexes = new HashSet<>();
final Set<VLVIndexDescriptor> vlvIndexes = new HashSet<>();
- BackendDescriptor.Type type;
- if (backend instanceof LocalDBBackendCfg)
+ BackendDescriptor.Type type = getBackendType(backend);
+ if (type == BackendDescriptor.Type.PLUGGABLE)
{
- type = BackendDescriptor.Type.LOCAL_DB;
- refreshLocalDBBackendConfig(errors, backend, indexes, vlvIndexes);
- }
- else if (backend instanceof PluggableBackendCfg)
- {
- type = BackendDescriptor.Type.PLUGGABLE;
refreshBackendConfig(indexes, vlvIndexes, backend, errors);
}
- else if (backend instanceof LDIFBackendCfg)
- {
- type = BackendDescriptor.Type.LDIF;
- }
- else if (backend instanceof MemoryBackendCfg)
- {
- type = BackendDescriptor.Type.MEMORY;
- }
- else if (backend instanceof BackupBackendCfg)
- {
- type = BackendDescriptor.Type.BACKUP;
- }
- else if (backend instanceof MonitorBackendCfg)
- {
- type = BackendDescriptor.Type.MONITOR;
- }
- else if (backend instanceof TaskBackendCfg)
- {
- type = BackendDescriptor.Type.TASK;
- }
- else
- {
- type = BackendDescriptor.Type.OTHER;
- }
+
final BackendDescriptor desc =
new BackendDescriptor(backend.getBackendId(), baseDNs, indexes, vlvIndexes, -1, backend.isEnabled(), type);
for (final AbstractIndexDescriptor index : indexes)
@@ -284,6 +251,38 @@
}
}
+ private BackendDescriptor.Type getBackendType(final BackendCfg backend)
+ {
+ if (backend instanceof PluggableBackendCfg)
+ {
+ return BackendDescriptor.Type.PLUGGABLE;
+ }
+ else if (backend instanceof LDIFBackendCfg)
+ {
+ return BackendDescriptor.Type.LDIF;
+ }
+ else if (backend instanceof MemoryBackendCfg)
+ {
+ return BackendDescriptor.Type.MEMORY;
+ }
+ else if (backend instanceof BackupBackendCfg)
+ {
+ return BackendDescriptor.Type.BACKUP;
+ }
+ else if (backend instanceof MonitorBackendCfg)
+ {
+ return BackendDescriptor.Type.MONITOR;
+ }
+ else if (backend instanceof TaskBackendCfg)
+ {
+ return BackendDescriptor.Type.TASK;
+ }
+ else
+ {
+ return BackendDescriptor.Type.OTHER;
+ }
+ }
+
private void refreshBackendConfig(final Set<IndexDescriptor> indexes,
final Set<VLVIndexDescriptor> vlvIndexes, final BackendCfg backend, final List<OpenDsException> errors)
{
@@ -333,48 +332,6 @@
}
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void refreshLocalDBBackendConfig(final List<OpenDsException> errors, final BackendCfg backend,
- final Set<IndexDescriptor> indexes, final Set<VLVIndexDescriptor> vlvIndexes)
- {
- final LocalDBBackendCfg db = (LocalDBBackendCfg) backend;
- try
- {
- for (final String indexName : db.listLocalDBIndexes())
- {
- final LocalDBIndexCfg index = db.getLocalDBIndex(indexName);
- indexes.add(new IndexDescriptor(index.getAttribute().getNameOrOID(), index.getAttribute(), null,
- IndexTypeDescriptor.fromLocalDBIndexTypes(index.getIndexType()), index.getIndexEntryLimit()));
- }
- }
- catch (final ConfigException ce)
- {
- errors.add(toConfigException(ce));
- }
- indexes.add(new IndexDescriptor(DN2ID_INDEX_NAME));
- if (db.isSubordinateIndexesEnabled())
- {
- indexes.add(new IndexDescriptor(ID2CHILDREN_INDEX_NAME));
- indexes.add(new IndexDescriptor(ID2SUBTREE_INDEX_NAME));
- }
-
- try
- {
- for (final String vlvIndexName : db.listLocalDBVLVIndexes())
- {
- final LocalDBVLVIndexCfg index = db.getLocalDBVLVIndex(vlvIndexName);
- final String s = index.getSortOrder();
- final List<VLVSortOrder> sortOrder = getVLVSortOrder(s);
- vlvIndexes.add(new VLVIndexDescriptor(index.getName(), null, index.getBaseDN(), VLVIndexDescriptor
- .toSearchScope(index.getScope()), index.getFilter(), sortOrder));
- }
- }
- catch (final ConfigException ce)
- {
- errors.add(toConfigException(ce));
- }
- }
-
private boolean readIfReplicationIsSecure(final RootCfg root, final List<OpenDsException> errors)
{
try
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/AttributeIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/AttributeIndex.java
deleted file mode 100644
index c8f1c37..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/AttributeIndex.java
+++ /dev/null
@@ -1,938 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- * Portions Copyright 2014 Manuel Gaupp
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import java.io.Closeable;
-import java.util.*;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.LocalizableMessageBuilder;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigChangeResult;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.Assertion;
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.schema.MatchingRule;
-import org.forgerock.opendj.ldap.spi.IndexQueryFactory;
-import org.forgerock.opendj.ldap.spi.Indexer;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.forgerock.util.Utils;
-import org.opends.server.admin.server.ConfigurationChangeListener;
-import org.opends.server.admin.std.meta.LocalDBIndexCfgDefn.IndexType;
-import org.opends.server.admin.std.server.LocalDBIndexCfg;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.types.*;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.DatabaseException;
-
-/**
- * Class representing an attribute index.
- * We have a separate database for each type of indexing, which makes it easy
- * to tell which attribute indexes are configured. The different types of
- * indexing are equality, presence, substrings and ordering. The keys in the
- * ordering index are ordered by setting the btree comparator to the ordering
- * matching rule comparator.
- * Note that the values in the equality index are normalized by the equality
- * matching rule, whereas the values in the ordering index are normalized
- * by the ordering matching rule. If these could be guaranteed to be identical
- * then we would not need a separate ordering index.
- */
-public class AttributeIndex
- implements ConfigurationChangeListener<LocalDBIndexCfg>, Closeable
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** Type of the index filter. */
- static enum IndexFilterType
- {
- /** Equality. */
- EQUALITY(IndexType.EQUALITY),
- /** Presence. */
- PRESENCE(IndexType.PRESENCE),
- /** Ordering. */
- GREATER_OR_EQUAL(IndexType.ORDERING),
- /** Ordering. */
- LESS_OR_EQUAL(IndexType.ORDERING),
- /** Substring. */
- SUBSTRING(IndexType.SUBSTRING),
- /** Approximate. */
- APPROXIMATE(IndexType.APPROXIMATE);
-
- private final IndexType indexType;
-
- private IndexFilterType(IndexType indexType)
- {
- this.indexType = indexType;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return indexType.toString();
- }
- }
-
- /*
- * FIXME Matthew Swift: Once the matching rules have been migrated we should
- * revisit this class. All of the evaluateXXX methods should go (the Matcher
- * class in the SDK could implement the logic, I hope).
- */
-
- /** The entryContainer in which this attribute index resides. */
- private final EntryContainer entryContainer;
-
- /** The attribute index configuration. */
- private LocalDBIndexCfg indexConfig;
- private IndexingOptions indexingOptions;
-
- /** The mapping from names to indexes. */
- private Map<String, Index> indexIdToIndexes;
- private IndexQueryFactory<IndexQuery> indexQueryFactory;
-
- /**
- * Create a new attribute index object.
- *
- * @param indexConfig The attribute index configuration.
- * @param entryContainer The entryContainer of this attribute index.
- * @throws ConfigException if a configuration related error occurs.
- */
- public AttributeIndex(LocalDBIndexCfg indexConfig, EntryContainer entryContainer) throws ConfigException
- {
- this.entryContainer = entryContainer;
- this.indexConfig = indexConfig;
- this.indexingOptions = new JEIndexingOptions(indexConfig.getSubstringLength());
- this.indexIdToIndexes = Collections.unmodifiableMap(buildIndexes(entryContainer, indexConfig, indexingOptions));
- this.indexQueryFactory = new IndexQueryFactoryImpl(indexIdToIndexes, indexingOptions, indexConfig.getAttribute());
- }
-
- private static Map<String, Index> buildIndexes(EntryContainer entryContainer,
- LocalDBIndexCfg config,
- IndexingOptions options) throws ConfigException
- {
- final Map<String, Index> indexes = new HashMap<>();
- final AttributeType attributeType = config.getAttribute();
- final int indexEntryLimit = config.getIndexEntryLimit();
-
- for(IndexType indexType : config.getIndexType()) {
- Collection<? extends Indexer> indexers;
- switch (indexType)
- {
- case PRESENCE:
- indexes.put(indexType.toString(), newPresenceIndex(entryContainer, config));
- indexers = Collections.emptyList();
- break;
- case EXTENSIBLE:
- indexers = getExtensibleIndexers(config.getAttribute(), config.getIndexExtensibleMatchingRule(), options);
- break;
- case APPROXIMATE:
- indexers =
- throwIfNoMatchingRule(attributeType, indexType, attributeType.getApproximateMatchingRule())
- .createIndexers(options);
- break;
- case EQUALITY:
- indexers =
- throwIfNoMatchingRule(attributeType, indexType, attributeType.getEqualityMatchingRule())
- .createIndexers(options);
- break;
- case ORDERING:
- indexers =
- throwIfNoMatchingRule(attributeType, indexType, attributeType.getOrderingMatchingRule())
- .createIndexers(options);
- break;
- case SUBSTRING:
- indexers =
- throwIfNoMatchingRule(attributeType, indexType, attributeType.getSubstringMatchingRule())
- .createIndexers(options);
- break;
- default:
- throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attributeType, indexType.toString()));
- }
- buildAndRegisterIndexesWithIndexers(entryContainer, attributeType, indexEntryLimit, indexers, indexes);
- }
-
- return indexes;
- }
-
- private static Index newPresenceIndex(EntryContainer entryContainer, LocalDBIndexCfg cfg)
- {
- final AttributeType attrType = cfg.getAttribute();
- final String indexName = getIndexName(entryContainer, attrType, IndexType.PRESENCE.toString());
- final PresenceIndexer indexer = new PresenceIndexer(attrType);
- return entryContainer.newIndexForAttribute(indexName, indexer, cfg.getIndexEntryLimit());
- }
-
- private static MatchingRule throwIfNoMatchingRule(AttributeType attributeType, IndexType type, MatchingRule rule)
- throws ConfigException
- {
- if (rule == null)
- {
- throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attributeType, type.toString()));
- }
- return rule;
- }
-
- private static void buildAndRegisterIndexesWithIndexers(EntryContainer entryContainer,
- AttributeType attributeType,
- int indexEntryLimit,
- Collection<? extends Indexer> indexers,
- Map<String, Index> indexes)
- {
- for (Indexer indexer : indexers)
- {
- final String indexID = indexer.getIndexID();
- if (!indexes.containsKey(indexID))
- {
- final Index index = newAttributeIndex(entryContainer, attributeType, indexer, indexEntryLimit);
- indexes.put(indexID, index);
- }
- }
- }
-
- private static Collection<Indexer> getExtensibleIndexers(AttributeType attributeType, Set<String> extensibleRules,
- IndexingOptions options) throws ConfigException
- {
- if (extensibleRules == null || extensibleRules.isEmpty())
- {
- throw new ConfigException(
- ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attributeType, IndexType.EXTENSIBLE.toString()));
- }
-
- final Collection<Indexer> indexers = new ArrayList<>();
- for (final String ruleName : extensibleRules)
- {
- final MatchingRule rule = DirectoryServer.getMatchingRule(toLowerCase(ruleName));
- if (rule == null)
- {
- logger.error(ERR_CONFIG_INDEX_TYPE_NEEDS_VALID_MATCHING_RULE, attributeType, ruleName);
- continue;
- }
- indexers.addAll(rule.createIndexers(options));
- }
-
- return indexers;
- }
-
- private static MatchingRule getMatchingRule(IndexType indexType, AttributeType attrType)
- {
- switch (indexType)
- {
- case APPROXIMATE:
- return attrType.getApproximateMatchingRule();
- case EQUALITY:
- return attrType.getEqualityMatchingRule();
- case ORDERING:
- return attrType.getOrderingMatchingRule();
- case SUBSTRING:
- return attrType.getSubstringMatchingRule();
- default:
- throw new IllegalArgumentException("Not implemented for index type " + indexType);
- }
- }
-
- private static Index newAttributeIndex(EntryContainer entryContainer, AttributeType attributeType,
- org.forgerock.opendj.ldap.spi.Indexer indexer, int indexEntryLimit)
- {
- final String indexName = getIndexName(entryContainer, attributeType, indexer.getIndexID());
- final AttributeIndexer attrIndexer = new AttributeIndexer(attributeType, indexer);
- return entryContainer.newIndexForAttribute(indexName, attrIndexer, indexEntryLimit);
- }
-
- private static String getIndexName(EntryContainer entryContainer, AttributeType attrType, String indexID)
- {
- return entryContainer.getDatabasePrefix() + "_" + attrType.getNameOrOID() + "." + indexID;
- }
-
- /**
- * Open the attribute index.
- *
- * @throws DatabaseException if a JE database error occurs while
- * opening the index.
- */
- public void open() throws DatabaseException
- {
- for (Index index : indexIdToIndexes.values())
- {
- index.open();
- }
- indexConfig.addChangeListener(this);
- }
-
- /** Closes the attribute index. */
- @Override
- public void close()
- {
- Utils.closeSilently(indexIdToIndexes.values());
- indexConfig.removeChangeListener(this);
- // The entryContainer is responsible for closing the JE databases.
- }
-
- /**
- * Get the attribute type of this attribute index.
- * @return The attribute type of this attribute index.
- */
- public AttributeType getAttributeType()
- {
- return indexConfig.getAttribute();
- }
-
- /**
- * Return the indexing options of this AttributeIndex.
- *
- * @return the indexing options of this AttributeIndex.
- */
- public IndexingOptions getIndexingOptions()
- {
- return indexQueryFactory.getIndexingOptions();
- }
-
- /**
- * Get the JE index configuration used by this index.
- * @return The configuration in effect.
- */
- public LocalDBIndexCfg getConfiguration()
- {
- return indexConfig;
- }
-
- /**
- * Update the attribute index for a new entry.
- *
- * @param buffer The index buffer to use to store the added keys
- * @param entryID The entry ID.
- * @param entry The contents of the new entry.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- public void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry) throws DatabaseException, DirectoryException
- {
- for (Index index : indexIdToIndexes.values())
- {
- index.addEntry(buffer, entryID, entry);
- }
- }
-
- /**
- * Update the attribute index for a deleted entry.
- *
- * @param buffer The index buffer to use to store the deleted keys
- * @param entryID The entry ID
- * @param entry The contents of the deleted entry.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- public void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry)
- throws DatabaseException, DirectoryException
- {
- for (Index index : indexIdToIndexes.values())
- {
- index.removeEntry(buffer, entryID, entry);
- }
- }
-
- /**
- * Update the index to reflect a sequence of modifications in a Modify
- * operation.
- *
- * @param buffer The index buffer used to buffer up the index changes.
- * @param entryID The ID of the entry that was modified.
- * @param oldEntry The entry before the modifications were applied.
- * @param newEntry The entry after the modifications were applied.
- * @param mods The sequence of modifications in the Modify operation.
- * @throws DatabaseException If an error occurs during an operation on a
- * JE database.
- */
- public void modifyEntry(IndexBuffer buffer,
- EntryID entryID,
- Entry oldEntry,
- Entry newEntry,
- List<Modification> mods)
- throws DatabaseException
- {
- for (Index index : indexIdToIndexes.values())
- {
- index.modifyEntry(buffer, entryID, oldEntry, newEntry, mods);
- }
- }
-
- /**
- * Makes a byte string representing a substring index key for
- * one substring of a value.
- *
- * @param bytes The byte array containing the value.
- * @param pos The starting position of the substring.
- * @param len The length of the substring.
- * @return A byte string containing a substring key.
- */
- private static ByteString makeSubstringKey(byte[] bytes, int pos, int len)
- {
- byte[] keyBytes = new byte[len];
- System.arraycopy(bytes, pos, keyBytes, 0, len);
- return ByteString.wrap(keyBytes);
- }
-
- /**
- * Decompose an attribute value into a set of substring index keys.
- * The ID of the entry containing this value should be inserted
- * into the list of each of these keys.
- *
- * @param value A byte array containing the normalized attribute value.
- * @return A set of index keys.
- */
- Set<ByteString> substringKeys(byte[] value)
- { // FIXME replace this code with SDK's
- // AbstractSubstringMatchingRuleImpl.SubstringIndexer.createKeys()
-
- // Eliminate duplicates by putting the keys into a set.
- // Sorting the keys will ensure database record locks are acquired
- // in a consistent order and help prevent transaction deadlocks between
- // concurrent writers.
- Set<ByteString> set = new HashSet<>();
-
- int substrLength = indexConfig.getSubstringLength();
-
- // Example: The value is ABCDE and the substring length is 3.
- // We produce the keys ABC BCD CDE DE E
- // To find values containing a short substring such as DE,
- // iterate through keys with prefix DE. To find values
- // containing a longer substring such as BCDE, read keys BCD and CDE.
- for (int i = 0, remain = value.length; remain > 0; i++, remain--)
- {
- int len = Math.min(substrLength, remain);
- set.add(makeSubstringKey(value, i, len));
- }
- return set;
- }
-
- /**
- * Retrieve the entry IDs that might match the provided assertion.
- *
- * @param indexQuery
- * The query used to retrieve entries.
- * @param indexName
- * The name of index used to retrieve entries.
- * @param filter
- * The filter on entries.
- * @param debugBuffer
- * If not null, a diagnostic string will be written which will help
- * determine how the indexes contributed to this search.
- * @param monitor
- * The database environment monitor provider that will keep index
- * filter usage statistics.
- * @return The candidate entry IDs that might contain the filter assertion
- * value.
- */
- private EntryIDSet evaluateIndexQuery(IndexQuery indexQuery, String indexName, SearchFilter filter,
- StringBuilder debugBuffer, DatabaseEnvironmentMonitor monitor)
- {
- LocalizableMessageBuilder debugMessage = monitor.isFilterUseEnabled() ? new LocalizableMessageBuilder() : null;
- EntryIDSet results = indexQuery.evaluate(debugMessage);
-
- if (debugBuffer != null)
- {
- debugBuffer.append("[INDEX:").append(indexConfig.getAttribute().getNameOrOID())
- .append(".").append(indexName).append("]");
- }
-
- if (monitor.isFilterUseEnabled())
- {
- if (results.isDefined())
- {
- monitor.updateStats(filter, results.size());
- }
- else
- {
- monitor.updateStats(filter, debugMessage.toMessage());
- }
- }
- return results;
- }
-
- /**
- * Retrieve the entry IDs that might match two filters that restrict a value
- * to both a lower bound and an upper bound.
- *
- * @param filter1
- * The first filter, that is either a less-or-equal filter or a
- * greater-or-equal filter.
- * @param filter2
- * The second filter, that is either a less-or-equal filter or a
- * greater-or-equal filter. It must not be of the same type than the
- * first filter.
- * @param debugBuffer
- * If not null, a diagnostic string will be written which will help
- * determine how the indexes contributed to this search.
- * @param monitor
- * The database environment monitor provider that will keep index
- * filter usage statistics.
- * @return The candidate entry IDs that might contain match both filters.
- */
- public EntryIDSet evaluateBoundedRange(SearchFilter filter1, SearchFilter filter2, StringBuilder debugBuffer,
- DatabaseEnvironmentMonitor monitor)
- {
- // TODO : this implementation is not optimal
- // as it implies two separate evaluations instead of a single one, thus defeating the purpose of
- // the optimization done in IndexFilter#evaluateLogicalAndFilter method.
- // One solution could be to implement a boundedRangeAssertion that combine the two operations in one.
- // Such an optimization can only work for attributes declared as SINGLE-VALUE, though, since multiple
- // values may match both filters with values outside the range. See OPENDJ-2194.
- StringBuilder tmpBuff1 = debugBuffer != null ? new StringBuilder(): null;
- StringBuilder tmpBuff2 = debugBuffer != null ? new StringBuilder(): null;
- EntryIDSet results1 = evaluate(filter1, tmpBuff1, monitor);
- EntryIDSet results2 = evaluate(filter2, tmpBuff2, monitor);
- if (debugBuffer != null)
- {
- debugBuffer
- .append(filter1).append(tmpBuff1).append(results1)
- .append(filter2).append(tmpBuff2).append(results2);
- }
- results1.retainAll(results2);
- return results1;
- }
-
- private EntryIDSet evaluate(SearchFilter filter, StringBuilder debugBuffer, DatabaseEnvironmentMonitor monitor)
- {
- boolean isLessOrEqual = filter.getFilterType() == FilterType.LESS_OR_EQUAL;
- IndexFilterType indexFilterType = isLessOrEqual ? IndexFilterType.LESS_OR_EQUAL : IndexFilterType.GREATER_OR_EQUAL;
- return evaluateFilter(indexFilterType, filter, debugBuffer, monitor);
- }
-
- /**
- * Retrieve the entry IDs that might match a filter.
- *
- * @param indexFilterType the index type filter
- * @param filter The filter.
- * @param debugBuffer If not null, a diagnostic string will be written
- * which will help determine how the indexes contributed
- * to this search.
- * @param monitor The database environment monitor provider that will keep
- * index filter usage statistics.
- * @return The candidate entry IDs that might contain a value
- * that matches the filter type.
- */
- public EntryIDSet evaluateFilter(IndexFilterType indexFilterType, SearchFilter filter, StringBuilder debugBuffer,
- DatabaseEnvironmentMonitor monitor)
- {
- try
- {
- final IndexQuery indexQuery = getIndexQuery(indexFilterType, filter);
- return evaluateIndexQuery(indexQuery, indexFilterType.toString(), filter, debugBuffer, monitor);
- }
- catch (DecodeException e)
- {
- logger.traceException(e);
- return new EntryIDSet();
- }
- }
-
- private IndexQuery getIndexQuery(IndexFilterType indexFilterType, SearchFilter filter) throws DecodeException
- {
- MatchingRule rule;
- Assertion assertion;
- switch (indexFilterType)
- {
- case EQUALITY:
- rule = filter.getAttributeType().getEqualityMatchingRule();
- assertion = rule.getAssertion(filter.getAssertionValue());
- return assertion.createIndexQuery(indexQueryFactory);
-
- case PRESENCE:
- return indexQueryFactory.createMatchAllQuery();
-
- case GREATER_OR_EQUAL:
- rule = filter.getAttributeType().getOrderingMatchingRule();
- assertion = rule.getGreaterOrEqualAssertion(filter.getAssertionValue());
- return assertion.createIndexQuery(indexQueryFactory);
-
- case LESS_OR_EQUAL:
- rule = filter.getAttributeType().getOrderingMatchingRule();
- assertion = rule.getLessOrEqualAssertion(filter.getAssertionValue());
- return assertion.createIndexQuery(indexQueryFactory);
-
- case SUBSTRING:
- rule = filter.getAttributeType().getSubstringMatchingRule();
- assertion = rule.getSubstringAssertion(
- filter.getSubInitialElement(), filter.getSubAnyElements(), filter.getSubFinalElement());
- return assertion.createIndexQuery(indexQueryFactory);
-
- case APPROXIMATE:
- rule = filter.getAttributeType().getApproximateMatchingRule();
- assertion = rule.getAssertion(filter.getAssertionValue());
- return assertion.createIndexQuery(indexQueryFactory);
-
- default:
- return null;
- }
- }
-
- /**
- * Delegator to {@link ByteSequence#BYTE_ARRAY_COMPARATOR}.
- * <p>
- * This intermediate class is necessary to satisfy JE's requirements for a btree comparator.
- *
- * @see com.sleepycat.je.DatabaseConfig#setBtreeComparator(Comparator)
- */
- public static class KeyComparator implements Comparator<byte[]>
- {
- /** The instance. */
- public static final KeyComparator INSTANCE = new KeyComparator();
-
- /** {@inheritDoc} */
- @Override
- public int compare(byte[] a, byte[] b)
- {
- return ByteSequence.BYTE_ARRAY_COMPARATOR.compare(a, b);
- }
- }
-
- /**
- * Return the number of values that have exceeded the entry limit since this
- * object was created.
- *
- * @return The number of values that have exceeded the entry limit.
- */
- public long getEntryLimitExceededCount()
- {
- long entryLimitExceededCount = 0;
-
- for (Index index : indexIdToIndexes.values())
- {
- entryLimitExceededCount += index.getEntryLimitExceededCount();
- }
- return entryLimitExceededCount;
- }
-
- /**
- * Get a list of the databases opened by this attribute index.
- * @param dbList A list of database containers.
- */
- public void listDatabases(List<DatabaseContainer> dbList)
- {
- dbList.addAll(indexIdToIndexes.values());
- }
-
- /**
- * Get a string representation of this object.
- * @return return A string representation of this object.
- */
- @Override
- public String toString()
- {
- return getName();
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized boolean isConfigurationChangeAcceptable(
- LocalDBIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
- {
- if (!isIndexAcceptable(cfg, IndexType.EQUALITY, unacceptableReasons)
- || !isIndexAcceptable(cfg, IndexType.SUBSTRING, unacceptableReasons)
- || !isIndexAcceptable(cfg, IndexType.ORDERING, unacceptableReasons)
- || !isIndexAcceptable(cfg, IndexType.APPROXIMATE, unacceptableReasons))
- {
- return false;
- }
-
- AttributeType attrType = cfg.getAttribute();
- if (cfg.getIndexType().contains(IndexType.EXTENSIBLE))
- {
- Set<String> newRules = cfg.getIndexExtensibleMatchingRule();
- if (newRules == null || newRules.isEmpty())
- {
- unacceptableReasons.add(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attrType, "extensible"));
- return false;
- }
- }
- return true;
- }
-
- private static boolean isIndexAcceptable(LocalDBIndexCfg cfg, IndexType indexType,
- List<LocalizableMessage> unacceptableReasons)
- {
- final AttributeType attrType = cfg.getAttribute();
- if (cfg.getIndexType().contains(indexType)
- && getMatchingRule(indexType, attrType) == null)
- {
- unacceptableReasons.add(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attrType, indexType.toString()));
- return false;
- }
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized ConfigChangeResult applyConfigurationChange(final LocalDBIndexCfg newConfiguration)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
- final IndexingOptions newIndexingOptions = new JEIndexingOptions(newConfiguration.getSubstringLength());
- try
- {
- Map<String, Index> newIndexIdToIndexes = buildIndexes(entryContainer, newConfiguration, newIndexingOptions);
-
- final Map<String, Index> removedIndexes = new HashMap<>(indexIdToIndexes);
- removedIndexes.keySet().removeAll(newIndexIdToIndexes.keySet());
-
- final Map<String, Index> addedIndexes = new HashMap<>(newIndexIdToIndexes);
- addedIndexes.keySet().removeAll(indexIdToIndexes.keySet());
-
- final Map<String, Index> updatedIndexes = new HashMap<>(indexIdToIndexes);
- updatedIndexes.keySet().retainAll(newIndexIdToIndexes.keySet());
-
- // Replace instances of Index created by buildIndexes() with the one already opened and present in the actual
- // indexIdToIndexes
- newIndexIdToIndexes.putAll(updatedIndexes);
-
- // Open added indexes *before* adding them to indexIdToIndexes
- for (Index addedIndex : addedIndexes.values())
- {
- openIndex(addedIndex, ccr);
- }
-
- indexConfig = newConfiguration;
- indexingOptions = newIndexingOptions;
- indexIdToIndexes = Collections.unmodifiableMap(newIndexIdToIndexes);
- indexQueryFactory = new IndexQueryFactoryImpl(indexIdToIndexes, indexingOptions, indexConfig.getAttribute());
-
- // FIXME: There is no guarantee here that deleted index are not currently involved in a query
- for (Index removedIndex : removedIndexes.values())
- {
- deleteIndex(entryContainer, removedIndex);
- }
-
- for (Index updatedIndex : updatedIndexes.values())
- {
- updateIndex(updatedIndex, newConfiguration.getIndexEntryLimit(), ccr);
- }
- }
- catch (Exception e)
- {
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(e)));
- }
-
- return ccr;
- }
-
- private static void openIndex(Index index, ConfigChangeResult ccr)
- {
- index.open();
- if (!index.isTrusted())
- {
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(index.getName()));
- }
- }
-
- private static void updateIndex(Index updatedIndex, int newIndexEntryLimit, ConfigChangeResult ccr)
- {
- if (updatedIndex.setIndexEntryLimit(newIndexEntryLimit))
- {
- // This index can still be used since index size limit doesn't impact validity of the results.
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(updatedIndex.getName()));
- }
- }
-
- private static void deleteIndex(EntryContainer entryContainer, Index index)
- {
- entryContainer.exclusiveLock.lock();
- try
- {
- entryContainer.deleteDatabase(index);
- }
- finally
- {
- entryContainer.exclusiveLock.unlock();
- }
- }
-
- /**
- * Return true iff this index is trusted.
- * @return the trusted state of this index
- */
- public boolean isTrusted()
- {
- for (Index index : indexIdToIndexes.values())
- {
- if (!index.isTrusted())
- {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Get the JE database name prefix for indexes in this attribute index.
- *
- * @return JE database name for this database container.
- */
- public String getName()
- {
- return entryContainer.getDatabasePrefix()
- + "_"
- + indexConfig.getAttribute().getNameOrOID();
- }
-
- Index getIndex(String indexID) {
- return indexIdToIndexes.get(indexID);
- }
-
- /**
- * Retrieves all the indexes used by this attribute index.
- *
- * @return An immutable collection of all indexes in use by this attribute
- * index.
- */
- public Collection<Index> getAllIndexes() {
- return indexIdToIndexes.values();
- }
-
- /**
- * Retrieve the entry IDs that might match an extensible filter.
- *
- * @param filter The extensible filter.
- * @param debugBuffer If not null, a diagnostic string will be written
- * which will help determine how the indexes contributed
- * to this search.
- * @param monitor The database environment monitor provider that will keep
- * index filter usage statistics.
- * @return The candidate entry IDs that might contain the filter
- * assertion value.
- */
- public EntryIDSet evaluateExtensibleFilter(SearchFilter filter,
- StringBuilder debugBuffer,
- DatabaseEnvironmentMonitor monitor)
- {
- //Get the Matching Rule OID of the filter.
- String matchRuleOID = filter.getMatchingRuleID();
- /*
- * Use the default equality index in two conditions:
- * 1. There is no matching rule provided
- * 2. The matching rule specified is actually the default equality.
- */
- MatchingRule eqRule = indexConfig.getAttribute().getEqualityMatchingRule();
- if (matchRuleOID == null
- || matchRuleOID.equals(eqRule.getOID())
- || matchRuleOID.equalsIgnoreCase(eqRule.getNameOrOID()))
- {
- //No matching rule is defined; use the default equality matching rule.
- return evaluateFilter(IndexFilterType.EQUALITY, filter, debugBuffer, monitor);
- }
-
- MatchingRule rule = DirectoryServer.getMatchingRule(matchRuleOID);
- if (!ruleHasAtLeasOneIndex(rule))
- {
- if (monitor.isFilterUseEnabled())
- {
- monitor.updateStats(filter, INFO_INDEX_FILTER_MATCHING_RULE_NOT_INDEXED.get(
- matchRuleOID, indexConfig.getAttribute().getNameOrOID()));
- }
- return IndexQuery.createNullIndexQuery().evaluate(null);
- }
-
- try
- {
- if (debugBuffer != null)
- {
- debugBuffer.append("[INDEX:");
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.createIndexers(indexingOptions))
- {
- debugBuffer.append(" ")
- .append(filter.getAttributeType().getNameOrOID())
- .append(".")
- .append(indexer.getIndexID());
- }
- debugBuffer.append("]");
- }
-
- final IndexQuery indexQuery = rule.getAssertion(filter.getAssertionValue()).createIndexQuery(indexQueryFactory);
- LocalizableMessageBuilder debugMessage = monitor.isFilterUseEnabled() ? new LocalizableMessageBuilder() : null;
- EntryIDSet results = indexQuery.evaluate(debugMessage);
- if (monitor.isFilterUseEnabled())
- {
- if (results.isDefined())
- {
- monitor.updateStats(filter, results.size());
- }
- else
- {
- monitor.updateStats(filter, debugMessage.toMessage());
- }
- }
- return results;
- }
- catch (DecodeException e)
- {
- logger.traceException(e);
- return IndexQuery.createNullIndexQuery().evaluate(null);
- }
- }
-
- private boolean ruleHasAtLeasOneIndex(MatchingRule rule)
- {
- for (org.forgerock.opendj.ldap.spi.Indexer indexer : rule.createIndexers(indexingOptions))
- {
- if (indexIdToIndexes.containsKey(indexer.getIndexID()))
- {
- return true;
- }
- }
- return false;
- }
-
- /** This class extends the IndexConfig for JE Backend. */
- private static final class JEIndexingOptions implements IndexingOptions
- {
- /** The length of the substring index. */
- private int substringLength;
-
- /**
- * Creates a new JEIndexConfig instance.
- * @param substringLength The length of the substring.
- */
- private JEIndexingOptions(int substringLength)
- {
- this.substringLength = substringLength;
- }
-
- /** {@inheritDoc} */
- @Override
- public int substringKeySize()
- {
- return substringLength;
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/AttributeIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/AttributeIndexer.java
deleted file mode 100644
index c1a7ca7..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/AttributeIndexer.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.schema.Schema;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * This class implements an attribute indexer for matching rules in JE Backend.
- */
-public final class AttributeIndexer extends Indexer
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The attribute type for which this instance will generate index keys. */
- private final AttributeType attributeType;
-
- /**
- * The indexer which will generate the keys
- * for the associated extensible matching rule.
- */
- private final org.forgerock.opendj.ldap.spi.Indexer indexer;
-
- /**
- * Creates a new extensible indexer for JE backend.
- *
- * @param attributeType The attribute type for which an indexer is
- * required.
- * @param extensibleIndexer The extensible indexer to be used.
- */
- public AttributeIndexer(AttributeType attributeType, org.forgerock.opendj.ldap.spi.Indexer extensibleIndexer)
- {
- this.attributeType = attributeType;
- this.indexer = extensibleIndexer;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return attributeType.getNameOrOID() + "." + indexer.getIndexID();
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> keys)
- {
- final List<Attribute> attrList = entry.getAttribute(attributeType);
- if (attrList != null)
- {
- indexAttribute(attrList, keys);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods, Map<ByteString, Boolean> modifiedKeys)
- {
- List<Attribute> newAttributes = newEntry.getAttribute(attributeType, true);
- List<Attribute> oldAttributes = oldEntry.getAttribute(attributeType, true);
-
- indexAttribute(oldAttributes, modifiedKeys, false);
- indexAttribute(newAttributes, modifiedKeys, true);
- }
-
-
-
- /**
- * Generates the set of extensible index keys for an attribute.
- * @param attrList The attribute for which substring keys are required.
- * @param keys The set into which the generated keys will be inserted.
- */
- private void indexAttribute(List<Attribute> attrList, Set<ByteString> keys)
- {
- if (attrList == null)
- {
- return;
- }
-
- for (Attribute attr : attrList)
- {
- if (!attr.isVirtual())
- {
- for (ByteString value : attr)
- {
- try
- {
- indexer.createKeys(Schema.getDefaultSchema(), value, keys);
- }
- catch (DecodeException e)
- {
- logger.traceException(e);
- }
- }
- }
- }
- }
-
- /**
- * Generates the set of index keys for an attribute.
- * @param attrList The attribute to be indexed.
- * @param modifiedKeys The map into which the modified
- * keys will be inserted.
- * @param insert <code>true</code> if generated keys should
- * be inserted or <code>false</code> otherwise.
- */
- private void indexAttribute(List<Attribute> attrList, Map<ByteString, Boolean> modifiedKeys, Boolean insert)
- {
- if (attrList == null)
- {
- return;
- }
-
- final Set<ByteString> keys = new HashSet<>();
- indexAttribute(attrList, keys);
- computeModifiedKeys(modifiedKeys, insert, keys);
- }
-
- /**
- * Computes a map of index keys and a boolean flag indicating whether the
- * corresponding key will be inserted or deleted.
- *
- * @param modifiedKeys
- * A map containing the keys and a boolean. Keys corresponding to the
- * boolean value <code>true</code> should be inserted and
- * <code>false</code> should be deleted.
- * @param insert
- * <code>true</code> if generated keys should be inserted or
- * <code>false</code> otherwise.
- * @param keys
- * The index keys to map.
- */
- private static void computeModifiedKeys(Map<ByteString, Boolean> modifiedKeys,
- Boolean insert, Set<ByteString> keys)
- {
- for (ByteString key : keys)
- {
- Boolean cInsert = modifiedKeys.get(key);
- if (cInsert == null)
- {
- modifiedKeys.put(key, insert);
- }
- else if (!cInsert.equals(insert))
- {
- modifiedKeys.remove(key);
- }
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/BackendImpl.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/BackendImpl.java
deleted file mode 100644
index c222569..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/BackendImpl.java
+++ /dev/null
@@ -1,1435 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2007-2010 Sun Microsystems, Inc.
- * Portions Copyright 2013-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.EnvironmentConfig.*;
-
-import static org.forgerock.util.Reject.*;
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.messages.UtilityMessages.*;
-import static org.opends.server.backends.jeb.ConfigurableEnvironment.*;
-import static org.opends.server.util.ServerConstants.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.logging.Level;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigChangeResult;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.util.Reject;
-import org.opends.server.admin.server.ConfigurationChangeListener;
-import org.opends.server.admin.std.meta.LocalDBIndexCfgDefn;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
-import org.opends.server.api.AlertGenerator;
-import org.opends.server.api.Backend;
-import org.opends.server.api.Backupable;
-import org.opends.server.api.DiskSpaceMonitorHandler;
-import org.opends.server.api.MonitorProvider;
-import org.opends.server.backends.RebuildConfig;
-import org.opends.server.backends.VerifyConfig;
-import org.opends.server.backends.pluggable.spi.StorageStatus;
-import org.opends.server.core.AddOperation;
-import org.opends.server.core.DeleteOperation;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.core.ModifyDNOperation;
-import org.opends.server.core.ModifyOperation;
-import org.opends.server.core.SearchOperation;
-import org.opends.server.core.ServerContext;
-import org.opends.server.extensions.DiskSpaceMonitor;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.BackupConfig;
-import org.opends.server.types.BackupDirectory;
-import org.opends.server.types.CanceledOperationException;
-import org.opends.server.types.DN;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.Entry;
-import org.opends.server.types.IdentifiedException;
-import org.opends.server.types.IndexType;
-import org.opends.server.types.InitializationException;
-import org.opends.server.types.LDIFExportConfig;
-import org.opends.server.types.LDIFImportConfig;
-import org.opends.server.types.LDIFImportResult;
-import org.opends.server.types.Operation;
-import org.opends.server.types.Privilege;
-import org.opends.server.types.RestoreConfig;
-import org.opends.server.util.BackupManager;
-import org.opends.server.util.CollectionUtils;
-import org.opends.server.util.RuntimeInformation;
-
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.Durability;
-import com.sleepycat.je.EnvironmentConfig;
-import com.sleepycat.je.EnvironmentFailureException;
-
-/**
- * This is an implementation of a Directory Server Backend which stores entries
- * locally in a Berkeley DB JE database.
- */
-public class BackendImpl extends Backend<LocalDBBackendCfg>
- implements ConfigurationChangeListener<LocalDBBackendCfg>, AlertGenerator,
- DiskSpaceMonitorHandler, Backupable
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The configuration of this JE backend. */
- private LocalDBBackendCfg cfg;
- /** The root JE container to use for this backend. */
- private RootContainer rootContainer;
- /** A count of the total operation threads currently in the backend. */
- private final AtomicInteger threadTotalCount = new AtomicInteger(0);
- /** A count of the write operation threads currently in the backend. */
- private final AtomicInteger threadWriteCount = new AtomicInteger(0);
- /** The base DNs defined for this backend instance. */
- private DN[] baseDNs;
-
- private MonitorProvider<?> rootContainerMonitor;
- private DiskSpaceMonitor diskMonitor;
- private StorageStatus storageStatus = StorageStatus.working();
-
- /** The controls supported by this backend. */
- private static final Set<String> supportedControls = CollectionUtils.newHashSet(
- OID_SUBTREE_DELETE_CONTROL,
- OID_PAGED_RESULTS_CONTROL,
- OID_MANAGE_DSAIT_CONTROL,
- OID_SERVER_SIDE_SORT_REQUEST_CONTROL,
- OID_VLV_REQUEST_CONTROL);
-
- /** Begin a Backend API method that reads the database. */
- private void readerBegin()
- {
- threadTotalCount.getAndIncrement();
- }
-
- /** End a Backend API method that reads the database. */
- private void readerEnd()
- {
- threadTotalCount.getAndDecrement();
- }
-
- /** Begin a Backend API method that writes the database. */
- private void writerBegin()
- {
- threadTotalCount.getAndIncrement();
- threadWriteCount.getAndIncrement();
- }
-
- /** End a Backend API method that writes the database. */
- private void writerEnd()
- {
- threadWriteCount.getAndDecrement();
- threadTotalCount.getAndDecrement();
- }
-
-
-
- /**
- * Wait until there are no more threads accessing the database. It is assumed
- * that new threads have been prevented from entering the database at the time
- * this method is called.
- */
- private void waitUntilQuiescent()
- {
- while (threadTotalCount.get() > 0)
- {
- // Still have threads in the database so sleep a little
- try
- {
- Thread.sleep(500);
- }
- catch (InterruptedException e)
- {
- logger.traceException(e);
- }
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void configureBackend(LocalDBBackendCfg cfg, ServerContext serverContext) throws ConfigException
- {
- Reject.ifNull(cfg);
-
- this.cfg = cfg;
- baseDNs = this.cfg.getBaseDN().toArray(new DN[0]);
- diskMonitor = serverContext.getDiskSpaceMonitor();
- }
-
- /** {@inheritDoc} */
- @Override
- public void openBackend()
- throws ConfigException, InitializationException
- {
- if (mustOpenRootContainer())
- {
- rootContainer = initializeRootContainer(parseConfigEntry(cfg));
- }
-
- // Preload the database cache.
- rootContainer.preload(cfg.getPreloadTimeLimit());
-
- try
- {
- // Log an informational message about the number of entries.
- logger.info(NOTE_BACKEND_STARTED, cfg.getBackendId(), rootContainer.getEntryCount());
- }
- catch(DatabaseException databaseException)
- {
- logger.traceException(databaseException);
- throw new InitializationException(
- WARN_GET_ENTRY_COUNT_FAILED.get(databaseException.getMessage()), databaseException);
- }
-
- for (DN dn : cfg.getBaseDN())
- {
- try
- {
- DirectoryServer.registerBaseDN(dn, this, false);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- throw new InitializationException(ERR_BACKEND_CANNOT_REGISTER_BASEDN.get(dn, e), e);
- }
- }
-
- // Register a monitor provider for the environment.
- rootContainerMonitor = rootContainer.getMonitorProvider();
- DirectoryServer.registerMonitorProvider(rootContainerMonitor);
-
- // Register as disk space monitor handler
- diskMonitor.registerMonitoredDirectory(getBackendID(), getDirectory(), cfg.getDiskLowThreshold(),
- cfg.getDiskFullThreshold(), this);
-
- //Register as an AlertGenerator.
- DirectoryServer.registerAlertGenerator(this);
- // Register this backend as a change listener.
- cfg.addLocalDBChangeListener(this);
- }
-
- /** {@inheritDoc} */
- @Override
- public File getDirectory()
- {
- File parentDirectory = getFileForPath(cfg.getDBDirectory());
- return new File(parentDirectory, cfg.getBackendId());
- }
-
- /** {@inheritDoc} */
- @Override
- public void closeBackend()
- {
- cfg.removeLocalDBChangeListener(this);
-
- // Deregister our base DNs.
- for (DN dn : rootContainer.getBaseDNs())
- {
- try
- {
- DirectoryServer.deregisterBaseDN(dn);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- }
- }
-
- DirectoryServer.deregisterMonitorProvider(rootContainerMonitor);
- diskMonitor.deregisterMonitoredDirectory(getDirectory(), this);
- // We presume the server will prevent more operations coming into this
- // backend, but there may be existing operations already in the
- // backend. We need to wait for them to finish.
- waitUntilQuiescent();
-
- // Close the database.
- try
- {
- rootContainer.close();
- rootContainer = null;
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- logger.error(ERR_DATABASE_EXCEPTION, e.getMessage());
- }
-
- DirectoryServer.deregisterAlertGenerator(this);
-
- // Make sure the thread counts are zero for next initialization.
- threadTotalCount.set(0);
- threadWriteCount.set(0);
-
- // Log an informational message.
- logger.info(NOTE_BACKEND_OFFLINE, cfg.getBackendId());
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public boolean isIndexed(AttributeType attributeType, IndexType indexType)
- {
- try
- {
- EntryContainer ec = rootContainer.getEntryContainer(baseDNs[0]);
- AttributeIndex ai = ec.getAttributeIndex(attributeType);
- if (ai == null)
- {
- return false;
- }
-
- Set<LocalDBIndexCfgDefn.IndexType> indexTypes =
- ai.getConfiguration().getIndexType();
- switch (indexType)
- {
- case PRESENCE:
- return indexTypes.contains(LocalDBIndexCfgDefn.IndexType.PRESENCE);
-
- case EQUALITY:
- return indexTypes.contains(LocalDBIndexCfgDefn.IndexType.EQUALITY);
-
- case SUBSTRING:
- case SUBINITIAL:
- case SUBANY:
- case SUBFINAL:
- return indexTypes.contains(LocalDBIndexCfgDefn.IndexType.SUBSTRING);
-
- case GREATER_OR_EQUAL:
- case LESS_OR_EQUAL:
- return indexTypes.contains(LocalDBIndexCfgDefn.IndexType.ORDERING);
-
- case APPROXIMATE:
- return indexTypes.contains(LocalDBIndexCfgDefn.IndexType.APPROXIMATE);
-
- default:
- return false;
- }
- }
- catch (Exception e)
- {
- logger.traceException(e);
-
- return false;
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean supports(BackendOperation backendOperation)
- {
- // it supports all the operations so far
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public Set<String> getSupportedFeatures()
- {
- return Collections.emptySet();
- }
-
- /** {@inheritDoc} */
- @Override
- public Set<String> getSupportedControls()
- {
- return supportedControls;
- }
-
- /** {@inheritDoc} */
- @Override
- public DN[] getBaseDNs()
- {
- return baseDNs;
- }
-
- /** {@inheritDoc} */
- @Override
- public long getEntryCount()
- {
- if (rootContainer != null)
- {
- try
- {
- return rootContainer.getEntryCount();
- }
- catch (Exception e)
- {
- logger.traceException(e);
- }
- }
-
- return -1;
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public ConditionResult hasSubordinates(DN entryDN)
- throws DirectoryException
- {
- long ret = numSubordinates(entryDN, false);
- if(ret < 0)
- {
- return ConditionResult.UNDEFINED;
- }
- return ConditionResult.valueOf(ret != 0);
- }
-
- /** {@inheritDoc} */
- @Override
- public long getNumberOfEntriesInBaseDN(DN baseDN) throws DirectoryException {
- checkNotNull(baseDN, "baseDN must not be null");
- EntryContainer ec = rootContainer.getEntryContainer(baseDN);
- if (ec == null || !ec.getBaseDN().equals(baseDN))
- {
- throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, ERR_SEARCH_NO_SUCH_OBJECT.get(baseDN));
- }
- return numSubordinates(baseDN, true);
- }
-
- /** {@inheritDoc} */
- @Override
- public long getNumberOfChildren(DN parentDN) throws DirectoryException {
- checkNotNull(parentDN, "parentDN must not be null");
- return numSubordinates(parentDN, false);
- }
-
- private long numSubordinates(DN entryDN, boolean subtree) throws DirectoryException
- {
- checkRootContainerInitialized();
- EntryContainer ec = rootContainer.getEntryContainer(entryDN);
- if(ec == null)
- {
- return -1;
- }
-
- readerBegin();
- ec.sharedLock.lock();
- try
- {
- long count = ec.getNumSubordinates(entryDN, subtree);
- if(count == Long.MAX_VALUE)
- {
- // The index entry limit has exceeded and there is no count maintained.
- return -1;
- }
- return count;
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- ec.sharedLock.unlock();
- readerEnd();
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public Entry getEntry(DN entryDN) throws DirectoryException
- {
- readerBegin();
- checkRootContainerInitialized();
- EntryContainer ec = rootContainer.getEntryContainer(entryDN);
- ec.sharedLock.lock();
- try
- {
- return ec.getEntry(entryDN);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- ec.sharedLock.unlock();
- readerEnd();
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public void addEntry(Entry entry, AddOperation addOperation)
- throws DirectoryException, CanceledOperationException
- {
- checkDiskSpace(addOperation);
- writerBegin();
-
- checkRootContainerInitialized();
- EntryContainer ec = rootContainer.getEntryContainer(entry.getName());
- ec.sharedLock.lock();
- try
- {
- ec.addEntry(entry, addOperation);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- ec.sharedLock.unlock();
- writerEnd();
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public void deleteEntry(DN entryDN, DeleteOperation deleteOperation)
- throws DirectoryException, CanceledOperationException
- {
- checkDiskSpace(deleteOperation);
- writerBegin();
-
- checkRootContainerInitialized();
- EntryContainer ec = rootContainer.getEntryContainer(entryDN);
- ec.sharedLock.lock();
- try
- {
- ec.deleteEntry(entryDN, deleteOperation);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- ec.sharedLock.unlock();
- writerEnd();
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public void replaceEntry(Entry oldEntry, Entry newEntry,
- ModifyOperation modifyOperation) throws DirectoryException,
- CanceledOperationException
- {
- checkDiskSpace(modifyOperation);
- writerBegin();
-
- checkRootContainerInitialized();
- EntryContainer ec = rootContainer.getEntryContainer(newEntry.getName());
- ec.sharedLock.lock();
-
- try
- {
- ec.replaceEntry(oldEntry, newEntry, modifyOperation);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- ec.sharedLock.unlock();
- writerEnd();
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public void renameEntry(DN currentDN, Entry entry,
- ModifyDNOperation modifyDNOperation)
- throws DirectoryException, CanceledOperationException
- {
- checkDiskSpace(modifyDNOperation);
- writerBegin();
-
- checkRootContainerInitialized();
- EntryContainer currentContainer = rootContainer.getEntryContainer(currentDN);
- EntryContainer container = rootContainer.getEntryContainer(entry.getName());
-
- if (currentContainer != container)
- {
- // FIXME: No reason why we cannot implement a move between containers
- // since the containers share the same database environment.
- LocalizableMessage msg = WARN_FUNCTION_NOT_SUPPORTED.get();
- throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, msg);
- }
-
- currentContainer.sharedLock.lock();
- try
- {
- currentContainer.renameEntry(currentDN, entry, modifyDNOperation);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- currentContainer.sharedLock.unlock();
- writerEnd();
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public void search(SearchOperation searchOperation)
- throws DirectoryException, CanceledOperationException
- {
- readerBegin();
-
- checkRootContainerInitialized();
- EntryContainer ec = rootContainer.getEntryContainer(searchOperation.getBaseDN());
- ec.sharedLock.lock();
-
- try
- {
- ec.search(searchOperation);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- finally
- {
- ec.sharedLock.unlock();
- readerEnd();
- }
- }
-
- private void checkRootContainerInitialized() throws DirectoryException
- {
- if (rootContainer == null)
- {
- LocalizableMessage msg = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), msg);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void exportLDIF(LDIFExportConfig exportConfig)
- throws DirectoryException
- {
- // If the backend already has the root container open, we must use the same
- // underlying root container
- boolean openRootContainer = mustOpenRootContainer();
- final ResultCode errorRC = DirectoryServer.getServerErrorResultCode();
- try
- {
- if (openRootContainer)
- {
- rootContainer = getReadOnlyRootContainer();
- }
-
- ExportJob exportJob = new ExportJob(exportConfig);
- exportJob.exportLDIF(rootContainer);
- }
- catch (IOException ioe)
- {
- logger.traceException(ioe);
- throw new DirectoryException(errorRC, ERR_EXPORT_IO_ERROR.get(ioe.getMessage()), ioe);
- }
- catch (DatabaseException de)
- {
- logger.traceException(de);
- throw createDirectoryException(de);
- }
- catch (ConfigException ce)
- {
- throw new DirectoryException(errorRC, ce.getMessageObject(), ce);
- }
- catch (IdentifiedException e)
- {
- if (e instanceof DirectoryException)
- {
- throw (DirectoryException) e;
- }
- logger.traceException(e);
- throw new DirectoryException(errorRC, e.getMessageObject(), e);
- }
- finally
- {
- closeTemporaryRootContainer(openRootContainer);
- }
- }
-
- private boolean mustOpenRootContainer()
- {
- return rootContainer == null;
- }
-
- /** {@inheritDoc} */
- @Override
- public LDIFImportResult importLDIF(LDIFImportConfig importConfig, ServerContext serverContext)
- throws DirectoryException
- {
- RuntimeInformation.logInfo();
-
- // If the backend already has the root container open, we must use the same
- // underlying root container
- boolean openRootContainer = rootContainer == null;
-
- // If the rootContainer is open, the backend is initialized by something else.
- // We can't do import while the backend is online.
- final ResultCode errorRC = DirectoryServer.getServerErrorResultCode();
- if(!openRootContainer)
- {
- throw new DirectoryException(errorRC, ERR_IMPORT_BACKEND_ONLINE.get());
- }
-
- try
- {
- if (Importer.mustClearBackend(importConfig, cfg))
- {
- // We have the writer lock on the environment, now delete the
- // environment and re-open it. Only do this when we are
- // importing to all the base DNs in the backend or if the backend only
- // have one base DN.
- File parentDirectory = getFileForPath(cfg.getDBDirectory());
- File backendDirectory = new File(parentDirectory, cfg.getBackendId());
- // If the backend does not exist the import will create it.
- if (backendDirectory.exists())
- {
- EnvManager.removeFiles(backendDirectory.getPath());
- }
- }
-
- final EnvironmentConfig envConfig = getEnvConfigForImport();
- final Importer importer = new Importer(importConfig, cfg, envConfig, serverContext);
- rootContainer = initializeRootContainer(envConfig);
- return importer.processImport(rootContainer);
- }
- catch (ExecutionException execEx)
- {
- logger.traceException(execEx);
- if (execEx.getCause() instanceof DirectoryException)
- {
- throw ((DirectoryException) execEx.getCause());
- }
- throw new DirectoryException(errorRC, ERR_EXECUTION_ERROR.get(execEx.getMessage()));
- }
- catch (InterruptedException intEx)
- {
- logger.traceException(intEx);
- throw new DirectoryException(errorRC, ERR_INTERRUPTED_ERROR.get(intEx.getMessage()));
- }
- catch (JebException | InitializationException | ConfigException e)
- {
- logger.traceException(e);
- throw new DirectoryException(errorRC, e.getMessageObject());
- }
- finally
- {
- // leave the backend in the same state.
- try
- {
- if (rootContainer != null)
- {
- long startTime = System.currentTimeMillis();
- rootContainer.close();
- long finishTime = System.currentTimeMillis();
- long closeTime = (finishTime - startTime) / 1000;
- logger.info(NOTE_IMPORT_LDIF_ROOTCONTAINER_CLOSE, closeTime);
- rootContainer = null;
- }
-
- // Sync the environment to disk.
- logger.info(NOTE_IMPORT_CLOSING_DATABASE);
- }
- catch (DatabaseException de)
- {
- logger.traceException(de);
- }
- }
- }
-
- private EnvironmentConfig getEnvConfigForImport()
- {
- final EnvironmentConfig envConfig = new EnvironmentConfig();
- envConfig.setAllowCreate(true);
- envConfig.setTransactional(false);
- envConfig.setDurability(Durability.COMMIT_NO_SYNC);
- envConfig.setLockTimeout(0, TimeUnit.SECONDS);
- envConfig.setTxnTimeout(0, TimeUnit.SECONDS);
- envConfig.setConfigParam(CLEANER_MIN_UTILIZATION,
- String.valueOf(cfg.getDBCleanerMinUtilization()));
- envConfig.setConfigParam(LOG_FILE_MAX,
- String.valueOf(cfg.getDBLogFileMax()));
- return envConfig;
- }
-
- /** {@inheritDoc} */
- @Override
- public long verifyBackend(VerifyConfig verifyConfig)
- throws InitializationException, ConfigException, DirectoryException
- {
- // If the backend already has the root container open, we must use the same
- // underlying root container
- final boolean openRootContainer = mustOpenRootContainer();
- try
- {
- if (openRootContainer)
- {
- rootContainer = getReadOnlyRootContainer();
- }
-
- VerifyJob verifyJob = new VerifyJob(verifyConfig);
- return verifyJob.verifyBackend(rootContainer);
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- throw createDirectoryException(e);
- }
- catch (JebException e)
- {
- logger.traceException(e);
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
- e.getMessageObject());
- }
- finally
- {
- closeTemporaryRootContainer(openRootContainer);
- }
- }
-
-
- /** {@inheritDoc} */
- @Override
- public void rebuildBackend(RebuildConfig rebuildConfig, ServerContext serverContext)
- throws InitializationException, ConfigException, DirectoryException
- {
- // If the backend already has the root container open, we must use the same
- // underlying root container
- boolean openRootContainer = mustOpenRootContainer();
-
- /*
- * If the rootContainer is open, the backend is initialized by something
- * else. We can't do any rebuild of system indexes while others are using
- * this backend.
- */
- final ResultCode errorRC = DirectoryServer.getServerErrorResultCode();
- if(!openRootContainer && rebuildConfig.includesSystemIndex())
- {
- throw new DirectoryException(errorRC, ERR_REBUILD_BACKEND_ONLINE.get());
- }
-
- try
- {
- final EnvironmentConfig envConfig;
- if (openRootContainer)
- {
- envConfig = getEnvConfigForImport();
- rootContainer = initializeRootContainer(envConfig);
- }
- else
- {
- envConfig = parseConfigEntry(cfg);
-
- }
- final Importer importer = new Importer(rebuildConfig, cfg, envConfig, serverContext);
- importer.rebuildIndexes(rootContainer);
- }
- catch (ExecutionException execEx)
- {
- logger.traceException(execEx);
- throw new DirectoryException(errorRC, ERR_EXECUTION_ERROR.get(execEx.getMessage()));
- }
- catch (InterruptedException intEx)
- {
- logger.traceException(intEx);
- throw new DirectoryException(errorRC, ERR_INTERRUPTED_ERROR.get(intEx.getMessage()));
- }
- catch (ConfigException | JebException e)
- {
- logger.traceException(e);
- throw new DirectoryException(errorRC, e.getMessageObject());
- }
- catch (InitializationException e)
- {
- logger.traceException(e);
- throw new InitializationException(e.getMessageObject());
- }
- finally
- {
- closeTemporaryRootContainer(openRootContainer);
- }
- }
-
- /**
- * If a root container was opened in the calling method method as read only,
- * close it to leave the backend in the same state.
- */
- private void closeTemporaryRootContainer(boolean openRootContainer)
- {
- if (openRootContainer && rootContainer != null)
- {
- try
- {
- rootContainer.close();
- rootContainer = null;
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- }
- }
- }
-
-
- /** {@inheritDoc} */
- @Override
- public void createBackup(BackupConfig backupConfig) throws DirectoryException
- {
- new BackupManager(getBackendID()).createBackup(this, backupConfig);
- }
-
- /** {@inheritDoc} */
- @Override
- public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException
- {
- new BackupManager(getBackendID()).removeBackup(backupDirectory, backupID);
- }
-
- /** {@inheritDoc} */
- @Override
- public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException
- {
- new BackupManager(getBackendID()).restoreBackup(this, restoreConfig);
- }
-
- /** {@inheritDoc} */
- @Override
- public ListIterator<Path> getFilesToBackup() throws DirectoryException
- {
- return new JELogFilesIterator(getDirectory(), cfg.getBackendId());
- }
-
- /**
- * Iterator on JE log files to backup.
- * <p>
- * The cleaner thread may delete some log files during the backup. The
- * iterator is automatically renewed if at least one file has been deleted.
- */
- static class JELogFilesIterator implements ListIterator<Path>
- {
- /** Underlying iterator on files. */
- private ListIterator<Path> iterator;
-
- /** Root directory where all files are located. */
- private final File rootDirectory;
-
- private final String backendID;
-
- /** Files to backup. Used to renew the iterator if necessary. */
- private List<Path> files;
-
- private String lastFileName = "";
- private long lastFileSize;
-
- JELogFilesIterator(File rootDirectory, String backendID) throws DirectoryException
- {
- this.rootDirectory = rootDirectory;
- this.backendID = backendID;
- setFiles(BackupManager.getFiles(rootDirectory, new JELogFileFilter(), backendID));
- }
-
- private void setFiles(List<Path> files) {
- this.files = files;
- Collections.sort(files);
- if (!files.isEmpty())
- {
- Path lastFile = files.get(files.size() - 1);
- lastFileName = lastFile.getFileName().toString();
- lastFileSize = lastFile.toFile().length();
- }
- iterator = files.listIterator();
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean hasNext()
- {
- boolean hasNext = iterator.hasNext();
- if (!hasNext && !files.isEmpty())
- {
- try
- {
- List<Path> allFiles = BackupManager.getFiles(rootDirectory, new JELogFileFilter(), backendID);
- List<Path> compare = new ArrayList<>(files);
- compare.removeAll(allFiles);
- if (!compare.isEmpty())
- {
- // at least one file was deleted, the iterator must be renewed based on last file previously available
- List<Path> newFiles =
- BackupManager.getFiles(rootDirectory, new JELogFileFilter(lastFileName, lastFileSize), backendID);
- logger.info(NOTE_JEB_BACKUP_CLEANER_ACTIVITY.get(newFiles.size()));
- if (!newFiles.isEmpty())
- {
- setFiles(newFiles);
- hasNext = iterator.hasNext();
- }
- }
- }
- catch (DirectoryException e)
- {
- logger.error(ERR_BACKEND_LIST_FILES_TO_BACKUP.get(backendID, stackTraceToSingleLineString(e)));
- }
- }
- return hasNext;
- }
-
- /** {@inheritDoc} */
- @Override
- public Path next()
- {
- if (hasNext()) {
- return iterator.next();
- }
- throw new NoSuchElementException();
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean hasPrevious()
- {
- return iterator.hasPrevious();
- }
-
- /** {@inheritDoc} */
- @Override
- public Path previous()
- {
- return iterator.previous();
- }
-
- /** {@inheritDoc} */
- @Override
- public int nextIndex()
- {
- return iterator.nextIndex();
- }
-
- /** {@inheritDoc} */
- @Override
- public int previousIndex()
- {
- return iterator.previousIndex();
- }
-
- /** {@inheritDoc} */
- @Override
- public void remove()
- {
- throw new UnsupportedOperationException("remove() is not implemented");
- }
-
- /** {@inheritDoc} */
- @Override
- public void set(Path e)
- {
- throw new UnsupportedOperationException("set() is not implemented");
- }
-
- /** {@inheritDoc} */
- @Override
- public void add(Path e)
- {
- throw new UnsupportedOperationException("add() is not implemented");
- }
-
- }
-
- /**
- * This class implements a FilenameFilter to detect a JE log file, possibly with a constraint
- * on the file name and file size.
- */
- private static class JELogFileFilter implements FileFilter {
-
- private final String latestFilename;
- private final long latestFileSize;
-
- /**
- * Creates the filter for log files that are newer than provided file name
- * or equal to provided file name and of larger size.
- */
- JELogFileFilter(String latestFilename, long latestFileSize) {
- this.latestFilename = latestFilename;
- this.latestFileSize = latestFileSize;
- }
-
- /** Creates the filter for any JE log file. */
- JELogFileFilter() {
- this("", 0);
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean accept(File file)
- {
- String name = file.getName();
- int cmp = name.compareTo(latestFilename);
- return name.endsWith(".jdb") && (cmp > 0 || (cmp == 0 && file.length() > latestFileSize));
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isDirectRestore()
- {
- // restore is done in an intermediate directory
- return false;
- }
-
- /** {@inheritDoc} */
- @Override
- public Path beforeRestore() throws DirectoryException
- {
- return null;
- }
-
- /** {@inheritDoc} */
- @Override
- public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException
- {
- // intermediate directory content is moved to database directory
- File targetDirectory = getDirectory();
- recursiveDelete(targetDirectory);
- try
- {
- Files.move(restoreDirectory, targetDirectory.toPath());
- }
- catch(IOException e)
- {
- LocalizableMessage msg = ERR_CANNOT_RENAME_RESTORE_DIRECTORY.get(restoreDirectory, targetDirectory.getPath());
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), msg);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationAcceptable(LocalDBBackendCfg config,
- List<LocalizableMessage> unacceptableReasons,
- ServerContext serverContext)
- {
- return isConfigurationChangeAcceptable(config, unacceptableReasons);
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationChangeAcceptable(
- LocalDBBackendCfg cfg,
- List<LocalizableMessage> unacceptableReasons)
- {
- // Make sure that the logging level value is acceptable.
- try {
- Level.parse(cfg.getDBLoggingLevel());
- return true;
- } catch (Exception e) {
- unacceptableReasons.add(ERR_JEB_INVALID_LOGGING_LEVEL.get(cfg.getDBLoggingLevel(), cfg.dn()));
- return false;
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationChange(LocalDBBackendCfg newCfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
- try
- {
- if(rootContainer != null)
- {
- SortedSet<DN> newBaseDNs = newCfg.getBaseDN();
- DN[] newBaseDNsArray = newBaseDNs.toArray(new DN[newBaseDNs.size()]);
-
- // Check for changes to the base DNs.
- removeDeletedBaseDNs(newBaseDNs);
- ConfigChangeResult failure = createNewBaseDNs(newBaseDNsArray, ccr);
- if (failure != null)
- {
- return failure;
- }
-
- baseDNs = newBaseDNsArray;
- }
-
- updateDiskMonitor(diskMonitor, newCfg);
-
- // Put the new configuration in place.
- this.cfg = newCfg;
- }
- catch (Exception e)
- {
- ccr.addMessage(LocalizableMessage.raw(stackTraceToSingleLineString(e)));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- }
- return ccr;
- }
-
- private void updateDiskMonitor(DiskSpaceMonitor dm, LocalDBBackendCfg newCfg)
- {
- diskMonitor.registerMonitoredDirectory(getBackendID(), getDirectory(), newCfg.getDiskLowThreshold(),
- newCfg.getDiskFullThreshold(), this);
- }
-
- private void removeDeletedBaseDNs(SortedSet<DN> newBaseDNs) throws DirectoryException
- {
- for (DN baseDN : cfg.getBaseDN())
- {
- if (!newBaseDNs.contains(baseDN))
- {
- // The base DN was deleted.
- DirectoryServer.deregisterBaseDN(baseDN);
- EntryContainer ec = rootContainer.unregisterEntryContainer(baseDN);
- ec.close();
- ec.delete();
- }
- }
- }
-
- private ConfigChangeResult createNewBaseDNs(DN[] newBaseDNsArray, final ConfigChangeResult ccr)
- {
- for (DN baseDN : newBaseDNsArray)
- {
- if (!rootContainer.getBaseDNs().contains(baseDN))
- {
- try
- {
- // The base DN was added.
- EntryContainer ec = rootContainer.openEntryContainer(baseDN, null);
- rootContainer.registerEntryContainer(baseDN, ec);
- DirectoryServer.registerBaseDN(baseDN, this, false);
- }
- catch (Exception e)
- {
- logger.traceException(e);
-
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- ccr.addMessage(ERR_BACKEND_CANNOT_REGISTER_BASEDN.get(baseDN, e));
- return ccr;
- }
- }
- }
- return null;
- }
-
- /**
- * Returns a handle to the JE root container currently used by this backend.
- * The rootContainer could be NULL if the backend is not initialized.
- *
- * @return The RootContainer object currently used by this backend.
- */
- public RootContainer getRootContainer()
- {
- return rootContainer;
- }
-
- /**
- * Returns a new read-only handle to the JE root container for this backend.
- * The caller is responsible for closing the root container after use.
- *
- * @return The read-only RootContainer object for this backend.
- *
- * @throws ConfigException If an unrecoverable problem arises during
- * initialization.
- * @throws InitializationException If a problem occurs during initialization
- * that is not related to the server
- * configuration.
- */
- public RootContainer getReadOnlyRootContainer()
- throws ConfigException, InitializationException
- {
- EnvironmentConfig envConfig = parseConfigEntry(cfg);
-
- envConfig.setReadOnly(true);
- envConfig.setAllowCreate(false);
- envConfig.setTransactional(false);
- envConfig.setConfigParam(ENV_IS_LOCKING, "true");
- envConfig.setConfigParam(ENV_RUN_CHECKPOINTER, "true");
-
- return initializeRootContainer(envConfig);
- }
-
- /**
- * Clears all the entries from the backend. This method is for test cases
- * that use the JE backend.
- *
- * @throws ConfigException If an unrecoverable problem arises in the
- * process of performing the initialization.
- *
- * @throws JebException If an error occurs while removing the data.
- */
- public void clearBackend()
- throws ConfigException, JebException
- {
- // Determine the backend database directory.
- File parentDirectory = getFileForPath(cfg.getDBDirectory());
- File backendDirectory = new File(parentDirectory, cfg.getBackendId());
- EnvManager.removeFiles(backendDirectory.getPath());
- }
-
- /**
- * Creates a customized DirectoryException from the DatabaseException thrown
- * by JE backend.
- *
- * @param e The DatabaseException to be converted.
- * @return DirectoryException created from exception.
- */
- private DirectoryException createDirectoryException(DatabaseException e) {
- if (e instanceof EnvironmentFailureException && !rootContainer.isValid()) {
- LocalizableMessage message = NOTE_BACKEND_ENVIRONMENT_UNUSABLE.get(getBackendID());
- logger.info(message);
- DirectoryServer.sendAlertNotification(DirectoryServer.getInstance(),
- ALERT_TYPE_BACKEND_ENVIRONMENT_UNUSABLE, message);
- }
-
- String jeMessage = e.getMessage();
- if (jeMessage == null) {
- jeMessage = stackTraceToSingleLineString(e);
- }
- LocalizableMessage message = ERR_DATABASE_EXCEPTION.get(jeMessage);
- return new DirectoryException(
- DirectoryServer.getServerErrorResultCode(), message, e);
- }
-
- /** {@inheritDoc} */
- @Override
- public String getClassName() {
- return BackendImpl.class.getName();
- }
-
- /** {@inheritDoc} */
- @Override
- public Map<String, String> getAlerts()
- {
- Map<String, String> alerts = new LinkedHashMap<>();
-
- alerts.put(ALERT_TYPE_BACKEND_ENVIRONMENT_UNUSABLE,
- ALERT_DESCRIPTION_BACKEND_ENVIRONMENT_UNUSABLE);
- alerts.put(ALERT_TYPE_DISK_SPACE_LOW,
- ALERT_DESCRIPTION_DISK_SPACE_LOW);
- alerts.put(ALERT_TYPE_DISK_FULL,
- ALERT_DESCRIPTION_DISK_FULL);
- return alerts;
- }
-
- /** {@inheritDoc} */
- @Override
- public DN getComponentEntryDN() {
- return cfg.dn();
- }
-
- private RootContainer initializeRootContainer(EnvironmentConfig envConfig)
- throws ConfigException, InitializationException {
- // Open the database environment
- try {
- RootContainer rc = new RootContainer(this, cfg);
- rc.open(envConfig);
- return rc;
- }
- catch (DatabaseException e) {
- logger.traceException(e);
- LocalizableMessage message = ERR_OPEN_ENV_FAIL.get(e.getMessage());
- throw new InitializationException(message, e);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void diskLowThresholdReached(File directory, long thresholdInBytes) {
- storageStatus = StorageStatus.lockedDown(
- WARN_DISK_SPACE_LOW_THRESHOLD_CROSSED.get(directory.getFreeSpace(), directory.getAbsolutePath(),
- thresholdInBytes, getBackendID()));
- }
-
- /** {@inheritDoc} */
- @Override
- public void diskFullThresholdReached(File directory, long thresholdInBytes) {
- storageStatus = StorageStatus.unusable(
- WARN_DISK_SPACE_FULL_THRESHOLD_CROSSED.get(directory.getFreeSpace(), directory.getAbsolutePath(),
- thresholdInBytes, getBackendID()));
- }
-
- /** {@inheritDoc} */
- @Override
- public void diskSpaceRestored(File directory, long lowThresholdInBytes, long fullThresholdInBytes) {
- storageStatus = StorageStatus.working();
- }
-
- private void checkDiskSpace(Operation operation) throws DirectoryException
- {
- if(storageStatus.isUnusable() ||
- (storageStatus.isLockedDown()
- && operation != null
- && !operation.getClientConnection().hasPrivilege(
- Privilege.BYPASS_LOCKDOWN, operation)))
- {
- throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
- WARN_OUT_OF_DISK_SPACE.get());
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java
index 9437445..cf7511f 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java
@@ -26,9 +26,20 @@
*/
package org.opends.server.backends.jeb;
+import static com.sleepycat.je.EnvironmentConfig.*;
+
+import static org.opends.messages.BackendMessages.*;
+import static org.opends.messages.ConfigMessages.*;
+
import java.lang.reflect.Method;
import java.math.BigInteger;
-import java.util.*;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.StringTokenizer;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -41,10 +52,8 @@
import org.opends.server.admin.DurationPropertyDefinition;
import org.opends.server.admin.PropertyDefinition;
import org.opends.server.admin.std.meta.JEBackendCfgDefn;
-import org.opends.server.admin.std.meta.LocalDBBackendCfgDefn;
import org.opends.server.admin.std.server.BackendCfg;
import org.opends.server.admin.std.server.JEBackendCfg;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
import org.opends.server.config.ConfigConstants;
import org.opends.server.core.DirectoryServer;
import org.opends.server.core.MemoryQuota;
@@ -54,11 +63,6 @@
import com.sleepycat.je.EnvironmentConfig;
import com.sleepycat.je.dbi.MemoryBudget;
-import static com.sleepycat.je.EnvironmentConfig.*;
-
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.messages.ConfigMessages.*;
-
/** This class maps JE properties to configuration attributes. */
public class ConfigurableEnvironment
{
@@ -191,14 +195,6 @@
/** A map of JE property names to the corresponding configuration attribute. */
private static HashMap<String, String> attrMap = new HashMap<>();
- /**
- * A map of configuration attribute names to the corresponding configuration object getter method.
- */
- @RemoveOnceLocalDBBackendIsPluggable
- private static Map<String, Method> localDbMethodMap = new HashMap<>();
- /** A map of configuration attribute names to the corresponding configuration PropertyDefinition. */
- @RemoveOnceLocalDBBackendIsPluggable
- private static Map<String, PropertyDefinition<?>> localDbDefnMap = new HashMap<>();
/**
* A map of configuration attribute names to the corresponding configuration object getter method.
@@ -245,28 +241,10 @@
String baseName = attrName.substring(7);
String methodBaseName = propNametoCamlCase(baseName);
- registerLocalDbProp(attrName, methodBaseName);
registerJebProp(attrName, methodBaseName);
attrMap.put(propertyName, attrName);
}
- @RemoveOnceLocalDBBackendIsPluggable
- private static void registerLocalDbProp(String attrName, String methodBaseName) throws Exception
- {
- Class<LocalDBBackendCfg> configClass = LocalDBBackendCfg.class;
- LocalDBBackendCfgDefn defn = LocalDBBackendCfgDefn.getInstance();
- Class<? extends LocalDBBackendCfgDefn> defClass = defn.getClass();
-
- String propName = "get" + methodBaseName + "PropertyDefinition";
- PropertyDefinition<?> propDefn = (PropertyDefinition<?>) defClass.getMethod(propName).invoke(defn);
-
- String methodPrefix = propDefn instanceof BooleanPropertyDefinition ? "is" : "get";
- String methodName = methodPrefix + methodBaseName;
-
- localDbDefnMap.put(attrName, propDefn);
- localDbMethodMap.put(attrName, configClass.getMethod(methodName));
- }
-
private static void registerJebProp(String attrName, String methodBaseName) throws Exception
{
Class<JEBackendCfg> configClass = JEBackendCfg.class;
@@ -303,9 +281,8 @@
{
try
{
- final boolean isLocalDb = cfg instanceof LocalDBBackendCfg;
- PropertyDefinition<?> propDefn = (isLocalDb ? localDbDefnMap : jebDefnMap).get(attrName);
- Method method = (isLocalDb ? localDbMethodMap : jebMethodMap).get(attrName);
+ PropertyDefinition<?> propDefn = jebDefnMap.get(attrName);
+ Method method = jebMethodMap.get(attrName);
if (propDefn instanceof DurationPropertyDefinition)
{
@@ -456,30 +433,6 @@
return setJEProperties(envConfig, cfg.getJEProperty(), attrMap);
}
- /**
- * Parse a configuration associated with a JE environment and create an
- * environment config from it.
- *
- * @param cfg The configuration to be parsed.
- * @return An environment config instance corresponding to the config entry.
- * @throws ConfigException If there is an error in the provided configuration
- * entry.
- */
- @RemoveOnceLocalDBBackendIsPluggable
- public static EnvironmentConfig parseConfigEntry(LocalDBBackendCfg cfg) throws ConfigException
- {
- validateDbCacheSize(cfg.getDBCacheSize());
-
- EnvironmentConfig envConfig = defaultConfig();
- setDurability(envConfig, cfg.isDBTxnNoSync(), cfg.isDBTxnWriteNoSync());
- setJEProperties(cfg, envConfig, cfg.dn().rdn().getAttributeValue(0));
- setDBLoggingLevel(envConfig, cfg.getDBLoggingLevel(), cfg.dn(), cfg.isDBLoggingFileHandlerOn());
-
- // See if there are any native JE properties specified in the config
- // and if so try to parse, evaluate and set them.
- return setJEProperties(envConfig, cfg.getJEProperty(), attrMap);
- }
-
private static void validateDbCacheSize(long dbCacheSize) throws ConfigException
{
if (dbCacheSize != 0)
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DBTest.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DBTest.java
deleted file mode 100644
index 8988d7b..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DBTest.java
+++ /dev/null
@@ -1,1500 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2013-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.ToolMessages.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import static com.forgerock.opendj.cli.ArgumentConstants.*;
-import static com.forgerock.opendj.cli.Utils.*;
-
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.opends.server.admin.std.server.BackendCfg;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
-import org.opends.server.api.Backend;
-import org.opends.server.core.CoreConfigManager;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.core.DirectoryServer.DirectoryServerVersionHandler;
-import org.opends.server.core.LockFileManager;
-import org.opends.server.extensions.ConfigFileHandler;
-import org.opends.server.loggers.JDKLogging;
-import org.opends.server.tools.BackendToolUtils;
-import org.opends.server.types.DN;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.InitializationException;
-import org.opends.server.types.NullOutputStream;
-import org.opends.server.types.SortKey;
-import org.opends.server.util.BuildVersion;
-import org.opends.server.util.StaticUtils;
-
-import com.forgerock.opendj.cli.Argument;
-import com.forgerock.opendj.cli.ArgumentException;
-import com.forgerock.opendj.cli.BooleanArgument;
-import com.forgerock.opendj.cli.CommonArguments;
-import com.forgerock.opendj.cli.IntegerArgument;
-import com.forgerock.opendj.cli.StringArgument;
-import com.forgerock.opendj.cli.SubCommand;
-import com.forgerock.opendj.cli.SubCommandArgumentParser;
-import com.forgerock.opendj.cli.TableBuilder;
-import com.forgerock.opendj.cli.TextTablePrinter;
-import com.sleepycat.je.Cursor;
-import com.sleepycat.je.CursorConfig;
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.LockMode;
-import com.sleepycat.je.OperationStatus;
-
-/**
- * This program provides a utility that may be used to debug a JE backend. This
- * tool provides the ability to list various containers in the backend as well as
- * dump the contents of database containers. This will be
- * a process that is intended to run separate from Directory Server and not
- * internally within the server process (e.g., via the tasks interface).
- */
-public class DBTest
-{
- /** The error stream which this application should use. */
- private final PrintStream err;
-
- /** The output stream which this application should use. */
- private final PrintStream out;
-
- /**
- * Flag indicating whether or not the global arguments have already been
- * initialized.
- */
- private boolean globalArgumentsInitialized;
-
- /** The command-line argument parser. */
- private final SubCommandArgumentParser parser;
-
- /** The argument which should be used to request usage information. */
- private BooleanArgument showUsageArgument;
-
- /** The argument which should be used to specify the config class. */
- private StringArgument configClass;
-
- /** THe argument which should be used to specify the config file. */
- private StringArgument configFile;
-
- /**
- * Flag indicating whether or not the sub-commands have already been
- * initialized.
- */
- private boolean subCommandsInitialized;
-
-
-
- /**
- * Provides the command-line arguments to the main application for
- * processing.
- *
- * @param args
- * The set of command-line arguments provided to this
- * program.
- */
- public static void main(String[] args) {
- int exitCode = main(args, true, System.out, System.err);
- if (exitCode != 0) {
- System.exit(filterExitCode(exitCode));
- }
- }
-
-
- /**
- * Provides the command-line arguments to the main application for
- * processing and returns the exit code as an integer.
- *
- * @param args
- * The set of command-line arguments provided to this
- * program.
- * @param initializeServer
- * Indicates whether to perform basic initialization (which
- * should not be done if the tool is running in the same
- * JVM as the server).
- * @param outStream
- * The output stream for standard output.
- * @param errStream
- * The output stream for standard error.
- * @return Zero to indicate that the program completed successfully,
- * or non-zero to indicate that an error occurred.
- */
- public static int main(String[] args, boolean initializeServer,
- OutputStream outStream, OutputStream errStream) {
- DBTest app = new DBTest(outStream, errStream);
-
- // Run the application.
- return app.run(args, initializeServer);
- }
-
- /**
- * Creates a new dsconfig application instance.
- *
- * @param out
- * The application output stream.
- * @param err
- * The application error stream.
- */
- public DBTest(OutputStream out, OutputStream err)
- {
- this.out = NullOutputStream.wrapOrNullStream(out);
- this.err = NullOutputStream.wrapOrNullStream(err);
- JDKLogging.disableLogging();
-
- LocalizableMessage toolDescription = INFO_DESCRIPTION_DBTEST_TOOL.get();
- this.parser = new SubCommandArgumentParser(getClass().getName(), toolDescription, false);
- this.parser.setShortToolDescription(REF_SHORT_DESC_DBTEST.get());
- this.parser.setVersionHandler(new DirectoryServerVersionHandler());
- }
-
- /**
- * Registers the global arguments with the argument parser.
- *
- * @throws ArgumentException
- * If a global argument could not be registered.
- */
- private void initializeGlobalArguments() throws ArgumentException {
- if (!globalArgumentsInitialized) {
- configClass =
- new StringArgument("configclass", OPTION_SHORT_CONFIG_CLASS,
- OPTION_LONG_CONFIG_CLASS, true, false,
- true, INFO_CONFIGCLASS_PLACEHOLDER.get(),
- ConfigFileHandler.class.getName(), null,
- INFO_DESCRIPTION_CONFIG_CLASS.get());
- configClass.setHidden(true);
-
- configFile =
- new StringArgument("configfile", 'f', "configFile", true, false,
- true, INFO_CONFIGFILE_PLACEHOLDER.get(), null,
- null,
- INFO_DESCRIPTION_CONFIG_FILE.get());
- configFile.setHidden(true);
-
-
- showUsageArgument = CommonArguments.getShowUsage();
-
- // Register the global arguments.
- parser.addGlobalArgument(showUsageArgument);
- parser.setUsageArgument(showUsageArgument, out);
- parser.addGlobalArgument(configClass);
- parser.addGlobalArgument(configFile);
-
- globalArgumentsInitialized = true;
- }
- }
-
-
-
- /**
- * Registers the sub-commands with the argument parser.
- *
- * @throws ArgumentException
- * If a sub-command could not be created.
- */
- private void initializeSubCommands() throws ArgumentException {
- if (!subCommandsInitialized) {
- StringArgument backendID;
- StringArgument baseDN;
- StringArgument databaseName;
- BooleanArgument skipDecode;
- BooleanArgument statsOnly;
- StringArgument maxKeyValue;
- StringArgument minKeyValue;
- IntegerArgument maxDataSize;
- IntegerArgument minDataSize;
- SubCommand sub;
-
- sub = new SubCommand(parser, "list-root-containers",
- INFO_DESCRIPTION_DBTEST_SUBCMD_LIST_ROOT_CONTAINERS.get());
-
-
- sub = new SubCommand(parser, "list-entry-containers",
- INFO_DESCRIPTION_BACKEND_DEBUG_SUBCMD_LIST_ENTRY_CONTAINERS.get());
- backendID =
- new StringArgument("backendid", 'n', "backendID", true, false, true,
- INFO_BACKENDNAME_PLACEHOLDER.get(), null, null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BACKEND_ID.get());
- sub.addArgument(backendID);
-
-
- sub = new SubCommand(parser, "list-database-containers",
- INFO_DESCRIPTION_DBTEST_SUBCMD_LIST_DATABASE_CONTAINERS.get());
- backendID =
- new StringArgument("backendid", 'n', "backendID", true, false, true,
- INFO_BACKENDNAME_PLACEHOLDER.get(), null, null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BACKEND_ID.get());
- sub.addArgument(backendID);
- baseDN =
- new StringArgument("basedn", 'b', "baseDN", false,
- false, true, INFO_BASEDN_PLACEHOLDER.get(), null,
- null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BASE_DN.get());
- sub.addArgument(baseDN);
-
-
- sub = new SubCommand(parser, "dump-database-container",
- INFO_DESCRIPTION_DBTEST_SUBCMD_DUMP_DATABASE_CONTAINER.get());
- backendID =
- new StringArgument("backendid", 'n', "backendID", true, false, true,
- INFO_BACKENDNAME_PLACEHOLDER.get(), null, null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BACKEND_ID.get());
- sub.addArgument(backendID);
- baseDN =
- new StringArgument("basedn", 'b', "baseDN", true,
- false, true, INFO_BASEDN_PLACEHOLDER.get(), null,
- null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BASE_DN.get());
- sub.addArgument(baseDN);
- databaseName =
- new StringArgument("databasename", 'd', "databaseName", true,
- false, true, INFO_DATABASE_NAME_PLACEHOLDER.get(),
- null, null,
- INFO_DESCRIPTION_BACKEND_DEBUG_INDEX_NAME.get());
- sub.addArgument(databaseName);
- skipDecode =
- new BooleanArgument("skipdecode", 'p', "skipDecode",
- INFO_DESCRIPTION_BACKEND_DEBUG_SKIP_DECODE.get());
- sub.addArgument(skipDecode);
- statsOnly =
- new BooleanArgument("statsonly", 'q', "statsOnly",
- INFO_DESCRIPTION_BACKEND_DEBUG_STATS_ONLY.get());
- sub.addArgument(statsOnly);
- maxKeyValue = new StringArgument("maxkeyvalue", 'K', "maxKeyValue", false,
- false, true,
- INFO_MAX_KEY_VALUE_PLACEHOLDER.get(),
- null, null,
- INFO_DESCRIPTION_BACKEND_DEBUG_MAX_KEY_VALUE.get());
- sub.addArgument(maxKeyValue);
- minKeyValue = new StringArgument("minkeyvalue", 'k', "minKeyValue", false,
- false, true,
- INFO_MIN_KEY_VALUE_PLACEHOLDER.get(),
- null,
- null,
- INFO_DESCRIPTION_BACKEND_DEBUG_MIN_KEY_VALUE.get());
- sub.addArgument(minKeyValue);
- maxDataSize = new IntegerArgument("maxdatasize", 'S', "maxDataSize",
- false, false, true,
- INFO_MAX_DATA_SIZE_PLACEHOLDER.get(),
- -1,
- null,
- INFO_DESCRIPTION_BACKEND_DEBUG_MAX_DATA_SIZE.get());
- sub.addArgument(maxDataSize);
- minDataSize = new IntegerArgument("mindatasize", 's', "minDataSize",
- false, false, true,
- INFO_MIN_DATA_SIZE_PLACEHOLDER.get(),
- -1,
- null,
- INFO_DESCRIPTION_BACKEND_DEBUG_MIN_DATA_SIZE.get());
- sub.addArgument(minDataSize);
-
-
- sub = new SubCommand(parser, "list-index-status",
- INFO_DESCRIPTION_BACKEND_DEBUG_SUBCMD_LIST_INDEX_STATUS.get());
- sub.setDocDescriptionSupplement(
- SUPPLEMENT_DESCRIPTION_DBTEST_SUBCMD_LIST_INDEX_STATUS.get());
- backendID =
- new StringArgument("backendid", 'n', "backendID", true, false, true,
- INFO_BACKENDNAME_PLACEHOLDER.get(), null, null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BACKEND_ID.get());
- sub.addArgument(backendID);
- baseDN =
- new StringArgument("basedn", 'b', "baseDN", true,
- true, true, INFO_BASEDN_PLACEHOLDER.get(), null,
- null,
- INFO_DESCRIPTION_BACKEND_DEBUG_BASE_DN.get());
- sub.addArgument(baseDN);
-
- subCommandsInitialized = true;
- }
- }
-
-
- /**
- * Parses the provided command-line arguments and makes the
- * appropriate changes to the Directory Server configuration.
- *
- * @param args
- * The command-line arguments provided to this program.
- * @param initializeServer
- * Indicates whether to perform basic initialization (which
- * should not be done if the tool is running in the same
- * JVM as the server).
- * @return The exit code from the configuration processing. A
- * nonzero value indicates that there was some kind of
- * problem during the configuration processing.
- */
- private int run(String[] args, boolean initializeServer) {
-
- // Register global arguments and sub-commands.
- try {
- initializeGlobalArguments();
- initializeSubCommands();
- } catch (ArgumentException e) {
- printWrappedText(err, ERR_CANNOT_INITIALIZE_ARGS.get(e.getMessage()));
- return 1;
- }
-
- // Parse the command-line arguments provided to this program.
- try {
- parser.parseArguments(args);
- } catch (ArgumentException ae) {
- parser.displayMessageAndUsageReference(err, ERR_ERROR_PARSING_ARGS.get(ae.getMessage()));
- return 1;
- }
-
- // If the usage/version argument was provided, then we don't need
- // to do anything else.
- if (parser.usageOrVersionDisplayed()) {
- return 0;
- }
-
- // Checks the version - if upgrade required, the tool is unusable
- try
- {
- BuildVersion.checkVersionMismatch();
- }
- catch (InitializationException e)
- {
- printWrappedText(err, e.getMessageObject());
- return 1;
- }
-
- // Only initialize the server when run as a standalone
- // application.
- if (initializeServer) {
- // Perform the initial bootstrap of the Directory Server and process the
- // configuration.
- DirectoryServer directoryServer = DirectoryServer.getInstance();
- try
- {
- DirectoryServer.bootstrapClient();
- DirectoryServer.initializeJMX();
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_SERVER_BOOTSTRAP_ERROR.get(getExceptionMessage(e)));
- return 1;
- }
-
- try
- {
- directoryServer.initializeConfiguration(configClass.getValue(),
- configFile.getValue());
- }
- catch (InitializationException ie)
- {
- printWrappedText(err, ERR_CANNOT_LOAD_CONFIG.get(ie.getMessage()));
- return 1;
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_CANNOT_LOAD_CONFIG.get(getExceptionMessage(e)));
- return 1;
- }
-
-
-
- // Initialize the Directory Server schema elements.
- try
- {
- directoryServer.initializeSchema();
- }
- catch (ConfigException | InitializationException e)
- {
- printWrappedText(err, ERR_CANNOT_LOAD_SCHEMA.get(e.getMessage()));
- return 1;
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_CANNOT_LOAD_SCHEMA.get(getExceptionMessage(e)));
- return 1;
- }
-
-
-
- // Initialize the Directory Server core configuration.
- try
- {
- CoreConfigManager coreConfigManager = new CoreConfigManager(directoryServer.getServerContext());
- coreConfigManager.initializeCoreConfig();
- }
- catch (ConfigException | InitializationException e)
- {
- printWrappedText(err, ERR_CANNOT_INITIALIZE_CORE_CONFIG.get(e.getMessage()));
- return 1;
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_CANNOT_INITIALIZE_CORE_CONFIG.get(getExceptionMessage(e)));
- return 1;
- }
-
-
- // Initialize the Directory Server crypto manager.
- try
- {
- directoryServer.initializeCryptoManager();
- }
- catch (ConfigException | InitializationException e)
- {
- printWrappedText(err, ERR_CANNOT_INITIALIZE_CRYPTO_MANAGER.get(e.getMessage()));
- return 1;
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_CANNOT_INITIALIZE_CRYPTO_MANAGER.get(getExceptionMessage(e)));
- return 1;
- }
- }
-
- // Make sure that we have a sub-command.
- if (parser.getSubCommand() == null)
- {
- parser.displayMessageAndUsageReference(err, ERR_BACKEND_DEBUG_MISSING_SUBCOMMAND.get());
- return 1;
- }
-
- // Retrieve the sub-command implementation and run it.
- SubCommand subCommand = parser.getSubCommand();
- try {
- if("list-root-containers".equals(subCommand.getName()))
- {
- return listRootContainers();
- }
- else if("list-entry-containers".equals(subCommand.getName()))
- {
- return listEntryContainers(subCommand.getArgument("backendid"));
- }
- else if("list-database-containers".equals(subCommand.getName()))
- {
- return listDatabaseContainers(subCommand.getArgument("backendid"),
- subCommand.getArgument("basedn"));
- }
- else if("dump-database-container".equals(subCommand.getName()))
- {
- return dumpDatabaseContainer(subCommand.getArgument("backendid"),
- subCommand.getArgument("basedn"),
- subCommand.getArgument("databasename"),
- subCommand.getArgument("skipdecode"),
- subCommand.getArgument("statsonly"),
- subCommand.getArgument("maxkeyvalue"),
- subCommand.getArgument("minkeyvalue"),
- subCommand.getArgument("maxdatasize"),
- subCommand.getArgument("mindatasize"));
- }
- else if("list-index-status".equals(subCommand.getName()))
- {
- return listIndexStatus(subCommand.getArgument("backendid"),
- subCommand.getArgument("basedn"));
- }
- return 0;
- } catch (Exception e) {
- printWrappedText(err, LocalizableMessage.raw(StaticUtils.stackTraceToString(e)));
- return 1;
- }
- }
-
- private int listRootContainers()
- {
- final Map<LocalDBBackendCfg, BackendImpl> jeBackends = getJEBackends(null);
- int count = 0;
-
- // Create a table of their properties.
- TableBuilder builder = new TableBuilder();
-
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_BACKEND_ID.get());
- builder.appendHeading(INFO_LABEL_DBTEST_DB_DIRECTORY.get());
-
- for(Map.Entry<LocalDBBackendCfg, BackendImpl> backend :
- jeBackends.entrySet())
- {
- builder.startRow();
- builder.appendCell(backend.getValue().getBackendID());
- builder.appendCell(backend.getKey().getDBDirectory());
- count++;
- }
-
- TextTablePrinter printer = new TextTablePrinter(out);
- builder.print(printer);
- out.format("%nTotal: %d%n", count);
-
- return 0;
- }
-
- private int listEntryContainers(Argument backendID)
- {
- BackendImpl backend = getBackendById(backendID);
- if(backend == null)
- {
- return 1;
- }
-
- // Acquire an shared lock for the backend.
- try
- {
- String lockFile = LockFileManager.getBackendLockFileName(backend);
- StringBuilder failureReason = new StringBuilder();
- if (! LockFileManager.acquireSharedLock(lockFile, failureReason))
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), failureReason));
- return 1;
- }
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), getExceptionMessage(e)));
- return 1;
- }
-
- RootContainer rc;
- try
- {
- rc = backend.getReadOnlyRootContainer();
- }
- catch(Exception e)
- {
- printWrappedText(
- err, ERR_DBTEST_ERROR_INITIALIZING_BACKEND.get(backend.getBackendID(), stackTraceToSingleLineString(e)));
- return 1;
- }
-
- try
- {
- // Create a table of their properties.
- TableBuilder builder = new TableBuilder();
- int count = 0;
-
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_BASE_DN.get());
- builder.appendHeading(INFO_LABEL_DBTEST_JE_DATABASE_PREFIX.get());
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_ENTRY_COUNT.get());
-
- for(EntryContainer ec : rc.getEntryContainers())
- {
- builder.startRow();
- builder.appendCell(ec.getBaseDN());
- builder.appendCell(ec.getDatabasePrefix());
- builder.appendCell(ec.getEntryCount());
- count++;
- }
-
- TextTablePrinter printer = new TextTablePrinter(out);
- builder.print(printer);
- out.format("%nTotal: %d%n", count);
-
- return 0;
- }
- catch(DatabaseException de)
- {
- printWrappedText(err, ERR_DBTEST_ERROR_READING_DATABASE.get(stackTraceToSingleLineString(de)));
- return 1;
- }
- finally
- {
- close(rc);
- releaseSharedLock(backend);
- }
- }
-
- private int listDatabaseContainers(Argument backendID, Argument baseDN)
- {
- BackendImpl backend = getBackendById(backendID);
- if(backend == null)
- {
- return 1;
- }
-
- DN base = null;
- if(baseDN.isPresent())
- {
- try
- {
- base = DN.valueOf(baseDN.getValue());
- }
- catch(DirectoryException de)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_DECODE_BASE_DN.get(baseDN.getValue(), getExceptionMessage(de)));
- return 1;
- }
- }
-
- // Acquire an shared lock for the backend.
- try
- {
- String lockFile = LockFileManager.getBackendLockFileName(backend);
- StringBuilder failureReason = new StringBuilder();
- if (! LockFileManager.acquireSharedLock(lockFile, failureReason))
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), failureReason));
- return 1;
- }
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), getExceptionMessage(e)));
- return 1;
- }
-
- RootContainer rc;
- try
- {
- rc = backend.getReadOnlyRootContainer();
- }
- catch(Exception e)
- {
- printWrappedText(
- err, ERR_DBTEST_ERROR_INITIALIZING_BACKEND.get(backend.getBackendID(), stackTraceToSingleLineString(e)));
- return 1;
- }
-
-
- try
- {
- // Create a table of their properties.
- TableBuilder builder = new TableBuilder();
- int count = 0;
-
- builder.appendHeading(INFO_LABEL_DBTEST_DATABASE_NAME.get());
- builder.appendHeading(INFO_LABEL_DBTEST_DATABASE_TYPE.get());
- builder.appendHeading(INFO_LABEL_DBTEST_JE_DATABASE_NAME.get());
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_ENTRY_COUNT.get());
-
- if(base != null)
- {
- EntryContainer ec = rc.getEntryContainer(base);
- if(ec == null)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_NO_ENTRY_CONTAINERS_FOR_BASE_DN.get(base, backend.getBackendID()));
- return 1;
- }
-
- count = appendDatabaseContainerRows(builder, ec, count);
- }
- else
- {
- for(EntryContainer ec : rc.getEntryContainers())
- {
- builder.startRow();
- builder.appendCell("Base DN: " + ec.getBaseDN());
- count = appendDatabaseContainerRows(builder, ec, count);
- }
- }
-
- TextTablePrinter printer = new TextTablePrinter(out);
- builder.print(printer);
- out.format("%nTotal: %d%n", count);
-
- return 0;
- }
- catch(DatabaseException de)
- {
- printWrappedText(err, ERR_DBTEST_ERROR_READING_DATABASE.get(stackTraceToSingleLineString(de)));
- return 1;
- }
- finally
- {
- close(rc);
- releaseSharedLock(backend);
- }
- }
-
- private int appendDatabaseContainerRows(TableBuilder builder, EntryContainer ec, int count)
- {
- ArrayList<DatabaseContainer> databaseContainers = new ArrayList<>();
- ec.listDatabases(databaseContainers);
- String toReplace = ec.getDatabasePrefix() + "_";
- for(DatabaseContainer dc : databaseContainers)
- {
- builder.startRow();
- builder.appendCell(dc.getName().replace(toReplace, ""));
- builder.appendCell(dc.getClass().getSimpleName());
- builder.appendCell(dc.getName());
- builder.appendCell(dc.getRecordCount());
- count++;
- }
- return count;
- }
-
- private void close(RootContainer rc)
- {
- try
- {
- rc.close();
- }
- catch(DatabaseException ignored)
- {
- // Ignore.
- }
- }
-
- private void releaseSharedLock(BackendImpl backend)
- {
- try
- {
- String lockFile = LockFileManager.getBackendLockFileName(backend);
- StringBuilder failureReason = new StringBuilder();
- if (!LockFileManager.releaseLock(lockFile, failureReason))
- {
- printWrappedText(err, WARN_BACKEND_DEBUG_CANNOT_UNLOCK_BACKEND.get(backend.getBackendID(), failureReason));
- }
- }
- catch (Exception e)
- {
- printWrappedText(err, WARN_BACKEND_DEBUG_CANNOT_UNLOCK_BACKEND.get(
- backend.getBackendID(), getExceptionMessage(e)));
- }
- }
-
- private BackendImpl getBackendById(Argument backendId)
- {
- final String backendID = backendId.getValue();
- final List<Backend<?>> otherBackends = new ArrayList<>();
- final Map<LocalDBBackendCfg, BackendImpl> jeBackends = getJEBackends(otherBackends);
-
- for (BackendImpl b : jeBackends.values())
- {
- if (b.getBackendID().equalsIgnoreCase(backendID))
- {
- return b;
- }
- }
-
- for (Backend<?> b : otherBackends)
- {
- if (b.getBackendID().equalsIgnoreCase(backendID))
- {
- printWrappedText(err, ERR_DBTEST_NOT_JE_BACKEND.get(backendID));
- return null;
- }
- }
- printWrappedText(err, ERR_BACKEND_DEBUG_NO_BACKENDS_FOR_ID.get(backendID));
- return null;
- }
-
- private int listIndexStatus(Argument backendID, Argument baseDN)
- {
- BackendImpl backend = getBackendById(backendID);
- if(backend == null)
- {
- return 1;
- }
-
- DN base = null;
- if(baseDN.isPresent())
- {
- try
- {
- base = DN.valueOf(baseDN.getValue());
- }
- catch(DirectoryException de)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_DECODE_BASE_DN.get(baseDN.getValue(), getExceptionMessage(de)));
- return 1;
- }
- }
-
- // Acquire an shared lock for the backend.
- try
- {
- String lockFile = LockFileManager.getBackendLockFileName(backend);
- StringBuilder failureReason = new StringBuilder();
- if (! LockFileManager.acquireSharedLock(lockFile, failureReason))
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), failureReason));
- return 1;
- }
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), getExceptionMessage(e)));
- return 1;
- }
-
- RootContainer rc;
- try
- {
- rc = backend.getReadOnlyRootContainer();
- }
- catch(Exception e)
- {
- printWrappedText(
- err, ERR_DBTEST_ERROR_INITIALIZING_BACKEND.get(backend.getBackendID(), stackTraceToSingleLineString(e)));
- return 1;
- }
-
-
- try
- {
- // Create a table of their properties.
- TableBuilder builder = new TableBuilder();
- int count = 0;
-
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_INDEX_NAME.get());
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_INDEX_TYPE.get());
- builder.appendHeading(INFO_LABEL_DBTEST_JE_DATABASE_NAME.get());
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_INDEX_STATUS.get());
- builder.appendHeading(INFO_LABEL_BACKEND_DEBUG_RECORD_COUNT.get());
- builder.appendHeading(
- INFO_LABEL_DBTEST_INDEX_UNDEFINED_RECORD_COUNT.get());
- builder.appendHeading(LocalizableMessage.raw("95%"));
- builder.appendHeading(LocalizableMessage.raw("90%"));
- builder.appendHeading(LocalizableMessage.raw("85%"));
-
-
- EntryContainer ec = rc.getEntryContainer(base);
- if(ec == null)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_NO_ENTRY_CONTAINERS_FOR_BASE_DN.get(base, backend.getBackendID()));
- return 1;
- }
-
- ArrayList<DatabaseContainer> databaseContainers = new ArrayList<>();
- Map<Index, StringBuilder> undefinedKeys = new HashMap<>();
- ec.listDatabases(databaseContainers);
- String toReplace = ec.getDatabasePrefix() + "_";
- for(DatabaseContainer dc : databaseContainers)
- {
- if(dc instanceof Index || dc instanceof VLVIndex)
- {
- builder.startRow();
- builder.appendCell(dc.getName().replace(toReplace, ""));
- builder.appendCell(dc.getClass().getSimpleName());
- builder.appendCell(dc.getName());
- builder.appendCell(ec.getState().getIndexTrustState(null, dc));
- builder.appendCell(dc.getRecordCount());
-
- if(dc instanceof Index)
- {
- Index index = (Index)dc;
- long undefined = 0, ninetyFive = 0, ninety = 0, eighty = 0;
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- LockMode lockMode = LockMode.DEFAULT;
- OperationStatus status;
-
- Cursor cursor = dc.openCursor(null, CursorConfig.DEFAULT);
- status = cursor.getFirst(key, data, lockMode);
- while(status == OperationStatus.SUCCESS)
- {
- byte[] bytes = data.getData();
- if (bytes.length == 0 || (bytes[0] & 0x80) == 0x80)
- {
- // Entry limit has exceeded and there is no encoded
- // undefined set size.
- undefined ++;
- StringBuilder keyList = undefinedKeys.get(index);
- if(keyList == null)
- {
- keyList = new StringBuilder();
- undefinedKeys.put(index, keyList);
- }
- else
- {
- keyList.append(" ");
- }
- if(index == ec.getID2Children() || index == ec.getID2Subtree())
- {
- keyList.append("[").append(
- JebFormat.entryIDFromDatabase(key.getData())).append("]");
- }
- else
- {
- keyList.append("[").append(
- new String(key.getData())).append("]");
- }
- }
- else
- {
- // Seems like entry limit has not been exceeded and the bytes
- // is a list of entry IDs.
- double percentFull =
- (bytes.length / (double)8) / index.getIndexEntryLimit();
- if(percentFull >= .8)
- {
- if(percentFull < .9)
- {
- eighty++;
- }
- else if(percentFull < .95)
- {
- ninety++;
- }
- else
- {
- ninetyFive++;
- }
- }
- }
- status = cursor.getNext(key, data, lockMode);
- }
- builder.appendCell(undefined);
- builder.appendCell(ninetyFive);
- builder.appendCell(ninety);
- builder.appendCell(eighty);
- cursor.close();
- }
- else
- {
- builder.appendCell("-");
- builder.appendCell("-");
- builder.appendCell("-");
- builder.appendCell("-");
- }
-
- count++;
- }
- }
-
- TextTablePrinter printer = new TextTablePrinter(out);
- builder.print(printer);
- out.format("%nTotal: %d%n", count);
- for(Map.Entry<Index, StringBuilder> e : undefinedKeys.entrySet())
- {
- out.format("%nIndex: %s%n", e.getKey().getName().replace(toReplace, ""));
- out.format("Undefined keys: %s%n", e.getValue().toString());
- }
- return 0;
- }
- catch(DatabaseException de)
- {
- printWrappedText(err, ERR_DBTEST_ERROR_READING_DATABASE.get(stackTraceToSingleLineString(de)));
- return 1;
- }
- finally
- {
- close(rc);
- releaseSharedLock(backend);
- }
- }
-
- private int dumpDatabaseContainer(Argument backendID, Argument baseDN,
- Argument databaseName, Argument skipDecode,
- Argument statsOnly,
- Argument maxKeyValue, Argument minKeyValue,
- Argument maxDataSize, Argument minDataSize)
- {
- BackendImpl backend = getBackendById(backendID);
- if(backend == null)
- {
- return 1;
- }
-
- DN base = null;
- try
- {
- base = DN.valueOf(baseDN.getValue());
- }
- catch(DirectoryException de)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_DECODE_BASE_DN.get(baseDN.getValue(), getExceptionMessage(de)));
- return 1;
- }
-
- // Acquire an shared lock for the backend.
- try
- {
- String lockFile = LockFileManager.getBackendLockFileName(backend);
- StringBuilder failureReason = new StringBuilder();
- if (! LockFileManager.acquireSharedLock(lockFile, failureReason))
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), failureReason));
- return 1;
- }
- }
- catch (Exception e)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_CANNOT_LOCK_BACKEND.get(backend.getBackendID(), getExceptionMessage(e)));
- return 1;
- }
-
- RootContainer rc;
- try
- {
- rc = backend.getReadOnlyRootContainer();
- }
- catch(Exception e)
- {
- printWrappedText(
- err, ERR_DBTEST_ERROR_INITIALIZING_BACKEND.get(backend.getBackendID(), stackTraceToSingleLineString(e)));
- return 1;
- }
-
- try
- {
- EntryContainer ec = rc.getEntryContainer(base);
- if(ec == null)
- {
- printWrappedText(err, ERR_BACKEND_DEBUG_NO_ENTRY_CONTAINERS_FOR_BASE_DN.get(base, backend.getBackendID()));
- return 1;
- }
-
- DatabaseContainer databaseContainer = null;
- ArrayList<DatabaseContainer> databaseContainers = new ArrayList<>();
- ec.listDatabases(databaseContainers);
- String toReplace = ec.getDatabasePrefix() + "_";
- for(DatabaseContainer dc : databaseContainers)
- {
- if(dc.getName().replace(toReplace, "").equalsIgnoreCase(databaseName.getValue()))
- {
- databaseContainer = dc;
- break;
- }
- }
-
- if(databaseContainer == null)
- {
- printWrappedText(
- err, ERR_DBTEST_NO_DATABASE_CONTAINERS_FOR_NAME.get(databaseName.getValue(), base, backend.getBackendID()));
- return 1;
- }
-
- int count = 0;
- long totalKeySize = 0;
- long totalDataSize = 0;
- int indent = 4;
-
- Cursor cursor = databaseContainer.openCursor(null, CursorConfig.DEFAULT);
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- LockMode lockMode = LockMode.DEFAULT;
- OperationStatus status;
- byte[] start = null;
- byte[] end = null;
- int minSize = -1;
- int maxSize = -1;
-
- if(maxDataSize.isPresent())
- {
- try
- {
- maxSize = maxDataSize.getIntValue();
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_CANNOT_DECODE_SIZE.get(maxDataSize.getValue(), getExceptionMessage(e)));
- return 1;
- }
- }
-
- if(minDataSize.isPresent())
- {
- try
- {
- minSize = minDataSize.getIntValue();
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_CANNOT_DECODE_SIZE.get(minDataSize.getValue(), getExceptionMessage(e)));
- return 1;
- }
- }
-
- // Parse the min value if given
- if(minKeyValue.isPresent())
- {
- try
- {
- start = parseKeyValue(minKeyValue.getValue(), databaseContainer);
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_CANNOT_DECODE_KEY.get(minKeyValue.getValue(), getExceptionMessage(e)));
- return 1;
- }
- }
-
- // Parse the max value if given
- if(maxKeyValue.isPresent())
- {
- try
- {
- end = parseKeyValue(maxKeyValue.getValue(), databaseContainer);
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_CANNOT_DECODE_KEY.get(maxKeyValue.getValue(), getExceptionMessage(e)));
- return 1;
- }
- }
-
-
- if(start != null)
- {
- key.setData(start);
- status = cursor.getSearchKey(key, data, lockMode);
- }
- else
- {
- status = cursor.getFirst(key, data, lockMode);
- }
-
- final String lineSep = System.getProperty("line.separator");
- while(status == OperationStatus.SUCCESS)
- {
- // Make sure this record is within the value size params
- if((minSize > 0 && data.getSize() < minSize) ||
- (maxSize > 0 && data.getSize() > maxSize))
- {
- status = cursor.getNext(key, data, lockMode);
- continue;
- }
-
- // Make sure we haven't gone pass the max value yet
- if(end != null
- && getComparator(databaseContainer).compare(key.getData(), end) > 0)
- {
- break;
- }
-
- if (!statsOnly.isPresent())
- {
- LocalizableMessage keyLabel = INFO_LABEL_DBTEST_KEY.get();
- LocalizableMessage dataLabel = INFO_LABEL_DBTEST_DATA.get();
-
- String formatedKey = null;
- String formatedData = null;
-
- if(!skipDecode.isPresent())
- {
- if(databaseContainer instanceof DN2ID)
- {
- try
- {
- formatedKey = new String(key.getData()) + ec.getBaseDN();
- keyLabel = INFO_LABEL_DBTEST_ENTRY_DN.get();
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_DECODE_FAIL.get(getExceptionMessage(e)));
- }
- formatedData = String.valueOf(
- JebFormat.entryIDFromDatabase(data.getData()));
- dataLabel = INFO_LABEL_DBTEST_ENTRY_ID.get();
- }
- else if(databaseContainer instanceof ID2Entry)
- {
- formatedKey = String.valueOf(
- JebFormat.entryIDFromDatabase(key.getData()));
- keyLabel = INFO_LABEL_DBTEST_ENTRY_ID.get();
- try
- {
- formatedData = lineSep +
- ID2Entry.entryFromDatabase(
- ByteString.wrap(data.getData()),
- ec.getRootContainer().getCompressedSchema()).toLDIFString();
- dataLabel = INFO_LABEL_DBTEST_ENTRY.get();
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_DECODE_FAIL.get(getExceptionMessage(e)));
- }
- }
- else if(databaseContainer instanceof DN2URI)
- {
- try
- {
- formatedKey = new String(key.getData());
- keyLabel = INFO_LABEL_DBTEST_ENTRY_DN.get();
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_DECODE_FAIL.get(getExceptionMessage(e)));
- }
- formatedData = new String(key.getData());
- dataLabel = INFO_LABEL_DBTEST_URI.get();
- }
- else if(databaseContainer instanceof Index)
- {
- formatedKey = new String(key.getData());
- keyLabel = INFO_LABEL_DBTEST_INDEX_VALUE.get();
-
- EntryIDSet idSet = new EntryIDSet(key.getData(), data.getData());
- if(idSet.isDefined())
- {
- int lineCount = 0;
- StringBuilder builder = new StringBuilder();
-
- for (EntryID entryID : idSet)
- {
- builder.append(entryID);
- if(lineCount == 10)
- {
- builder.append(lineSep);
- lineCount = 0;
- }
- else
- {
- builder.append(" ");
- lineCount++;
- }
- }
- formatedData = builder.toString();
- }
- else
- {
- formatedData = idSet.toString();
- }
- dataLabel = INFO_LABEL_DBTEST_INDEX_ENTRY_ID_LIST.get();
- }
- else if(databaseContainer instanceof VLVIndex)
- {
- VLVIndex index = (VLVIndex)databaseContainer;
- SortKey[] sortKeys = index.sortOrder.getSortKeys();
-
- int pos = 0;
- byte[] keyBytes = key.getData();
- if(keyBytes.length > 0)
- {
- StringBuilder builder = new StringBuilder();
-
- // Decode the attribute values
- for(SortKey sortKey : sortKeys)
- {
- int valueLength = keyBytes[pos] & 0x7F;
- if (keyBytes[pos++] != valueLength)
- {
- int numLengthBytes = valueLength;
- valueLength = 0;
- for (int k=0; k < numLengthBytes; k++, pos++)
- {
- valueLength = (valueLength << 8) |
- (keyBytes[pos] & 0xFF);
- }
- }
-
- byte[] valueBytes = new byte[valueLength];
- System.arraycopy(keyBytes, pos, valueBytes, 0, valueLength);
- builder.append(sortKey.getAttributeType().getNameOrOID());
- builder.append(": ");
- if(valueBytes.length == 0)
- {
- builder.append("NULL");
- }
- else
- {
- builder.append(new String(valueBytes));
- }
- builder.append(" ");
- pos += valueLength;
- }
-
- byte[] entryIDBytes = new byte[8];
- System.arraycopy(keyBytes, pos, entryIDBytes, 0,
- entryIDBytes.length);
- long entryID = JebFormat.entryIDFromDatabase(entryIDBytes);
-
- formatedKey = lineSep + entryID + ": " + builder;
- }
- else
- {
- formatedKey = "UNBOUNDED";
- }
- keyLabel = INFO_LABEL_DBTEST_VLV_INDEX_LAST_SORT_KEYS.get();
-
- try
- {
- StringBuilder builder = new StringBuilder();
- SortValuesSet svs = new SortValuesSet(key.getData(),
- data.getData(),
- index);
- long[] entryIDs = svs.getEntryIDs();
- for(int i = 0; i < entryIDs.length; i++)
- {
- builder.append(entryIDs[i]);
- builder.append(": ");
- for(int j = 0; j < sortKeys.length; j++)
- {
- SortKey sortKey = index.sortOrder.getSortKeys()[j];
- ByteString value = svs.getValue(i * sortKeys.length + j);
- builder.append(sortKey.getAttributeType().getNameOrOID());
- builder.append(": ");
- if(value == null)
- {
- builder.append("NULL");
- }
- else if(value.length() == 0)
- {
- builder.append("SIZE-EXCEEDED");
- }
- else
- {
- builder.append(value);
- }
- builder.append(" ");
- }
- builder.append(lineSep);
- }
- formatedData = lineSep + builder;
- dataLabel = INFO_LABEL_DBTEST_INDEX_ENTRY_ID_LIST.get();
- }
- catch(Exception e)
- {
- printWrappedText(err, ERR_DBTEST_DECODE_FAIL.get(getExceptionMessage(e)));
- }
- }
- }
-
- if(formatedKey == null)
- {
- StringBuilder keyBuilder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(keyBuilder, key.getData(), indent);
- formatedKey = lineSep + keyBuilder;
- }
- if(formatedData == null)
- {
- StringBuilder dataBuilder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(dataBuilder, data.getData(), indent);
- formatedData = lineSep + dataBuilder;
- }
-
- out.format("%s (%d bytes): %s%n", keyLabel,
- key.getData().length, formatedKey);
- out.format("%s (%d bytes): %s%n%n", dataLabel,
- data.getData().length, formatedData);
- }
- status = cursor.getNext(key, data, lockMode);
- count++;
- totalKeySize += key.getData().length;
- totalDataSize += data.getData().length;
- }
- }
- finally
- {
- cursor.close();
- }
- out.format("%nTotal Records: %d%n", count);
- if(count > 0)
- {
- out.format("Total / Average Key Size: %d bytes / %d bytes%n",
- totalKeySize, totalKeySize / count);
- out.format("Total / Average Data Size: %d bytes / %d bytes%n",
- totalDataSize, totalDataSize / count);
- }
- return 0;
- }
- catch(DatabaseException de)
- {
- printWrappedText(err, ERR_DBTEST_ERROR_READING_DATABASE.get(stackTraceToSingleLineString(de)));
- return 1;
- }
- finally
- {
- close(rc);
- releaseSharedLock(backend);
- }
- }
-
- private byte[] parseKeyValue(String value, DatabaseContainer databaseContainer)
- throws ParseException, DirectoryException
- {
- if(value.startsWith("0x"))
- {
- return hexStringToByteArray(value.substring(2));
- }
- else if(databaseContainer instanceof DN2ID
- || databaseContainer instanceof DN2URI)
- {
- // Encode the value as a DN
- return DN.valueOf(value).toNormalizedByteString().toByteArray();
- }
- else if(databaseContainer instanceof ID2Entry)
- {
- // Encode the value as an entryID
- return JebFormat.entryIDToDatabase(
- Long.parseLong(value));
- }
- else if(databaseContainer instanceof VLVIndex)
- {
- // Encode the value as a size/value pair
- byte[] vBytes = StaticUtils.getBytes(value);
- ByteStringBuilder builder = new ByteStringBuilder();
- builder.appendBERLength(vBytes.length);
- builder.appendBytes(vBytes);
- return builder.toByteArray();
- }
- else
- {
- return StaticUtils.getBytes(value);
- }
- }
-
- private Comparator<byte[]> getComparator(DatabaseContainer databaseContainer)
- {
- if(databaseContainer instanceof Index)
- {
- return ((Index) databaseContainer).getComparator();
- }
- else if(databaseContainer instanceof VLVIndex)
- {
- return ((VLVIndex)databaseContainer).comparator;
- }
- else
- { // default comparator
- return ByteSequence.BYTE_ARRAY_COMPARATOR;
- }
- }
-
- private static Map<LocalDBBackendCfg, BackendImpl> getJEBackends(Collection<Backend<?>> otherBackends)
- {
- ArrayList<Backend> backendList = new ArrayList<>();
- ArrayList<BackendCfg> entryList = new ArrayList<>();
- ArrayList<List<DN>> dnList = new ArrayList<>();
- BackendToolUtils.getBackends(backendList, entryList, dnList);
-
- final Map<LocalDBBackendCfg, BackendImpl> jeBackends = new LinkedHashMap<>();
- for(int i = 0; i < backendList.size(); i++)
- {
- Backend<?> backend = backendList.get(i);
- if(backend instanceof BackendImpl)
- {
- jeBackends.put((LocalDBBackendCfg)entryList.get(i),
- (BackendImpl)backend);
- }
- else if (otherBackends != null)
- {
- otherBackends.add(backend);
- }
- }
- return jeBackends;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DN2ID.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DN2ID.java
deleted file mode 100644
index 19d17e9..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DN2ID.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.LockMode.*;
-import static com.sleepycat.je.OperationStatus.*;
-
-import static org.opends.server.backends.jeb.JebFormat.*;
-
-import java.util.Comparator;
-
-import org.opends.server.types.DN;
-
-import com.sleepycat.je.*;
-
-/**
- * This class represents the DN database, or dn2id, which has one record
- * for each entry. The key is the normalized entry DN and the value
- * is the entry ID.
- */
-public class DN2ID extends DatabaseContainer
-{
- /** The key comparator used for the DN database. */
- private final Comparator<byte[]> comparator;
- private final int prefixRDNComponents;
-
- /**
- * Create a DN2ID instance for the DN database in a given entryContainer.
- *
- * @param name The name of the DN database.
- * @param env The JE environment.
- * @param entryContainer The entryContainer of the DN database.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- @SuppressWarnings("unchecked")
- DN2ID(String name, Environment env, EntryContainer entryContainer)
- throws DatabaseException
- {
- super(name, env, entryContainer);
-
- comparator = new AttributeIndex.KeyComparator();
- prefixRDNComponents = entryContainer.getBaseDN().size();
-
- this.dbConfig = JEBUtils.toDatabaseConfigNoDuplicates(env);
- this.dbConfig.setKeyPrefixing(true);
- this.dbConfig.setBtreeComparator((Class<? extends Comparator<byte[]>>) comparator.getClass());
- }
-
- /**
- * Insert a new record into the DN database.
- * @param txn A JE database transaction to be used for the database operation,
- * or null if none.
- * @param dn The entry DN, which is the key to the record.
- * @param id The entry ID, which is the value of the record.
- * @return true if the record was inserted, false if a record with that key
- * already exists.
- * @throws DatabaseException If an error occurred while attempting to insert
- * the new record.
- */
- boolean insert(Transaction txn, DN dn, EntryID id) throws DatabaseException
- {
- DatabaseEntry key = new DatabaseEntry(dnToDNKey(dn, prefixRDNComponents));
- DatabaseEntry data = id.getDatabaseEntry();
-
- return insert(txn, key, data) == SUCCESS;
- }
-
- /**
- * Write a record to the DN database, where the key and value are already
- * formatted.
- * @param txn A JE database transaction to be used for the database operation,
- * or null if none.
- * @param key A DatabaseEntry containing the record key.
- * @param data A DatabaseEntry containing the record value.
- * @return true if the record was written, false if it was not written.
- * @throws DatabaseException If an error occurred while attempting to write
- * the record.
- */
- @Override
- public OperationStatus put(Transaction txn, DatabaseEntry key, DatabaseEntry data) throws DatabaseException
- {
- return super.put(txn, key, data);
- }
-
- /**
- * Remove a record from the DN database.
- * @param txn A JE database transaction to be used for the database operation,
- * or null if none.
- * @param dn The entry DN, which is the key to the record.
- * @return true if the record was removed, false if it was not removed.
- * @throws DatabaseException If an error occurred while attempting to remove
- * the record.
- */
- boolean remove(Transaction txn, DN dn) throws DatabaseException
- {
- DatabaseEntry key = new DatabaseEntry(dnToDNKey(dn, prefixRDNComponents));
-
- return delete(txn, key) == SUCCESS;
- }
-
- /**
- * Fetch the entry ID for a given DN.
- * @param txn A JE database transaction to be used for the database read, or
- * null if none is required.
- * @param dn The DN for which the entry ID is desired.
- * @param lockMode The JE locking mode to be used for the read.
- * @return The entry ID, or null if the given DN is not in the DN database.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public EntryID get(Transaction txn, DN dn, LockMode lockMode) throws DatabaseException
- {
- DatabaseEntry key = new DatabaseEntry(dnToDNKey(dn, prefixRDNComponents));
- DatabaseEntry data = new DatabaseEntry();
-
- if (read(txn, key, data, DEFAULT) == SUCCESS)
- {
- return new EntryID(data);
- }
- return null;
- }
-
- /** {@inheritDoc} */
- @Override
- public OperationStatus read(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode)
- {
- return super.read(txn, key, data, lockMode);
- }
-
- /**
- * Gets the comparator for records stored in this database.
- *
- * @return The comparator for records stored in this database.
- */
- public Comparator<byte[]> getComparator()
- {
- return comparator;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DN2URI.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DN2URI.java
deleted file mode 100644
index b9faca0..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DN2URI.java
+++ /dev/null
@@ -1,731 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2012-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.LockMode.*;
-import static com.sleepycat.je.OperationStatus.*;
-
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.backends.jeb.JebFormat.*;
-import static org.opends.server.util.ServerConstants.*;
-
-import java.util.*;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.ldap.ByteSequenceReader;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.SearchScope;
-import org.forgerock.util.Pair;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.core.SearchOperation;
-import org.opends.server.types.*;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-/**
- * This class represents the referral database which contains URIs from referral
- * entries.
- * <p>
- * The key is the DN of the referral entry and the value is that of a pair
- * (labeled URI in the ref attribute for that entry, DN). The DN must be
- * duplicated in the value because the key is suitable for comparisons but is
- * not reversible to a valid DN. Duplicate keys are permitted since a referral
- * entry can contain multiple values of the ref attribute. Key order is the same
- * as in the DN database so that all referrals in a subtree can be retrieved by
- * cursoring through a range of the records.
- */
-public class DN2URI extends DatabaseContainer
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- private static final byte STRING_SEPARATOR = 0x00;
-
- /**
- * The key comparator used for the DN database.
- */
- private final Comparator<byte[]> dn2uriComparator;
-
-
- private final int prefixRDNComponents;
-
-
- /**
- * The standard attribute type that is used to specify the set of referral
- * URLs in a referral entry.
- */
- private final AttributeType referralType = DirectoryServer.getAttributeTypeOrNull(ATTR_REFERRAL_URL);
-
- /**
- * A flag that indicates whether there are any referrals contained in this database.
- * It should only be set to {@code false} when it is known that there are no referrals.
- */
- private volatile ConditionResult containsReferrals = ConditionResult.UNDEFINED;
-
-
- /**
- * Create a new object representing a referral database in a given
- * entryContainer.
- *
- * @param name The name of the referral database.
- * @param env The JE environment.
- * @param entryContainer The entryContainer of the DN database.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- @SuppressWarnings("unchecked")
- DN2URI(String name, Environment env,
- EntryContainer entryContainer)
- throws DatabaseException
- {
- super(name, env, entryContainer);
-
- dn2uriComparator = new AttributeIndex.KeyComparator();
- prefixRDNComponents = entryContainer.getBaseDN().size();
-
- this.dbConfig = JEBUtils.toDatabaseConfigAllowDuplicates(env);
- this.dbConfig.setBtreeComparator((Class<? extends Comparator<byte[]>>)
- dn2uriComparator.getClass());
- }
-
- /**
- * Insert a URI value in the referral database.
- *
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param dn The DN of the referral entry.
- * @param labeledURI The labeled URI value of the ref attribute.
- * @return true if the record was inserted, false if it was not.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private boolean insert(Transaction txn, DN dn, String labeledURI)
- throws DatabaseException
- {
- byte[] normDN = JebFormat.dnToDNKey(dn, prefixRDNComponents);
- DatabaseEntry key = new DatabaseEntry(normDN);
- DatabaseEntry data = new DatabaseEntry(encodeURIAndDN(labeledURI, dn));
-
- // The JE insert method does not permit duplicate keys so we must use the
- // put method.
- if (put(txn, key, data) == SUCCESS)
- {
- containsReferrals = ConditionResult.TRUE;
- return true;
- }
- return false;
- }
-
- private byte[] encodeURIAndDN(String labeledURI, DN dn)
- {
- return new ByteStringBuilder()
- .appendUtf8(labeledURI)
- .appendByte(STRING_SEPARATOR)
- .appendUtf8(dn.toString())
- .toByteArray();
- }
-
- private Pair<String, DN> decodeURIAndDN(byte[] data) throws DirectoryException {
- try {
- final ByteSequenceReader reader = ByteString.valueOfBytes(data).asReader();
- final String labeledURI = reader.readStringUtf8(getNextStringLength(reader));
- // skip the string separator
- reader.skip(1);
- final DN dn = DN.valueOf(reader.readStringUtf8(reader.remaining()));
- return Pair.of(labeledURI, dn);
- }
- catch (Exception e) {
- throw new DirectoryException(ResultCode.OPERATIONS_ERROR, ERR_DATABASE_EXCEPTION.get(e));
- }
- }
-
- /** Returns the length of next string by looking for the zero byte used as separator. */
- private int getNextStringLength(ByteSequenceReader reader)
- {
- int length = 0;
- while (reader.peek(length) != STRING_SEPARATOR)
- {
- length++;
- }
- return length;
- }
-
- /**
- * Delete URI values for a given referral entry from the referral database.
- *
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param dn The DN of the referral entry for which URI values are to be
- * deleted.
- * @return true if the values were deleted, false if not.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- boolean delete(Transaction txn, DN dn) throws DatabaseException
- {
- byte[] normDN = JebFormat.dnToDNKey(dn, prefixRDNComponents);
- DatabaseEntry key = new DatabaseEntry(normDN);
-
- if (delete(txn, key) == SUCCESS)
- {
- containsReferrals = containsReferrals(txn);
- return true;
- }
- return false;
- }
-
- /**
- * Delete a single URI value from the referral database.
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param dn The DN of the referral entry.
- * @param labeledURI The URI value to be deleted.
- * @return true if the value was deleted, false if not.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private boolean delete(Transaction txn, DN dn, String labeledURI) throws DatabaseException
- {
- CursorConfig cursorConfig = null;
- byte[] normDN = JebFormat.dnToDNKey(dn, prefixRDNComponents);
- byte[] URIBytes = StaticUtils.getBytes(labeledURI);
- DatabaseEntry key = new DatabaseEntry(normDN);
- DatabaseEntry data = new DatabaseEntry(URIBytes);
-
- Cursor cursor = openCursor(txn, cursorConfig);
- try
- {
- OperationStatus status = cursor.getSearchBoth(key, data, null);
- if (status == OperationStatus.SUCCESS)
- {
- status = cursor.delete();
- }
-
- if (status == OperationStatus.SUCCESS)
- {
- containsReferrals = containsReferrals(txn);
- return true;
- }
- return false;
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Indicates whether the underlying database contains any referrals.
- *
- * @param txn The transaction to use when making the determination.
- *
- * @return {@code true} if it is believed that the underlying database may
- * contain at least one referral, or {@code false} if it is certain
- * that it doesn't.
- */
- private ConditionResult containsReferrals(Transaction txn)
- {
- try
- {
- Cursor cursor = openCursor(txn, null);
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- OperationStatus status = cursor.getFirst(key, data, null);
- cursor.close();
-
- if (status == OperationStatus.SUCCESS)
- {
- return ConditionResult.TRUE;
- }
- else if (status == OperationStatus.NOTFOUND)
- {
- return ConditionResult.FALSE;
- }
- else
- {
- return ConditionResult.UNDEFINED;
- }
- }
- catch (Exception e)
- {
- logger.traceException(e);
-
- return ConditionResult.UNDEFINED;
- }
- }
-
- /**
- * Update the referral database for an entry that has been modified. Does
- * not do anything unless the entry before the modification or the entry after
- * the modification is a referral entry.
- *
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param before The entry before the modifications have been applied.
- * @param after The entry after the modifications have been applied.
- * @param mods The sequence of modifications made to the entry.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- void modifyEntry(Transaction txn, Entry before, Entry after, List<Modification> mods)
- throws DatabaseException
- {
- DN entryDN = before.getName();
- for (Modification mod : mods)
- {
- Attribute modAttr = mod.getAttribute();
- AttributeType modAttrType = modAttr.getAttributeType();
- if (modAttrType.equals(referralType))
- {
- Attribute a = mod.getAttribute();
- switch (mod.getModificationType().asEnum())
- {
- case ADD:
- if (a != null)
- {
- for (ByteString v : a)
- {
- insert(txn, entryDN, v.toString());
- }
- }
- break;
-
- case DELETE:
- if (a == null || a.isEmpty())
- {
- delete(txn, entryDN);
- }
- else
- {
- for (ByteString v : a)
- {
- delete(txn, entryDN, v.toString());
- }
- }
- break;
-
- case INCREMENT:
- // Nonsensical.
- break;
-
- case REPLACE:
- delete(txn, entryDN);
- if (a != null)
- {
- for (ByteString v : a)
- {
- insert(txn, entryDN, v.toString());
- }
- }
- break;
- }
- }
- }
- }
-
- /**
- * Update the referral database for an entry that has been replaced. Does
- * not do anything unless the entry before it was replaced or the entry after
- * it was replaced is a referral entry.
- *
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param before The entry before it was replaced.
- * @param after The entry after it was replaced.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public void replaceEntry(Transaction txn, Entry before, Entry after)
- throws DatabaseException
- {
- deleteEntry(txn, before);
- addEntry(txn, after);
- }
-
- /**
- * Update the referral database for a new entry. Does nothing if the entry
- * is not a referral entry.
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param entry The entry to be added.
- * @return True if the entry was added successfully or False otherwise.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public boolean addEntry(Transaction txn, Entry entry)
- throws DatabaseException
- {
- boolean success = true;
- Set<String> labeledURIs = entry.getReferralURLs();
- if (labeledURIs != null)
- {
- DN dn = entry.getName();
- for (String labeledURI : labeledURIs)
- {
- if(!insert(txn, dn, labeledURI))
- {
- success = false;
- }
- }
- }
- return success;
- }
-
- /**
- * Update the referral database for a deleted entry. Does nothing if the entry
- * was not a referral entry.
- * @param txn A database transaction used for the update, or null if none is
- * required.
- * @param entry The entry to be deleted.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- void deleteEntry(Transaction txn, Entry entry) throws DatabaseException
- {
- Set<String> labeledURIs = entry.getReferralURLs();
- if (labeledURIs != null)
- {
- delete(txn, entry.getName());
- }
- }
-
- /**
- * Checks whether the target of an operation is a referral entry and throws
- * a Directory referral exception if it is.
- * @param entry The target entry of the operation, or the base entry of a
- * search operation.
- * @param searchScope The scope of the search operation, or null if the
- * operation is not a search operation.
- * @throws DirectoryException If a referral is found at or above the target
- * DN. The referral URLs will be set appropriately for the references found
- * in the referral entry.
- */
- void checkTargetForReferral(Entry entry, SearchScope searchScope) throws DirectoryException
- {
- Set<String> referralURLs = entry.getReferralURLs();
- if (referralURLs != null)
- {
- throwReferralException(entry.getName(), entry.getName(), referralURLs,
- searchScope);
- }
- }
-
- /**
- * Throws a Directory referral exception for the case where a referral entry
- * exists at or above the target DN of an operation.
- * @param targetDN The target DN of the operation, or the base object of a
- * search operation.
- * @param referralDN The DN of the referral entry.
- * @param labeledURIs The set of labeled URIs in the referral entry.
- * @param searchScope The scope of the search operation, or null if the
- * operation is not a search operation.
- * @throws DirectoryException If a referral is found at or above the target
- * DN. The referral URLs will be set appropriately for the references found
- * in the referral entry.
- */
- private void throwReferralException(DN targetDN, DN referralDN, Set<String> labeledURIs, SearchScope searchScope)
- throws DirectoryException
- {
- ArrayList<String> URIList = new ArrayList<>(labeledURIs.size());
- for (String labeledURI : labeledURIs)
- {
- // Remove the label part of the labeled URI if there is a label.
- String uri = labeledURI;
- int i = labeledURI.indexOf(' ');
- if (i != -1)
- {
- uri = labeledURI.substring(0, i);
- }
-
- try
- {
- LDAPURL ldapurl = LDAPURL.decode(uri, false);
-
- if ("ldap".equalsIgnoreCase(ldapurl.getScheme()))
- {
- DN urlBaseDN = targetDN;
- if (!referralDN.equals(ldapurl.getBaseDN()))
- {
- urlBaseDN =
- EntryContainer.modDN(targetDN,
- referralDN.size(),
- ldapurl.getBaseDN());
- }
- ldapurl.setBaseDN(urlBaseDN);
- if (searchScope == null)
- {
- // RFC 3296, 5.2. Target Object Considerations:
- // In cases where the URI to be returned is a LDAP URL, the server
- // SHOULD trim any present scope, filter, or attribute list from the
- // URI before returning it. Critical extensions MUST NOT be trimmed
- // or modified.
- StringBuilder builder = new StringBuilder(uri.length());
- ldapurl.toString(builder, true);
- uri = builder.toString();
- }
- else
- {
- // RFC 3296, 5.3. Base Object Considerations:
- // In cases where the URI to be returned is a LDAP URL, the server
- // MUST provide an explicit scope specifier from the LDAP URL prior
- // to returning it.
- ldapurl.getAttributes().clear();
- ldapurl.setScope(searchScope);
- ldapurl.setFilter(null);
- uri = ldapurl.toString();
- }
- }
- }
- catch (DirectoryException e)
- {
- logger.traceException(e);
- // Return the non-LDAP URI as is.
- }
-
- URIList.add(uri);
- }
-
- // Throw a directory referral exception containing the URIs.
- LocalizableMessage msg = NOTE_REFERRAL_RESULT_MESSAGE.get(referralDN);
- throw new DirectoryException(
- ResultCode.REFERRAL, msg, referralDN, URIList, null);
- }
-
- /**
- * Process referral entries that are above the target DN of an operation.
- * @param targetDN The target DN of the operation, or the base object of a
- * search operation.
- * @param searchScope The scope of the search operation, or null if the
- * operation is not a search operation.
- * @throws DirectoryException If a referral is found at or above the target
- * DN. The referral URLs will be set appropriately for the references found
- * in the referral entry.
- */
- void targetEntryReferrals(DN targetDN, SearchScope searchScope) throws DirectoryException
- {
- if (containsReferrals == ConditionResult.UNDEFINED)
- {
- containsReferrals = containsReferrals(null);
- }
-
- if (containsReferrals == ConditionResult.FALSE)
- {
- return;
- }
-
- Transaction txn = null;
- CursorConfig cursorConfig = null;
-
- try
- {
- Cursor cursor = openCursor(txn, cursorConfig);
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- // Go up through the DIT hierarchy until we find a referral.
- for (DN dn = entryContainer.getParentWithinBase(targetDN); dn != null;
- dn = entryContainer.getParentWithinBase(dn))
- {
- // Look for a record whose key matches the current DN.
- key.setData(JebFormat.dnToDNKey(dn, prefixRDNComponents));
- OperationStatus status = cursor.getSearchKey(key, data, DEFAULT);
- if (status == OperationStatus.SUCCESS)
- {
- // Construct a set of all the labeled URIs in the referral.
- Set<String> labeledURIs = new LinkedHashSet<>(cursor.count());
- do
- {
- final Pair<String, DN> uriAndDN = decodeURIAndDN(data.getData());
- final String labeledURI = uriAndDN.getFirst();
- labeledURIs.add(labeledURI);
- status = cursor.getNextDup(key, data, DEFAULT);
- } while (status == OperationStatus.SUCCESS);
-
- throwReferralException(targetDN, dn, labeledURIs, searchScope);
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- }
- }
-
- /**
- * Return search result references for a search operation using the referral
- * database to find all referral entries within scope of the search.
- * @param searchOp The search operation for which search result references
- * should be returned.
- * @return <CODE>true</CODE> if the caller should continue processing the
- * search request and sending additional entries and references, or
- * <CODE>false</CODE> if not for some reason (e.g., the size limit
- * has been reached or the search has been abandoned).
- * @throws DirectoryException If a Directory Server error occurs.
- */
- boolean returnSearchReferences(SearchOperation searchOp) throws DirectoryException
- {
- if (containsReferrals == ConditionResult.UNDEFINED)
- {
- containsReferrals = containsReferrals(null);
- }
-
- if (containsReferrals == ConditionResult.FALSE)
- {
- return true;
- }
-
- Transaction txn = null;
- CursorConfig cursorConfig = null;
-
- /*
- * We will iterate forwards through a range of the keys to
- * find subordinates of the base entry from the top of the tree
- * downwards.
- */
- byte[] baseDN = JebFormat.dnToDNKey(searchOp.getBaseDN(), prefixRDNComponents);
- final byte special = 0x00;
- byte[] suffix = Arrays.copyOf(baseDN, baseDN.length+1);
- suffix[suffix.length - 1] = special;
- byte[] end = Arrays.copyOf(suffix, suffix.length);
- end[end.length - 1] = special + 1;
-
- /*
- * Set the ending value to a value of equal length but slightly
- * greater than the suffix. Since keys are compared in
- * reverse order we must set the first byte (the comma).
- * No possibility of overflow here.
- */
-
- DatabaseEntry data = new DatabaseEntry();
- DatabaseEntry key = new DatabaseEntry(suffix);
-
- try
- {
- Cursor cursor = openCursor(txn, cursorConfig);
- try
- {
- // Initialize the cursor very close to the starting value then
- // step forward until we pass the ending value.
- for (OperationStatus status =
- cursor.getSearchKeyRange(key, data, DEFAULT);
- status == OperationStatus.SUCCESS;
- status = cursor.getNextNoDup(key, data, DEFAULT))
- {
- int cmp = dn2uriComparator.compare(key.getData(), end);
- if (cmp >= 0)
- {
- // We have gone past the ending value.
- break;
- }
-
- // We have found a subordinate referral.
- final Pair<String, DN> uriAndDN = decodeURIAndDN(data.getData());
- final String labeledURI = uriAndDN.getFirst();
- final DN dn = uriAndDN.getSecond();
-
- // Make sure the referral is within scope.
- if (searchOp.getScope() == SearchScope.SINGLE_LEVEL
- && findDNKeyParent(key.getData()) != baseDN.length)
- {
- continue;
- }
-
- // Construct a list of all the URIs in the referral.
- ArrayList<String> URIList = new ArrayList<>(cursor.count());
- do
- {
- // Remove the label part of the labeled URI if there is a label.
- String uri = labeledURI;
- int i = labeledURI.indexOf(' ');
- if (i != -1)
- {
- uri = labeledURI.substring(0, i);
- }
-
- // From RFC 3296 section 5.4:
- // If the URI component is not a LDAP URL, it should be returned as
- // is. If the LDAP URL's DN part is absent or empty, the DN part
- // must be modified to contain the DN of the referral object. If
- // the URI component is a LDAP URL, the URI SHOULD be modified to
- // add an explicit scope specifier.
- try
- {
- LDAPURL ldapurl = LDAPURL.decode(uri, false);
-
- if ("ldap".equalsIgnoreCase(ldapurl.getScheme()))
- {
- if (ldapurl.getBaseDN().isRootDN())
- {
- ldapurl.setBaseDN(dn);
- }
- ldapurl.getAttributes().clear();
- if (searchOp.getScope() == SearchScope.SINGLE_LEVEL)
- {
- ldapurl.setScope(SearchScope.BASE_OBJECT);
- }
- else
- {
- ldapurl.setScope(SearchScope.WHOLE_SUBTREE);
- }
- ldapurl.setFilter(null);
- uri = ldapurl.toString();
- }
- }
- catch (DirectoryException e)
- {
- logger.traceException(e);
- // Return the non-LDAP URI as is.
- }
-
- URIList.add(uri);
- status = cursor.getNextDup(key, data, DEFAULT);
- } while (status == OperationStatus.SUCCESS);
-
- SearchResultReference reference = new SearchResultReference(URIList);
- if (!searchOp.returnReference(dn, reference))
- {
- return false;
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- }
-
- return true;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DataConfig.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DataConfig.java
deleted file mode 100644
index f99779c..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DataConfig.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import org.forgerock.util.Reject;
-import org.opends.server.api.CompressedSchema;
-import org.opends.server.types.EntryEncodeConfig;
-
-/**
- * Configuration class to indicate desired compression and cryptographic options
- * for the data stored in the database.
- */
-public final class DataConfig
-{
- /** Indicates whether data should be compressed before writing to the database. */
- private boolean compressed;
-
- /** The configuration to use when encoding entries in the database. */
- private EntryEncodeConfig encodeConfig = new EntryEncodeConfig();
-
- /**
- * Construct a new DataConfig object with the specified settings.
- *
- * @param compressed true if data should be compressed, false if not.
- * @param compactEncoding true if data should be encoded in compact form,
- * false if not.
- * @param compressedSchema the compressed schema manager to use. It must not
- * be {@code null} if compactEncoding is {@code true}.
- */
- public DataConfig(boolean compressed, boolean compactEncoding, CompressedSchema compressedSchema)
- {
- this.compressed = compressed;
- setCompactEncoding(compactEncoding, compressedSchema);
- }
-
- /**
- * Determine whether data should be compressed before writing to the database.
- * @return true if data should be compressed, false if not.
- */
- public boolean isCompressed()
- {
- return compressed;
- }
-
- /**
- * Determine whether entries should be encoded with the compact form before
- * writing to the database.
- * @return true if data should be encoded in the compact form.
- */
- public boolean isCompactEncoding()
- {
- return encodeConfig.compressAttributeDescriptions();
- }
-
- /**
- * Configure whether data should be compressed before writing to the database.
- * @param compressed true if data should be compressed, false if not.
- */
- public void setCompressed(boolean compressed)
- {
- this.compressed = compressed;
- }
-
- /**
- * Configure whether data should be encoded with the compact form before
- * writing to the database.
- * @param compactEncoding true if data should be encoded in compact form,
- * false if not.
- * @param compressedSchema The compressed schema manager to use. It must not
- * be {@code null} if compactEncoding is {@code true}.
- */
- public void setCompactEncoding(boolean compactEncoding, CompressedSchema compressedSchema)
- {
- if (compressedSchema == null)
- {
- Reject.ifTrue(compactEncoding);
- this.encodeConfig = new EntryEncodeConfig(false, compactEncoding, false);
- }
- else
- {
- this.encodeConfig = new EntryEncodeConfig(false, compactEncoding, compactEncoding, compressedSchema);
- }
- }
-
- /**
- * Get the EntryEncodeConfig object in use by this configuration.
- * @return the EntryEncodeConfig object in use by this configuration.
- */
- public EntryEncodeConfig getEntryEncodeConfig()
- {
- return this.encodeConfig;
- }
-
- /**
- * Get a string representation of this object.
- * @return A string representation of this object.
- */
- @Override
- public String toString()
- {
- final StringBuilder builder = new StringBuilder();
- builder.append("DataConfig(compressed=");
- builder.append(compressed);
- builder.append(", ");
- encodeConfig.toString(builder);
- builder.append(")");
- return builder.toString();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DatabaseContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DatabaseContainer.java
deleted file mode 100644
index 44e55e9..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DatabaseContainer.java
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.io.Closeable;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.opends.server.util.ServerConstants;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-/**
- * This class is a wrapper around the JE database object and provides basic
- * read and write methods for entries.
- */
-public abstract class DatabaseContainer implements Closeable
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The database entryContainer. */
- protected final EntryContainer entryContainer;
- /** The name of the database within the entryContainer. */
- protected String name;
-
- /** The JE database configuration. */
- protected DatabaseConfig dbConfig;
- /** The reference to the JE Environment. */
- private final Environment env;
- /** A JE database handle opened through this database container. */
- private Database database;
-
- /**
- * Create a new DatabaseContainer object.
- *
- * @param name The name of the entry database.
- * @param env The JE Environment.
- * @param entryContainer The entryContainer of the entry database.
- */
- protected DatabaseContainer(String name, Environment env, EntryContainer entryContainer)
- {
- this.env = env;
- this.entryContainer = entryContainer;
- this.name = name;
- }
-
- /**
- * Opens a JE database in this database container. If the provided
- * database configuration is transactional, a transaction will be
- * created and used to perform the open.
- *
- * @throws DatabaseException if a JE database error occurs while
- * opening the index.
- */
- public void open() throws DatabaseException
- {
- if (dbConfig.getTransactional())
- {
- // Open the database under a transaction.
- Transaction txn = entryContainer.beginTransaction();
- try
- {
- database = env.openDatabase(txn, name, dbConfig);
- if (logger.isTraceEnabled())
- {
- logger.trace("JE database %s opened. txnid=%d", database.getDatabaseName(), txn.getId());
- }
- EntryContainer.transactionCommit(txn);
- }
- catch (DatabaseException e)
- {
- EntryContainer.transactionAbort(txn);
- throw e;
- }
- }
- else
- {
- database = env.openDatabase(null, name, dbConfig);
- if (logger.isTraceEnabled())
- {
- logger.trace("JE database %s opened. txnid=none", database.getDatabaseName());
- }
- }
- }
-
- /**
- * Flush any cached database information to disk and close the
- * database container.
- *
- * The database container should not be closed while other processes
- * acquired the container. The container should not be closed
- * while cursors handles into the database remain open, or
- * transactions that include operations on the database have not yet
- * been committed or aborted.
- *
- * The container may not be accessed again after this method is
- * called, regardless of the method's success or failure.
- *
- * @throws DatabaseException if an error occurs.
- */
- @Override
- public synchronized void close() throws DatabaseException
- {
- if(dbConfig.getDeferredWrite())
- {
- database.sync();
- }
- database.close();
- database = null;
-
- if(logger.isTraceEnabled())
- {
- logger.trace("Closed database %s", name);
- }
- }
-
- /**
- * Replace or insert a record into a JE database, with optional debug logging.
- * This is a simple wrapper around the JE Database.put method.
- * @param txn The JE transaction handle, or null if none.
- * @param key The record key.
- * @param data The record value.
- * @return The operation status.
- * @throws DatabaseException If an error occurs in the JE operation.
- */
- OperationStatus put(Transaction txn, DatabaseEntry key, DatabaseEntry data) throws DatabaseException
- {
- OperationStatus status = database.put(txn, key, data);
- if (logger.isTraceEnabled())
- {
- logger.trace(messageToLog(status, database, txn, key, data));
- }
- return status;
- }
-
- /**
- * Read a record from a JE database, with optional debug logging. This is a
- * simple wrapper around the JE Database.get method.
- * @param txn The JE transaction handle, or null if none.
- * @param key The key of the record to be read.
- * @param data The record value returned as output. Its byte array does not
- * need to be initialized by the caller.
- * @param lockMode The JE locking mode to be used for the read.
- * @return The operation status.
- * @throws DatabaseException If an error occurs in the JE operation.
- */
- OperationStatus read(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode)
- throws DatabaseException
- {
- OperationStatus status = database.get(txn, key, data, lockMode);
- if (logger.isTraceEnabled())
- {
- logger.trace(messageToLog(status, database, txn, key, data));
- }
- return status;
- }
-
- /**
- * Insert a record into a JE database, with optional debug logging. This is a
- * simple wrapper around the JE Database.putNoOverwrite method.
- * @param txn The JE transaction handle, or null if none.
- * @param key The record key.
- * @param data The record value.
- * @return The operation status.
- * @throws DatabaseException If an error occurs in the JE operation.
- */
- OperationStatus insert(Transaction txn, DatabaseEntry key, DatabaseEntry data) throws DatabaseException
- {
- OperationStatus status = database.putNoOverwrite(txn, key, data);
- if (logger.isTraceEnabled())
- {
- logger.trace(messageToLog(status, database, txn, key, data));
- }
- return status;
- }
-
- /**
- * Delete a record from a JE database, with optional debug logging. This is a
- * simple wrapper around the JE Database.delete method.
- * @param txn The JE transaction handle, or null if none.
- * @param key The key of the record to be read.
- * @return The operation status.
- * @throws DatabaseException If an error occurs in the JE operation.
- */
- OperationStatus delete(Transaction txn, DatabaseEntry key) throws DatabaseException
- {
- OperationStatus status = database.delete(txn, key);
- if (logger.isTraceEnabled())
- {
- logger.trace(messageToLog(status, database, txn, key, null));
- }
- return status;
- }
-
- /**
- * Open a JE cursor on the JE database. This is a simple wrapper around
- * the JE Database.openCursor method.
- * @param txn A JE database transaction to be used by the cursor,
- * or null if none.
- * @param cursorConfig The JE cursor configuration.
- * @return A JE cursor.
- * @throws DatabaseException If an error occurs while attempting to open
- * the cursor.
- */
- public Cursor openCursor(Transaction txn, CursorConfig cursorConfig)
- throws DatabaseException
- {
- return database.openCursor(txn, cursorConfig);
- }
-
- /**
- * Open a JE disk ordered cursor on the JE database. This is a
- * simple wrapper around the JE Database.openCursor method.
- * @param cursorConfig The JE disk ordered cursor configuration.
- * @return A JE disk ordered cursor.
- * @throws DatabaseException If an error occurs while attempting to open
- * the cursor.
- */
- public DiskOrderedCursor openCursor(DiskOrderedCursorConfig cursorConfig)
- throws DatabaseException
- {
- return database.openCursor(cursorConfig);
- }
-
- /**
- * Get the count of key/data pairs in the database in a JE database.
- * This is a simple wrapper around the JE Database.count method.
- * @return The count of key/data pairs in the database.
- * @throws DatabaseException If an error occurs in the JE operation.
- */
- public long getRecordCount() throws DatabaseException
- {
- long count = database.count();
- if (logger.isTraceEnabled())
- {
- logger.trace(messageToLog(OperationStatus.SUCCESS, database, null, null, null));
- }
- return count;
- }
-
- /**
- * Get a string representation of this object.
- * @return return A string representation of this object.
- */
- @Override
- public String toString()
- {
- return name;
- }
-
- /**
- * Get the JE database name for this database container.
- *
- * @return JE database name for this database container.
- */
- public String getName()
- {
- return name;
- }
-
- /**
- * Preload the database into cache.
- *
- * @param config The preload configuration.
- * @return Statistics about the preload process.
- * @throws DatabaseException If an JE database error occurs
- * during the preload.
- */
- public PreloadStats preload(PreloadConfig config)
- throws DatabaseException
- {
- return database.preload(config);
- }
-
- /**
- * Set the JE database name to use for this container.
- *
- * @param name The database name to use for this container.
- */
- void setName(String name)
- {
- this.name = name;
- }
-
- /** Returns the message to log given the provided information. */
- private String messageToLog(OperationStatus status, Database database,
- Transaction txn, DatabaseEntry key, DatabaseEntry data)
- {
- StringBuilder builder = new StringBuilder();
- builder.append(" (");
- builder.append(status);
- builder.append(")");
- builder.append(" db=");
- try
- {
- builder.append(database.getDatabaseName());
- }
- catch (DatabaseException de)
- {
- builder.append(de);
- }
- if (txn != null)
- {
- builder.append(" txnid=");
- try
- {
- builder.append(txn.getId());
- }
- catch (DatabaseException de)
- {
- builder.append(de);
- }
- }
- else
- {
- builder.append(" txnid=none");
- }
-
- builder.append(ServerConstants.EOL);
- if (key != null)
- {
- builder.append("key:");
- builder.append(ServerConstants.EOL);
- StaticUtils.byteArrayToHexPlusAscii(builder, key.getData(), 4);
- }
-
- // If the operation was successful we log the same common information
- // plus the data
- if (status == OperationStatus.SUCCESS && data != null)
- {
- builder.append("data(len=");
- builder.append(data.getSize());
- builder.append("):");
- builder.append(ServerConstants.EOL);
- StaticUtils.byteArrayToHexPlusAscii(builder, data.getData(), 4);
- }
- return builder.toString();
- }
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DatabaseEnvironmentMonitor.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DatabaseEnvironmentMonitor.java
deleted file mode 100644
index 36f50a4..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DatabaseEnvironmentMonitor.java
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.schema.Syntax;
-import org.opends.server.admin.std.server.MonitorProviderCfg;
-import org.opends.server.api.MonitorProvider;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeBuilder;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.Attributes;
-import org.opends.server.types.InitializationException;
-import org.opends.server.types.SearchFilter;
-import org.opends.server.util.TimeThread;
-
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.EnvironmentStats;
-import com.sleepycat.je.JEVersion;
-import com.sleepycat.je.StatsConfig;
-import com.sleepycat.je.TransactionStats;
-
-/**
- * A monitor provider for a Berkeley DB JE environment.
- * It uses reflection on the environment statistics object
- * so that we don't need to keep a list of all the stats.
- */
-final class DatabaseEnvironmentMonitor
- extends MonitorProvider<MonitorProviderCfg>
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /**
- * Represents the statistical information kept for each search filter.
- */
- private static class FilterStats implements Comparable<FilterStats>
- {
- private volatile LocalizableMessage failureReason = LocalizableMessage.EMPTY;
- private long maxMatchingEntries = -1;
- private final AtomicInteger hits = new AtomicInteger();
-
- @Override
- public int compareTo(FilterStats that) {
- return this.hits.get() - that.hits.get();
- }
-
- private void update(int hitCount, LocalizableMessage failureReason)
- {
- this.hits.getAndAdd(hitCount);
- this.failureReason = failureReason;
- }
-
- private void update(int hitCount, long matchingEntries)
- {
- this.hits.getAndAdd(hitCount);
- this.failureReason = LocalizableMessage.EMPTY;
- synchronized(this)
- {
- if(matchingEntries > maxMatchingEntries)
- {
- maxMatchingEntries = matchingEntries;
- }
- }
- }
- }
-
- /** The name of this monitor instance. */
- private String name;
- /** The root container to be monitored. */
- private RootContainer rootContainer;
-
- private int maxEntries = 1024;
- private boolean filterUseEnabled;
- private String startTimeStamp;
- private final HashMap<SearchFilter, FilterStats> filterToStats = new HashMap<>();
- private final AtomicInteger indexedSearchCount = new AtomicInteger();
- private final AtomicInteger unindexedSearchCount = new AtomicInteger();
-
- /**
- * Creates a new database environment monitor.
- * @param name The monitor instance name.
- * @param rootContainer A root container handle for the database to be
- * monitored.
- */
- public DatabaseEnvironmentMonitor(String name, RootContainer rootContainer)
- {
- this.name = name;
- this.rootContainer = rootContainer;
- }
-
- /** {@inheritDoc} */
- @Override
- public void initializeMonitorProvider(MonitorProviderCfg configuration)
- throws ConfigException, InitializationException
- {
- }
-
- /**
- * Retrieves the name of this monitor provider. It should be unique among all
- * monitor providers, including all instances of the same monitor provider.
- *
- * @return The name of this monitor provider.
- */
- @Override
- public String getMonitorInstanceName()
- {
- return name;
- }
-
- /**
- * Creates monitor attribute values for a given JE statistics object,
- * using reflection to call all the getter methods of the statistics object.
- * The attribute type names of the created attribute values are derived from
- * the names of the getter methods.
- * @param monitorAttrs The monitor attribute values are inserted into this
- * attribute list.
- * @param stats The JE statistics object.
- * @param attrPrefix A common prefix for the attribute type names of the
- * monitor attribute values, to distinguish the attributes of one
- * type of statistical object from another, and to avoid attribute name
- * collisions.
- */
- private void addAttributesForStatsObject(ArrayList<Attribute> monitorAttrs,
- Object stats, String attrPrefix)
- {
- Class<?> c = stats.getClass();
- Method[] methods = c.getMethods();
-
- // Iterate through all the statistic class methods.
- for (Method method : methods)
- {
- // Invoke all the getters returning integer values.
- if (method.getName().startsWith("get"))
- {
- Class<?> returnType = method.getReturnType();
- if (returnType.equals(int.class) || returnType.equals(long.class))
- {
- Syntax integerSyntax = DirectoryServer.getDefaultIntegerSyntax();
-
- // Remove the 'get' from the method name and add the prefix.
- String attrName = attrPrefix + method.getName().substring(3);
-
- try
- {
- // Read the statistic.
- Object statValue = method.invoke(stats);
-
- // Create an attribute from the statistic.
- AttributeType attrType = DirectoryServer.getAttributeTypeOrDefault(attrName, attrName, integerSyntax);
- monitorAttrs.add(Attributes.create(attrType, String.valueOf(statValue)));
- } catch (Exception e)
- {
- logger.traceException(e);
- }
- }
- }
- }
- }
-
- /**
- * Retrieves a set of attributes containing monitor data that should be
- * returned to the client if the corresponding monitor entry is requested.
- *
- * @return A set of attributes containing monitor data that should be
- * returned to the client if the corresponding monitor entry is
- * requested.
- */
- @Override
- public List<Attribute> getMonitorData()
- {
- EnvironmentStats environmentStats = null;
- TransactionStats transactionStats = null;
- StatsConfig statsConfig = new StatsConfig();
-
- try
- {
- environmentStats = rootContainer.getEnvironmentStats(statsConfig);
- transactionStats =
- rootContainer.getEnvironmentTransactionStats(statsConfig);
- } catch (DatabaseException e)
- {
- logger.traceException(e);
- return Collections.emptyList();
- }
-
- ArrayList<Attribute> monitorAttrs = new ArrayList<>();
- String jeVersion = JEVersion.CURRENT_VERSION.getVersionString();
- monitorAttrs.add(Attributes.create("JEVersion", jeVersion));
-
- addAttributesForStatsObject(monitorAttrs, environmentStats, "Environment");
- addAttributesForStatsObject(monitorAttrs, transactionStats, "Transaction");
-
- AttributeBuilder needReindex = new AttributeBuilder("need-reindex");
- for(EntryContainer ec : rootContainer.getEntryContainers())
- {
- List<DatabaseContainer> databases = new ArrayList<>();
- ec.listDatabases(databases);
- for(DatabaseContainer dc : databases)
- {
- if(dc instanceof Index && !((Index)dc).isTrusted())
- {
- needReindex.add(dc.getName());
- }
- }
- }
- if(needReindex.size() > 0)
- {
- monitorAttrs.add(needReindex.toAttribute());
- }
-
- if(filterUseEnabled)
- {
- monitorAttrs.add(Attributes.create("filter-use-startTime", startTimeStamp));
- AttributeBuilder builder = new AttributeBuilder("filter-use");
-
- StringBuilder stringBuilder = new StringBuilder();
- synchronized(filterToStats)
- {
- for(Map.Entry<SearchFilter, FilterStats> entry :
- filterToStats.entrySet())
- {
- entry.getKey().toString(stringBuilder);
- stringBuilder.append(" hits:");
- stringBuilder.append(entry.getValue().hits.get());
- stringBuilder.append(" maxmatches:");
- stringBuilder.append(entry.getValue().maxMatchingEntries);
- stringBuilder.append(" message:");
- stringBuilder.append(entry.getValue().failureReason);
- builder.add(stringBuilder.toString());
- stringBuilder.setLength(0);
- }
- }
- monitorAttrs.add(builder.toAttribute());
- monitorAttrs.add(Attributes.create("filter-use-indexed",
- String.valueOf(indexedSearchCount.get())));
- monitorAttrs.add(Attributes.create("filter-use-unindexed",
- String.valueOf(unindexedSearchCount.get())));
- }
-
- return monitorAttrs;
- }
-
-
- /**
- * Updates the index filter statistics with this latest search filter
- * and the reason why an index was not used.
- *
- * @param searchFilter The search filter that was evaluated.
- * @param failureMessage The reason why an index was not used.
- */
- public void updateStats(SearchFilter searchFilter, LocalizableMessage failureMessage)
- {
- if(!filterUseEnabled)
- {
- return;
- }
-
- FilterStats stats;
- synchronized(filterToStats)
- {
- stats = filterToStats.get(searchFilter);
-
-
- if(stats != null)
- {
- stats.update(1, failureMessage);
- }
- else
- {
- stats = new FilterStats();
- stats.update(1, failureMessage);
- removeLowestHit();
- filterToStats.put(searchFilter, stats);
- }
- }
- }
-
- /**
- * Updates the index filter statistics with this latest search filter
- * and the number of entries matched by the index lookup.
- *
- * @param searchFilter The search filter that was evaluated.
- * @param matchingEntries The number of entries matched by the successful
- * index lookup.
- */
- public void updateStats(SearchFilter searchFilter, long matchingEntries)
- {
- if(!filterUseEnabled)
- {
- return;
- }
-
- FilterStats stats;
- synchronized(filterToStats)
- {
- stats = filterToStats.get(searchFilter);
-
-
- if(stats != null)
- {
- stats.update(1, matchingEntries);
- }
- else
- {
- stats = new FilterStats();
- stats.update(1, matchingEntries);
- removeLowestHit();
- filterToStats.put(searchFilter, stats);
- }
- }
- }
-
- /**
- * Enable or disable index filter statistics gathering.
- *
- * @param enabled <code>true></code> to enable index filter statics gathering.
- */
- public void enableFilterUseStats(boolean enabled)
- {
- if(enabled && !filterUseEnabled)
- {
- startTimeStamp = TimeThread.getGMTTime();
- indexedSearchCount.set(0);
- unindexedSearchCount.set(0);
- }
- else if(!enabled)
- {
- filterToStats.clear();
- }
- filterUseEnabled = enabled;
- }
-
- /**
- * Indicates if index filter statistics gathering is enabled.
- *
- * @return <code>true</code> If index filter statistics gathering is enabled.
- */
- public boolean isFilterUseEnabled()
- {
- return filterUseEnabled;
- }
-
- /**
- * Sets the maximum number of search filters statistics entries to keep
- * before ones with the least hits will be removed.
- *
- * @param maxEntries The maximum number of search filters statistics
- * entries to keep
- */
- public void setMaxEntries(int maxEntries) {
- this.maxEntries = maxEntries;
- }
-
- /**
- * Updates the statistics counter to include an indexed search.
- */
- public void updateIndexedSearchCount()
- {
- indexedSearchCount.getAndIncrement();
- }
-
- /**
- * Updates the statistics counter to include an unindexed search.
- */
- public void updateUnindexedSearchCount()
- {
- unindexedSearchCount.getAndIncrement();
- }
-
- private void removeLowestHit()
- {
- while(!filterToStats.isEmpty() && filterToStats.size() > maxEntries)
- {
- Iterator<Map.Entry<SearchFilter, FilterStats>> i =
- filterToStats.entrySet().iterator();
- Map.Entry<SearchFilter, FilterStats> lowest = i.next();
- Map.Entry<SearchFilter, FilterStats> entry;
- while(lowest.getValue().hits.get() > 1 && i.hasNext())
- {
- entry = i.next();
- if(entry.getValue().hits.get() < lowest.getValue().hits.get())
- {
- lowest = entry;
- }
- }
-
- filterToStats.remove(lowest.getKey());
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DbPreloadComparator.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DbPreloadComparator.java
deleted file mode 100644
index 76b1366..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/DbPreloadComparator.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-
-import java.util.Comparator;
-
-/**
- * This comparator is used to sort databases in order of priority
- * for preloading into the cache.
- */
-public class DbPreloadComparator
- implements Comparator<DatabaseContainer>
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
-
- /**
- * Calculate the relative priority of a database for preloading.
- *
- * @param database A handle to the database.
- * @return 1 for id2entry database, 2 for dn2id database, 3 for all others.
- */
- private static int priority(DatabaseContainer database)
- {
- String name = database.getName();
- if (name.endsWith(EntryContainer.ID2ENTRY_DATABASE_NAME))
- {
- return 1;
- }
- else if (name.endsWith(EntryContainer.DN2ID_DATABASE_NAME))
- {
- return 2;
- }
- else
- {
- return 3;
- }
- }
-
- /**
- * Compares its two arguments for order. Returns a negative integer,
- * zero, or a positive integer as the first argument is less than, equal
- * to, or greater than the second.
- *
- * @param database1 the first object to be compared.
- * @param database2 the second object to be compared.
- * @return a negative integer, zero, or a positive integer as the
- * first argument is less than, equal to, or greater than the
- * second.
- **/
- public int compare(DatabaseContainer database1, DatabaseContainer database2)
- {
- return priority(database1) - priority(database2);
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryContainer.java
deleted file mode 100644
index 9672c64..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryContainer.java
+++ /dev/null
@@ -1,3361 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- * Portions copyright 2013 Manuel Gaupp
- */
-package org.opends.server.backends.jeb;
-
-import java.util.*;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.LocalizableMessageBuilder;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigChangeResult;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.SearchScope;
-import org.forgerock.util.Utils;
-import org.opends.server.admin.server.ConfigurationAddListener;
-import org.opends.server.admin.server.ConfigurationChangeListener;
-import org.opends.server.admin.server.ConfigurationDeleteListener;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
-import org.opends.server.admin.std.server.LocalDBIndexCfg;
-import org.opends.server.admin.std.server.LocalDBVLVIndexCfg;
-import org.opends.server.api.ClientConnection;
-import org.opends.server.api.EntryCache;
-import org.opends.server.api.plugin.PluginResult.SubordinateDelete;
-import org.opends.server.api.plugin.PluginResult.SubordinateModifyDN;
-import org.opends.server.backends.pluggable.SuffixContainer;
-import org.opends.server.controls.*;
-import org.opends.server.core.*;
-import org.opends.server.types.*;
-import org.opends.server.util.ServerConstants;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-import static com.sleepycat.je.LockMode.*;
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.backends.jeb.JebFormat.*;
-import static org.opends.server.core.DirectoryServer.*;
-import static org.opends.server.protocols.ldap.LDAPResultCode.*;
-import static org.opends.server.types.AdditionalLogItem.*;
-import static org.opends.server.util.StaticUtils.*;
-
-/**
- * Storage container for LDAP entries. Each base DN of a JE backend is given
- * its own entry container. The entry container is the object that implements
- * the guts of the backend API methods for LDAP operations.
- */
-public class EntryContainer
- implements SuffixContainer, ConfigurationChangeListener<LocalDBBackendCfg>
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The name of the entry database. */
- public static final String ID2ENTRY_DATABASE_NAME = ID2ENTRY_INDEX_NAME;
- /** The name of the DN database. */
- public static final String DN2ID_DATABASE_NAME = DN2ID_INDEX_NAME;
- /** The name of the children index database. */
- private static final String ID2CHILDREN_DATABASE_NAME = ID2CHILDREN_INDEX_NAME;
- /** The name of the subtree index database. */
- private static final String ID2SUBTREE_DATABASE_NAME = ID2SUBTREE_INDEX_NAME;
- /** The name of the referral database. */
- private static final String REFERRAL_DATABASE_NAME = REFERRAL_INDEX_NAME;
- /** The name of the state database. */
- private static final String STATE_DATABASE_NAME = STATE_INDEX_NAME;
-
- /** The attribute index configuration manager. */
- private final AttributeJEIndexCfgManager attributeJEIndexCfgManager;
- /** The vlv index configuration manager. */
- private final VLVJEIndexCfgManager vlvJEIndexCfgManager;
-
- /** ID of the backend to which this entry container belongs. */
- private final String backendID;
-
- /** The root container in which this entryContainer belongs. */
- private final RootContainer rootContainer;
-
- /** The baseDN this entry container is responsible for. */
- private final DN baseDN;
-
- /** The backend configuration. */
- private LocalDBBackendCfg config;
-
- /** The JE database environment. */
- private final Environment env;
-
- /** The DN database maps a normalized DN string to an entry ID (8 bytes). */
- private DN2ID dn2id;
- /** The entry database maps an entry ID (8 bytes) to a complete encoded entry. */
- private ID2Entry id2entry;
- /** Index maps entry ID to an entry ID list containing its children. */
- private Index id2children;
- /** Index maps entry ID to an entry ID list containing its subordinates. */
- private Index id2subtree;
- /** The referral database maps a normalized DN string to labeled URIs. */
- private DN2URI dn2uri;
- /** The state database maps a config DN to config entries. */
- private State state;
-
- /** The set of attribute indexes. */
- private final HashMap<AttributeType, AttributeIndex> attrIndexMap = new HashMap<>();
- /** The set of VLV (Virtual List View) indexes. */
- private final HashMap<String, VLVIndex> vlvIndexMap = new HashMap<>();
-
- /**
- * Prevents name clashes for common indexes (like id2entry) across multiple suffixes.
- * For example when a root container contains multiple suffixes.
- */
- private String databasePrefix;
-
- /**
- * This class is responsible for managing the configuration for attribute
- * indexes used within this entry container.
- */
- private class AttributeJEIndexCfgManager implements
- ConfigurationAddListener<LocalDBIndexCfg>,
- ConfigurationDeleteListener<LocalDBIndexCfg>
- {
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationAddAcceptable(
- LocalDBIndexCfg cfg,
- List<LocalizableMessage> unacceptableReasons)
- {
- try
- {
- //Try creating all the indexes before confirming they are valid ones.
- new AttributeIndex(cfg, EntryContainer.this);
- return true;
- }
- catch(Exception e)
- {
- unacceptableReasons.add(LocalizableMessage.raw(e.getLocalizedMessage()));
- return false;
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationAdd(LocalDBIndexCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- try
- {
- AttributeIndex index = new AttributeIndex(cfg, EntryContainer.this);
- index.open();
- if(!index.isTrusted())
- {
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(cfg.getAttribute().getNameOrOID()));
- }
- attrIndexMap.put(cfg.getAttribute(), index);
- }
- catch(Exception e)
- {
- ccr.addMessage(LocalizableMessage.raw(e.getLocalizedMessage()));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- }
-
- return ccr;
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationDeleteAcceptable(
- LocalDBIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
- {
- // TODO: validate more before returning true?
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationDelete(LocalDBIndexCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- exclusiveLock.lock();
- try
- {
- AttributeIndex index = attrIndexMap.get(cfg.getAttribute());
- deleteAttributeIndex(index);
- attrIndexMap.remove(cfg.getAttribute());
- }
- catch(DatabaseException de)
- {
- ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(de)));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- }
- finally
- {
- exclusiveLock.unlock();
- }
-
- return ccr;
- }
- }
-
- /**
- * This class is responsible for managing the configuration for VLV indexes
- * used within this entry container.
- */
- private class VLVJEIndexCfgManager implements
- ConfigurationAddListener<LocalDBVLVIndexCfg>,
- ConfigurationDeleteListener<LocalDBVLVIndexCfg>
- {
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationAddAcceptable(
- LocalDBVLVIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
- {
- try
- {
- SearchFilter.createFilterFromString(cfg.getFilter());
- }
- catch(Exception e)
- {
- unacceptableReasons.add(
- ERR_CONFIG_VLV_INDEX_BAD_FILTER.get(cfg.getFilter(), cfg.getName(), e.getLocalizedMessage()));
- return false;
- }
-
- String[] sortAttrs = cfg.getSortOrder().split(" ");
- SortKey[] sortKeys = new SortKey[sortAttrs.length];
- boolean[] ascending = new boolean[sortAttrs.length];
- for(int i = 0; i < sortAttrs.length; i++)
- {
- try
- {
- if(sortAttrs[i].startsWith("-"))
- {
- ascending[i] = false;
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- else
- {
- ascending[i] = true;
- if(sortAttrs[i].startsWith("+"))
- {
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- }
- }
- catch(Exception e)
- {
- unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortKeys[i], cfg.getName()));
- return false;
- }
-
- AttributeType attrType =
- DirectoryServer.getAttributeTypeOrNull(sortAttrs[i].toLowerCase());
- if(attrType == null)
- {
- unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortAttrs[i], cfg.getName()));
- return false;
- }
- sortKeys[i] = new SortKey(attrType, ascending[i]);
- }
-
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationAdd(LocalDBVLVIndexCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- try
- {
- VLVIndex vlvIndex = new VLVIndex(cfg, state, env, EntryContainer.this);
- vlvIndex.open();
- if(!vlvIndex.isTrusted())
- {
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(cfg.getName()));
- }
- vlvIndexMap.put(cfg.getName().toLowerCase(), vlvIndex);
- }
- catch(Exception e)
- {
- ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(e)));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- }
-
- return ccr;
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationDeleteAcceptable(
- LocalDBVLVIndexCfg cfg,
- List<LocalizableMessage> unacceptableReasons)
- {
- // TODO: validate more before returning true?
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationDelete(LocalDBVLVIndexCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- exclusiveLock.lock();
- try
- {
- VLVIndex vlvIndex =
- vlvIndexMap.get(cfg.getName().toLowerCase());
- deleteDatabase(vlvIndex);
- vlvIndexMap.remove(cfg.getName());
- }
- catch(DatabaseException de)
- {
- ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(de)));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- }
- finally
- {
- exclusiveLock.unlock();
- }
-
- return ccr;
- }
-
- }
-
- /** A read write lock to handle schema changes and bulk changes. */
- private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
- final Lock sharedLock = lock.readLock();
- final Lock exclusiveLock = lock.writeLock();
-
- /**
- * Create a new entry container object.
- *
- * @param baseDN The baseDN this entry container will be responsible for
- * storing on disk.
- * @param databasePrefix The prefix to use in the database names used by
- * this entry container.
- * @param backendID ID of the JE backend that is creating this entry
- * container. It is needed by the Directory Server
- * entry cache methods.
- * @param config The configuration of the JE backend.
- * @param env The JE environment to create this entryContainer in.
- * @param rootContainer The root container this entry container is in.
- * @throws ConfigException if a configuration related error occurs.
- */
- EntryContainer(DN baseDN, String databasePrefix, String backendID,
- LocalDBBackendCfg config, Environment env, RootContainer rootContainer)
- throws ConfigException
- {
- this.backendID = backendID;
- this.baseDN = baseDN;
- this.config = config;
- this.env = env;
- this.rootContainer = rootContainer;
-
- this.databasePrefix = preparePrefix(databasePrefix);
-
- config.addLocalDBChangeListener(this);
-
- attributeJEIndexCfgManager = new AttributeJEIndexCfgManager();
- config.addLocalDBIndexAddListener(attributeJEIndexCfgManager);
- config.addLocalDBIndexDeleteListener(attributeJEIndexCfgManager);
-
- vlvJEIndexCfgManager = new VLVJEIndexCfgManager();
- config.addLocalDBVLVIndexAddListener(vlvJEIndexCfgManager);
- config.addLocalDBVLVIndexDeleteListener(vlvJEIndexCfgManager);
- }
-
- /**
- * Opens the entryContainer for reading and writing.
- *
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws ConfigException if a configuration related error occurs.
- */
- void open() throws DatabaseException, ConfigException
- {
- try
- {
- DataConfig entryDataConfig =
- new DataConfig(config.isEntriesCompressed(),
- config.isCompactEncoding(),
- rootContainer.getCompressedSchema());
-
- id2entry = new ID2Entry(databasePrefix + "_" + ID2ENTRY_DATABASE_NAME,
- entryDataConfig, env, this);
- id2entry.open();
-
- dn2id = new DN2ID(databasePrefix + "_" + DN2ID_DATABASE_NAME, env, this);
- dn2id.open();
-
- state = new State(databasePrefix + "_" + STATE_DATABASE_NAME, env, this);
- state.open();
-
- if (config.isSubordinateIndexesEnabled())
- {
- openSubordinateIndexes();
- }
- else
- {
- // Use a null index and ensure that future attempts to use the real
- // subordinate indexes will fail.
- id2children = new NullIndex(databasePrefix + "_"
- + ID2CHILDREN_DATABASE_NAME, new ID2CIndexer(), state, env, this);
- if (!env.getConfig().getReadOnly())
- {
- state.putIndexTrustState(null, id2children, false);
- }
- id2children.open(); // No-op
-
- id2subtree = new NullIndex(databasePrefix + "_"
- + ID2SUBTREE_DATABASE_NAME, new ID2SIndexer(), state, env, this);
- if (!env.getConfig().getReadOnly())
- {
- state.putIndexTrustState(null, id2subtree, false);
- }
- id2subtree.open(); // No-op
-
- logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, backendID);
- }
-
- dn2uri = new DN2URI(databasePrefix + "_" + REFERRAL_DATABASE_NAME, env, this);
- dn2uri.open();
-
- for (String idx : config.listLocalDBIndexes())
- {
- LocalDBIndexCfg indexCfg = config.getLocalDBIndex(idx);
-
- AttributeIndex index = new AttributeIndex(indexCfg, this);
- index.open();
- if(!index.isTrusted())
- {
- logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, index.getName());
- }
- attrIndexMap.put(indexCfg.getAttribute(), index);
- }
-
- for(String idx : config.listLocalDBVLVIndexes())
- {
- LocalDBVLVIndexCfg vlvIndexCfg = config.getLocalDBVLVIndex(idx);
-
- VLVIndex vlvIndex = new VLVIndex(vlvIndexCfg, state, env, this);
- vlvIndex.open();
-
- if(!vlvIndex.isTrusted())
- {
- logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, vlvIndex.getName());
- }
-
- vlvIndexMap.put(vlvIndexCfg.getName().toLowerCase(), vlvIndex);
- }
- }
- catch (DatabaseException de)
- {
- logger.traceException(de);
- close();
- throw de;
- }
- }
-
- /**
- * Closes the entry container.
- *
- * @throws DatabaseException If an error occurs in the JE database.
- */
- @Override
- public void close() throws DatabaseException
- {
- // Close core indexes.
- dn2id.close();
- id2entry.close();
- dn2uri.close();
- id2children.close();
- id2subtree.close();
- state.close();
-
- Utils.closeSilently(attrIndexMap.values());
-
- for (VLVIndex vlvIndex : vlvIndexMap.values())
- {
- vlvIndex.close();
- }
-
- // Deregister any listeners.
- config.removeLocalDBChangeListener(this);
- config.removeLocalDBIndexAddListener(attributeJEIndexCfgManager);
- config.removeLocalDBIndexDeleteListener(attributeJEIndexCfgManager);
- config.removeLocalDBVLVIndexAddListener(vlvJEIndexCfgManager);
- config.removeLocalDBVLVIndexDeleteListener(vlvJEIndexCfgManager);
- }
-
- /**
- * Retrieves a reference to the root container in which this entry container
- * exists.
- *
- * @return A reference to the root container in which this entry container
- * exists.
- */
- public RootContainer getRootContainer()
- {
- return rootContainer;
- }
-
- /**
- * Get the DN database used by this entry container.
- * The entryContainer must have been opened.
- *
- * @return The DN database.
- */
- public DN2ID getDN2ID()
- {
- return dn2id;
- }
-
- /**
- * Get the entry database used by this entry container.
- * The entryContainer must have been opened.
- *
- * @return The entry database.
- */
- public ID2Entry getID2Entry()
- {
- return id2entry;
- }
-
- /**
- * Get the referral database used by this entry container.
- * The entryContainer must have been opened.
- *
- * @return The referral database.
- */
- public DN2URI getDN2URI()
- {
- return dn2uri;
- }
-
- /**
- * Get the children database used by this entry container.
- * The entryContainer must have been opened.
- *
- * @return The children database.
- */
- public Index getID2Children()
- {
- return id2children;
- }
-
- /**
- * Get the subtree database used by this entry container.
- * The entryContainer must have been opened.
- *
- * @return The subtree database.
- */
- public Index getID2Subtree()
- {
- return id2subtree;
- }
-
- /**
- * Get the state database used by this entry container.
- * The entry container must have been opened.
- *
- * @return The state database.
- */
- public State getState()
- {
- return state;
- }
-
- /**
- * Look for an attribute index for the given attribute type.
- *
- * @param attrType The attribute type for which an attribute index is needed.
- * @return The attribute index or null if there is none for that type.
- */
- AttributeIndex getAttributeIndex(AttributeType attrType)
- {
- return attrIndexMap.get(attrType);
- }
-
- /**
- * Look for an VLV index for the given index name.
- *
- * @param vlvIndexName The vlv index name for which an vlv index is needed.
- * @return The VLV index or null if there is none with that name.
- */
- VLVIndex getVLVIndex(String vlvIndexName)
- {
- return vlvIndexMap.get(vlvIndexName);
- }
-
- /**
- * Retrieve all attribute indexes.
- *
- * @return All attribute indexes defined in this entry container.
- */
- public Collection<AttributeIndex> getAttributeIndexes()
- {
- return attrIndexMap.values();
- }
-
- /**
- * Retrieve all VLV indexes.
- *
- * @return The collection of VLV indexes defined in this entry container.
- */
- public Collection<VLVIndex> getVLVIndexes()
- {
- return vlvIndexMap.values();
- }
-
- /**
- * Determine the highest entryID in the entryContainer.
- * The entryContainer must already be open.
- *
- * @return The highest entry ID.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public EntryID getHighestEntryID() throws DatabaseException
- {
- Cursor cursor = id2entry.openCursor(null, null);
- try
- {
- // Position a cursor on the last data item, and the key should give the highest ID.
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- if (cursor.getLast(key, data, DEFAULT) == OperationStatus.SUCCESS)
- {
- return new EntryID(key);
- }
- return new EntryID(0);
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Determine the number of subordinate entries for a given entry.
- *
- * @param entryDN The distinguished name of the entry.
- * @param subtree <code>true</code> will include the entry and all the
- * entries under the given entries. <code>false</code>
- * will only return the number of entries immediately
- * under the given entry.
- * @return The number of subordinate entries for the given entry or -1 if
- * the entry does not exist.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- long getNumSubordinates(DN entryDN, boolean subtree)
- throws DatabaseException
- {
- EntryID entryID = dn2id.get(null, entryDN, LockMode.DEFAULT);
- if (entryID != null)
- {
- DatabaseEntry key = new DatabaseEntry(entryIDToDatabase(entryID.longValue()));
- final EntryIDSet entryIDSet;
- long count;
- if (subtree)
- {
- count = dn2id.get(null, entryDN, LockMode.DEFAULT) != null ? 1 : 0;
- entryIDSet = id2subtree.readKey(key, null, LockMode.DEFAULT);
- }
- else
- {
- count = 0;
- entryIDSet = id2children.readKey(key, null, LockMode.DEFAULT);
- }
- if(entryIDSet.size() == Long.MAX_VALUE)
- {
- return -1;
- }
- return count + entryIDSet.size();
- }
- return -1;
- }
-
- /**
- * Processes the specified search in this entryContainer.
- * Matching entries should be provided back to the core server using the
- * <CODE>SearchOperation.returnEntry</CODE> method.
- *
- * @param searchOperation The search operation to be processed.
- * @throws DirectoryException
- * If a problem occurs while processing the search.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws CanceledOperationException if this operation should be cancelled.
- */
- void search(SearchOperation searchOperation)
- throws DirectoryException, DatabaseException, CanceledOperationException
- {
- DN aBaseDN = searchOperation.getBaseDN();
- SearchScope searchScope = searchOperation.getScope();
-
- PagedResultsControl pageRequest = searchOperation
- .getRequestControl(PagedResultsControl.DECODER);
- ServerSideSortRequestControl sortRequest = searchOperation
- .getRequestControl(ServerSideSortRequestControl.DECODER);
- if(sortRequest != null && !sortRequest.containsSortKeys()
- && sortRequest.isCritical())
- {
- /*
- If the control's criticality field is true then the server SHOULD do
- the following: return unavailableCriticalExtension as a return code
- in the searchResultDone message; include the sortKeyResponseControl in
- the searchResultDone message, and not send back any search result
- entries.
- */
- searchOperation.addResponseControl(new ServerSideSortResponseControl(NO_SUCH_ATTRIBUTE, null));
- searchOperation.setResultCode(ResultCode.UNAVAILABLE_CRITICAL_EXTENSION);
- return;
- }
- VLVRequestControl vlvRequest = searchOperation.getRequestControl(VLVRequestControl.DECODER);
-
- if (vlvRequest != null && pageRequest != null)
- {
- throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, ERR_SEARCH_CANNOT_MIX_PAGEDRESULTS_AND_VLV.get());
- }
-
- // Handle client abandon of paged results.
- if (pageRequest != null)
- {
- if (pageRequest.getSize() == 0)
- {
- Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
- searchOperation.getResponseControls().add(control);
- return;
- }
- if (searchOperation.getSizeLimit() > 0 &&
- pageRequest.getSize() >= searchOperation.getSizeLimit())
- {
- // The RFC says : "If the page size is greater than or equal to the
- // sizeLimit value, the server should ignore the control as the
- // request can be satisfied in a single page"
- pageRequest = null;
- }
- }
-
- // Handle base-object search first.
- if (searchScope == SearchScope.BASE_OBJECT)
- {
- // Fetch the base entry.
- Entry baseEntry = fetchBaseEntry(aBaseDN, searchScope);
-
- if (!isManageDsaITOperation(searchOperation))
- {
- dn2uri.checkTargetForReferral(baseEntry, searchOperation.getScope());
- }
-
- if (searchOperation.getFilter().matchesEntry(baseEntry))
- {
- searchOperation.returnEntry(baseEntry, null);
- }
-
- if (pageRequest != null)
- {
- // Indicate no more pages.
- Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
- searchOperation.getResponseControls().add(control);
- }
-
- return;
- }
-
- // Check whether the client requested debug information about the
- // contribution of the indexes to the search.
- StringBuilder debugBuffer = null;
- if (searchOperation.getAttributes().contains(ATTR_DEBUG_SEARCH_INDEX))
- {
- debugBuffer = new StringBuilder();
- }
-
- EntryIDSet entryIDList = null;
- boolean candidatesAreInScope = false;
- if(sortRequest != null)
- {
- for(VLVIndex vlvIndex : vlvIndexMap.values())
- {
- try
- {
- entryIDList = vlvIndex.evaluate(null, searchOperation, sortRequest, vlvRequest, debugBuffer);
- if(entryIDList != null)
- {
- searchOperation.addResponseControl(new ServerSideSortResponseControl(SUCCESS, null));
- candidatesAreInScope = true;
- break;
- }
- }
- catch (DirectoryException de)
- {
- searchOperation.addResponseControl(new ServerSideSortResponseControl(de.getResultCode().intValue(), null));
-
- if (sortRequest.isCritical())
- {
- throw de;
- }
- }
- }
- }
-
- if(entryIDList == null)
- {
- // See if we could use a virtual attribute rule to process the search.
- for (VirtualAttributeRule rule : DirectoryServer.getVirtualAttributes())
- {
- if (rule.getProvider().isSearchable(rule, searchOperation, true))
- {
- rule.getProvider().processSearch(rule, searchOperation);
- return;
- }
- }
-
- // Create an index filter to get the search result candidate entries.
- IndexFilter indexFilter =
- new IndexFilter(this, searchOperation, debugBuffer,
- rootContainer.getMonitorProvider());
-
- // Evaluate the filter against the attribute indexes.
- entryIDList = indexFilter.evaluate();
-
- // Evaluate the search scope against the id2children and id2subtree
- // indexes.
- if (entryIDList.size() > IndexFilter.FILTER_CANDIDATE_THRESHOLD)
- {
- // Read the ID from dn2id.
- EntryID baseID = dn2id.get(null, aBaseDN, LockMode.DEFAULT);
- if (baseID == null)
- {
- throw new DirectoryException(
- ResultCode.NO_SUCH_OBJECT, ERR_SEARCH_NO_SUCH_OBJECT.get(aBaseDN), getMatchedDN(aBaseDN), null);
- }
- DatabaseEntry baseIDData = baseID.getDatabaseEntry();
-
- EntryIDSet scopeList;
- if (searchScope == SearchScope.SINGLE_LEVEL)
- {
- scopeList = id2children.readKey(baseIDData, null, LockMode.DEFAULT);
- }
- else
- {
- scopeList = id2subtree.readKey(baseIDData, null, LockMode.DEFAULT);
- if (searchScope == SearchScope.WHOLE_SUBTREE)
- {
- // The id2subtree list does not include the base entry ID.
- scopeList.add(baseID);
- }
- }
- entryIDList.retainAll(scopeList);
- if (debugBuffer != null)
- {
- debugBuffer.append(" scope=");
- debugBuffer.append(searchScope);
- scopeList.toString(debugBuffer);
- }
- if (scopeList.isDefined())
- {
- // In this case we know that every candidate is in scope.
- candidatesAreInScope = true;
- }
- }
-
- if (sortRequest != null)
- {
- try
- {
- //If the sort key is not present, the sorting will generate the
- //default ordering. VLV search request goes through as if
- //this sort key was not found in the user entry.
- entryIDList = EntryIDSetSorter.sort(this, entryIDList,
- searchOperation,
- sortRequest.getSortOrder(),
- vlvRequest);
- if(sortRequest.containsSortKeys())
- {
- searchOperation.addResponseControl(new ServerSideSortResponseControl(SUCCESS, null));
- }
- else
- {
- /*
- * There is no sort key associated with the sort control. Since it
- * came here it means that the criticality is false so let the
- * server return all search results unsorted and include the
- * sortKeyResponseControl in the searchResultDone message.
- */
- searchOperation.addResponseControl(new ServerSideSortResponseControl(NO_SUCH_ATTRIBUTE, null));
- }
- }
- catch (DirectoryException de)
- {
- searchOperation.addResponseControl(new ServerSideSortResponseControl(de.getResultCode().intValue(), null));
-
- if (sortRequest.isCritical())
- {
- throw de;
- }
- }
- }
- }
-
- // If requested, construct and return a fictitious entry containing
- // debug information, and no other entries.
- if (debugBuffer != null)
- {
- debugBuffer.append(" final=");
- entryIDList.toString(debugBuffer);
-
- Attribute attr = Attributes.create(ATTR_DEBUG_SEARCH_INDEX, debugBuffer.toString());
- Entry debugEntry = new Entry(DN.valueOf("cn=debugsearch"), null, null, null);
- debugEntry.addAttribute(attr, new ArrayList<ByteString>());
-
- searchOperation.returnEntry(debugEntry, null);
- return;
- }
-
- if (entryIDList.isDefined())
- {
- if(rootContainer.getMonitorProvider().isFilterUseEnabled())
- {
- rootContainer.getMonitorProvider().updateIndexedSearchCount();
- }
- searchIndexed(entryIDList, candidatesAreInScope, searchOperation, pageRequest);
- }
- else
- {
- if(rootContainer.getMonitorProvider().isFilterUseEnabled())
- {
- rootContainer.getMonitorProvider().updateUnindexedSearchCount();
- }
-
- searchOperation.addAdditionalLogItem(keyOnly(getClass(), "unindexed"));
-
- // See if we could use a virtual attribute rule to process the search.
- for (VirtualAttributeRule rule : DirectoryServer.getVirtualAttributes())
- {
- if (rule.getProvider().isSearchable(rule, searchOperation, false))
- {
- rule.getProvider().processSearch(rule, searchOperation);
- return;
- }
- }
-
- ClientConnection clientConnection = searchOperation.getClientConnection();
- if (!clientConnection.hasPrivilege(Privilege.UNINDEXED_SEARCH, searchOperation))
- {
- throw new DirectoryException(
- ResultCode.INSUFFICIENT_ACCESS_RIGHTS, ERR_SEARCH_UNINDEXED_INSUFFICIENT_PRIVILEGES.get());
- }
-
- if (sortRequest != null)
- {
- // FIXME -- Add support for sorting unindexed searches using indexes
- // like DSEE currently does.
- searchOperation.addResponseControl(new ServerSideSortResponseControl(UNWILLING_TO_PERFORM, null));
-
- if (sortRequest.isCritical())
- {
- throw new DirectoryException(
- ResultCode.UNAVAILABLE_CRITICAL_EXTENSION, ERR_SEARCH_CANNOT_SORT_UNINDEXED.get());
- }
- }
-
- searchNotIndexed(searchOperation, pageRequest);
- }
- }
-
- /**
- * We were not able to obtain a set of candidate entry IDs for the
- * search from the indexes.
- * <p>
- * Here we are relying on the DN key order to ensure children are
- * returned after their parents.
- * <ul>
- * <li>iterate through a subtree range of the DN database
- * <li>discard non-children DNs if the search scope is single level
- * <li>fetch the entry by ID from the entry cache or the entry database
- * <li>return the entry if it matches the filter
- * </ul>
- *
- * @param searchOperation The search operation.
- * @param pageRequest A Paged Results control, or null if none.
- * @throws DirectoryException If an error prevented the search from being
- * processed.
- */
  private void searchNotIndexed(SearchOperation searchOperation, PagedResultsControl pageRequest)
      throws DirectoryException, CanceledOperationException
  {
    DN aBaseDN = searchOperation.getBaseDN();
    SearchScope searchScope = searchOperation.getScope();
    boolean manageDsaIT = isManageDsaITOperation(searchOperation);

    // The base entry must already have been processed if this is
    // a request for the next page in paged results. So we skip
    // the base entry processing if the cookie is set.
    if (pageRequest == null || pageRequest.getCookie().length() == 0)
    {
      // Fetch the base entry.
      Entry baseEntry = fetchBaseEntry(aBaseDN, searchScope);

      if (!manageDsaIT)
      {
        // Throws a referral exception if the base entry is a referral.
        dn2uri.checkTargetForReferral(baseEntry, searchScope);
      }

      /*
       * The base entry is only included for whole subtree search.
       */
      if (searchScope == SearchScope.WHOLE_SUBTREE
          && searchOperation.getFilter().matchesEntry(baseEntry))
      {
        searchOperation.returnEntry(baseEntry, null);
      }

      if (!manageDsaIT
          && !dn2uri.returnSearchReferences(searchOperation)
          && pageRequest != null)
      {
        // returnSearchReferences told us to stop the search.
        // Indicate no more pages.
        Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
        searchOperation.getResponseControls().add(control);
      }
    }

    /*
     * We will iterate forwards through a range of the dn2id keys to
     * find subordinates of the target entry from the top of the tree
     * downwards. For example, any subordinates of "dc=example,dc=com" appear
     * in dn2id with a key ending in ",dc=example,dc=com". The entry
     * "cn=joe,ou=people,dc=example,dc=com" will appear after the entry
     * "ou=people,dc=example,dc=com".
     */
    byte[] baseDNKey = dnToDNKey(aBaseDN, this.baseDN.size());
    // Appending 0x00 yields a key that sorts just after the base entry's own
    // key but before all of its subordinates' keys.
    final byte special = 0x00;
    byte[] suffix = Arrays.copyOf(baseDNKey, baseDNKey.length+1);
    suffix[suffix.length - 1] = special;

    /*
     * Set the ending value to a value of equal length but slightly
     * greater than the suffix. Since keys are compared in
     * reverse order we must set the first byte (the comma).
     * No possibility of overflow here.
     */
    byte[] end = Arrays.copyOf(suffix, suffix.length);
    // No cast needed: "special" is a compile-time constant, so special + 1
    // is the constant 1, which fits in a byte.
    end[end.length - 1] = special + 1;

    // Set the starting value.
    byte[] begin;
    if (pageRequest != null && pageRequest.getCookie().length() != 0)
    {
      // The cookie contains the DN of the next entry to be returned.
      try
      {
        begin = pageRequest.getCookie().toByteArray();
      }
      catch (Exception e)
      {
        logger.traceException(e);
        LocalizableMessage msg = ERR_INVALID_PAGED_RESULTS_COOKIE.get(pageRequest.getCookie().toHexString());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, msg, e);
      }
    }
    else
    {
      // Set the starting value to the suffix.
      begin = suffix;
    }

    DatabaseEntry data = new DatabaseEntry();
    DatabaseEntry key = new DatabaseEntry(begin);

    // Number of entries actually fetched from the database; bounded below by
    // the client connection's lookthrough limit (0 means unlimited).
    int lookthroughCount = 0;
    int lookthroughLimit = searchOperation.getClientConnection().getLookthroughLimit();

    try
    {
      Cursor cursor = dn2id.openCursor(null, null);
      try
      {
        // Initialize the cursor very close to the starting value.
        OperationStatus status = cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);

        // Step forward until we pass the ending value.
        while (status == OperationStatus.SUCCESS)
        {
          if(lookthroughLimit > 0 && lookthroughCount > lookthroughLimit)
          {
            //Lookthrough limit exceeded
            searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
            searchOperation.appendErrorMessage(NOTE_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
            return;
          }
          int cmp = dn2id.getComparator().compare(key.getData(), end);
          if (cmp >= 0)
          {
            // We have gone past the ending value.
            break;
          }

          // We have found a subordinate entry.

          EntryID entryID = new EntryID(data);

          boolean isInScope =
              searchScope != SearchScope.SINGLE_LEVEL
                  // Check if this entry is an immediate child.
                  || findDNKeyParent(key.getData()) == baseDNKey.length;
          if (isInScope)
          {
            // Process the candidate entry.
            final Entry entry = getEntry(entryID);
            if (entry != null)
            {
              lookthroughCount++;

              // Skip referral entries unless ManageDsaIT was requested.
              if ((manageDsaIT || entry.getReferralURLs() == null)
                  && searchOperation.getFilter().matchesEntry(entry))
              {
                if (pageRequest != null
                    && searchOperation.getEntriesSent() == pageRequest.getSize())
                {
                  // The current page is full.
                  // Set the cookie to remember where we were.
                  ByteString cookie = ByteString.wrap(key.getData());
                  Control control = new PagedResultsControl(pageRequest.isCritical(), 0, cookie);
                  searchOperation.getResponseControls().add(control);
                  return;
                }

                if (!searchOperation.returnEntry(entry, null))
                {
                  // We have been told to discontinue processing of the
                  // search. This could be due to size limit exceeded or
                  // operation cancelled.
                  return;
                }
              }
            }
          }

          searchOperation.checkIfCanceled(false);

          // Move to the next record.
          status = cursor.getNext(key, data, LockMode.DEFAULT);
        }
      }
      finally
      {
        cursor.close();
      }
    }
    catch (DatabaseException e)
    {
      // NOTE(review): database errors are only traced here; the search
      // completes with whatever entries were already returned.
      logger.traceException(e);
    }

    if (pageRequest != null)
    {
      // Indicate no more pages.
      Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
      searchOperation.getResponseControls().add(control);
    }
  }
-
- /**
- * Returns the entry corresponding to the provided entryID.
- *
- * @param entryID
- * the id of the entry to retrieve
- * @return the entry corresponding to the provided entryID
- * @throws DirectoryException
- * If an error occurs retrieving the entry
- */
- Entry getEntry(EntryID entryID) throws DirectoryException
- {
- // Try the entry cache first.
- final EntryCache<?> entryCache = getEntryCache();
- final Entry cacheEntry = entryCache.getEntry(backendID, entryID.longValue());
- if (cacheEntry != null)
- {
- return cacheEntry;
- }
-
- final Entry entry = id2entry.get(null, entryID, LockMode.DEFAULT);
- if (entry != null)
- {
- // Put the entry in the cache making sure not to overwrite a newer copy
- // that may have been inserted since the time we read the cache.
- entryCache.putEntryIfAbsent(entry, backendID, entryID.longValue());
- }
- return entry;
- }
-
  /**
   * We were able to obtain a set of candidate entry IDs for the
   * search from the indexes.
   * <p>
   * Here we are relying on ID order to ensure children are returned
   * after their parents.
   * <ul>
   * <li>Iterate through the candidate IDs
   * <li>fetch entry by ID from cache or id2entry
   * <li>put the entry in the cache if not present
   * <li>discard entries that are not in scope
   * <li>return entry if it matches the filter
   * </ul>
   *
   * @param entryIDList The candidate entry IDs.
   * @param candidatesAreInScope true if it is certain that every candidate
   *                             entry is in the search scope.
   * @param searchOperation The search operation.
   * @param pageRequest A Paged Results control, or null if none.
   * @throws DirectoryException If an error prevented the search from being
   *                            processed.
   * @throws CanceledOperationException If the operation was canceled.
   */
  private void searchIndexed(EntryIDSet entryIDList,
      boolean candidatesAreInScope,
      SearchOperation searchOperation,
      PagedResultsControl pageRequest)
      throws DirectoryException, CanceledOperationException
  {
    SearchScope searchScope = searchOperation.getScope();
    DN aBaseDN = searchOperation.getBaseDN();
    boolean manageDsaIT = isManageDsaITOperation(searchOperation);
    boolean continueSearch = true;

    // Set the starting value.
    EntryID begin = null;
    if (pageRequest != null && pageRequest.getCookie().length() != 0)
    {
      // The cookie contains the ID of the next entry to be returned.
      try
      {
        begin = new EntryID(pageRequest.getCookie().toLong());
      }
      catch (Exception e)
      {
        logger.traceException(e);
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
            ERR_INVALID_PAGED_RESULTS_COOKIE.get(pageRequest.getCookie().toHexString()), e);
      }
    }
    else if (!manageDsaIT)
    {
      // First page (or unpaged search): return any search result references.
      // A false return means the client asked to discontinue the search.
      continueSearch = dn2uri.returnSearchReferences(searchOperation);
    }

    // Make sure the candidate list is smaller than the lookthrough limit
    // (0 means unlimited).
    int lookthroughLimit =
        searchOperation.getClientConnection().getLookthroughLimit();
    if(lookthroughLimit > 0 && entryIDList.size() > lookthroughLimit)
    {
      //Lookthrough limit exceeded
      searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
      searchOperation.appendErrorMessage(NOTE_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
      continueSearch = false;
    }

    // Iterate through the index candidates.
    if (continueSearch)
    {
      for (Iterator<EntryID> it = entryIDList.iterator(begin); it.hasNext();)
      {
        final EntryID id = it.next();

        Entry entry;
        try
        {
          entry = getEntry(id);
        }
        catch (Exception e)
        {
          // Skip candidates whose entries cannot be fetched rather than
          // aborting the whole search.
          logger.traceException(e);
          continue;
        }

        // Process the candidate entry.
        if (entry != null)
        {
          // Filter the entry if it is in scope.
          if (isInScope(candidatesAreInScope, searchScope, aBaseDN, entry)
              && (manageDsaIT || entry.getReferralURLs() == null)
              && searchOperation.getFilter().matchesEntry(entry))
          {
            if (pageRequest != null
                && searchOperation.getEntriesSent() == pageRequest.getSize())
            {
              // The current page is full.
              // Set the cookie to remember where we were.
              byte[] cookieBytes = id.getDatabaseEntry().getData();
              ByteString cookie = ByteString.wrap(cookieBytes);
              Control control = new PagedResultsControl(pageRequest.isCritical(), 0, cookie);
              searchOperation.getResponseControls().add(control);
              return;
            }

            if (!searchOperation.returnEntry(entry, null))
            {
              // We have been told to discontinue processing of the
              // search. This could be due to size limit exceeded or
              // operation cancelled.
              break;
            }
          }
        }
      }
      searchOperation.checkIfCanceled(false);
    }

    // Before we return success from the search we must ensure the base entry
    // exists. However, if we have returned at least one entry or subordinate
    // reference it implies the base does exist, so we can omit the check.
    if (searchOperation.getEntriesSent() == 0
        && searchOperation.getReferencesSent() == 0)
    {
      // Fetch the base entry if it exists (throws NO_SUCH_OBJECT otherwise).
      Entry baseEntry = fetchBaseEntry(aBaseDN, searchScope);

      if (!manageDsaIT)
      {
        dn2uri.checkTargetForReferral(baseEntry, searchScope);
      }
    }

    if (pageRequest != null)
    {
      // Indicate no more pages.
      Control control = new PagedResultsControl(pageRequest.isCritical(), 0, null);
      searchOperation.getResponseControls().add(control);
    }
  }
-
- private boolean isInScope(boolean candidatesAreInScope, SearchScope searchScope, DN aBaseDN, Entry entry)
- {
- DN entryDN = entry.getName();
-
- if (candidatesAreInScope)
- {
- return true;
- }
- else if (searchScope == SearchScope.SINGLE_LEVEL)
- {
- // Check if this entry is an immediate child.
- if (entryDN.size() == aBaseDN.size() + 1
- && entryDN.isDescendantOf(aBaseDN))
- {
- return true;
- }
- }
- else if (searchScope == SearchScope.WHOLE_SUBTREE)
- {
- if (entryDN.isDescendantOf(aBaseDN))
- {
- return true;
- }
- }
- else if (searchScope == SearchScope.SUBORDINATES
- && entryDN.size() > aBaseDN.size()
- && entryDN.isDescendantOf(aBaseDN))
- {
- return true;
- }
- return false;
- }
-
  /**
   * Adds the provided entry to this database. This method must ensure that the
   * entry is appropriate for the database and that no entry already exists with
   * the same DN. The caller must hold a write lock on the DN of the provided
   * entry.
   *
   * @param entry The entry to add to this database.
   * @param addOperation The add operation with which the new entry is
   *                     associated. This may be <CODE>null</CODE> for adds
   *                     performed internally.
   * @throws DirectoryException If a problem occurs while trying to add the
   *                            entry.
   * @throws DatabaseException If an error occurs in the JE database.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void addEntry(Entry entry, AddOperation addOperation)
      throws DatabaseException, DirectoryException, CanceledOperationException
  {
    Transaction txn = beginTransaction();
    DN parentDN = getParentWithinBase(entry.getName());

    try
    {
      // Check whether the entry already exists.
      if (dn2id.get(txn, entry.getName(), LockMode.DEFAULT) != null)
      {
        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
            ERR_ADD_ENTRY_ALREADY_EXISTS.get(entry.getName()));
      }

      // Check that the parent entry exists.
      EntryID parentID = null;
      if (parentDN != null)
      {
        // Check for referral entries above the target.
        dn2uri.targetEntryReferrals(entry.getName(), null);

        // Read the parent ID from dn2id.
        parentID = dn2id.get(txn, parentDN, LockMode.DEFAULT);
        if (parentID == null)
        {
          throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
              ERR_ADD_NO_SUCH_OBJECT.get(entry.getName()), getMatchedDN(baseDN), null);
        }
      }

      // Allocate a fresh ID; IDs are assigned in increasing order so that a
      // child's ID is always greater than its parent's.
      EntryID entryID = rootContainer.getNextEntryID();

      // Insert into dn2id.
      if (!dn2id.insert(txn, entry.getName(), entryID))
      {
        // Do not ever expect to come through here: existence was checked above
        // within the same transaction.
        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
            ERR_ADD_ENTRY_ALREADY_EXISTS.get(entry.getName()));
      }

      // Update the referral database for referral entries.
      if (!dn2uri.addEntry(txn, entry))
      {
        // Do not ever expect to come through here.
        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
            ERR_ADD_ENTRY_ALREADY_EXISTS.get(entry.getName()));
      }

      // Insert into id2entry.
      if (!id2entry.insert(txn, entryID, entry))
      {
        // Do not ever expect to come through here.
        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
            ERR_ADD_ENTRY_ALREADY_EXISTS.get(entry.getName()));
      }

      // Insert into the indexes, in index configuration order.
      final IndexBuffer indexBuffer = new IndexBuffer(this);
      indexInsertEntry(indexBuffer, entry, entryID);

      // Insert into id2children and id2subtree.
      // The database transaction locks on these records will be hotly
      // contested so we do them last so as to hold the locks for the
      // shortest duration.
      if (parentDN != null)
      {
        final ByteString parentIDKeyBytes = toByteString(parentID);
        id2children.insertID(indexBuffer, parentIDKeyBytes, entryID);
        id2subtree.insertID(indexBuffer, parentIDKeyBytes, entryID);

        // Iterate up through the superior entries, starting above the parent.
        for (DN dn = getParentWithinBase(parentDN); dn != null;
            dn = getParentWithinBase(dn))
        {
          // Read the ID from dn2id.
          EntryID nodeID = dn2id.get(txn, dn, LockMode.DEFAULT);
          if (nodeID == null)
          {
            // dn2id is inconsistent: every superior within the base must exist.
            throw new JebException(ERR_MISSING_DN2ID_RECORD.get(dn));
          }

          // Insert into id2subtree for this node.
          id2subtree.insertID(indexBuffer, toByteString(nodeID), entryID);
        }
      }
      indexBuffer.flush(txn);

      if(addOperation != null)
      {
        // One last check before committing
        addOperation.checkIfCanceled(true);
      }

      // Commit the transaction.
      EntryContainer.transactionCommit(txn);

      // Update the entry cache only after a successful commit.
      EntryCache<?> entryCache = DirectoryServer.getEntryCache();
      if (entryCache != null)
      {
        entryCache.putEntry(entry, backendID, entryID.longValue());
      }
    }
    catch (DatabaseException | DirectoryException | CanceledOperationException e)
    {
      EntryContainer.transactionAbort(txn);
      throw e;
    }
    catch (Exception e)
    {
      // Unexpected failure: abort and wrap in a server-error DirectoryException
      // so callers see a single exception type.
      EntryContainer.transactionAbort(txn);

      String msg = e.getMessage();
      if (msg == null)
      {
        msg = stackTraceToSingleLineString(e);
      }
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          ERR_UNCHECKED_EXCEPTION.get(msg), e);
    }
  }
-
- private ByteString toByteString(EntryID entryID)
- {
- return ByteString.wrap(entryID.getDatabaseEntry().getData());
- }
-
  /**
   * Removes the specified entry from this database. This method must ensure
   * that the entry exists and that it does not have any subordinate entries
   * (unless the database supports a subtree delete operation and the client
   * included the appropriate information in the request). The caller must hold
   * a write lock on the provided entry DN.
   *
   * @param entryDN The DN of the entry to remove from this database.
   * @param deleteOperation The delete operation with which this action is
   *                        associated. This may be <CODE>null</CODE> for
   *                        deletes performed internally.
   * @throws DirectoryException If a problem occurs while trying to remove the
   *                            entry.
   * @throws DatabaseException If an error occurs in the JE database.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void deleteEntry(DN entryDN, DeleteOperation deleteOperation)
      throws DirectoryException, DatabaseException, CanceledOperationException
  {
    Transaction txn = beginTransaction();
    final IndexBuffer indexBuffer = new IndexBuffer(this);

    try
    {
      // Check for referral entries above the target entry.
      dn2uri.targetEntryReferrals(entryDN, null);

      // Determine whether this is a subtree delete.
      boolean isSubtreeDelete = deleteOperation != null
          && deleteOperation.getRequestControl(SubtreeDeleteControl.DECODER) != null;

      /*
       * We will iterate forwards through a range of the dn2id keys to
       * find subordinates of the target entry from the top of the tree
       * downwards.
       */
      byte[] entryDNKey = dnToDNKey(entryDN, this.baseDN.size());
      // Appending 0x00 yields a key that sorts just after the target entry's
      // own key but before all of its subordinates' keys.
      byte special = 0x00;
      byte[] suffix = Arrays.copyOf(entryDNKey, entryDNKey.length+1);
      suffix[suffix.length - 1] = special;

      /*
       * Set the ending value to a value of equal length but slightly
       * greater than the suffix.
       */
      byte[] end = Arrays.copyOf(suffix, suffix.length);
      end[end.length - 1] = (byte) (special + 1);

      int subordinateEntriesDeleted = 0;

      DatabaseEntry data = new DatabaseEntry();
      DatabaseEntry key = new DatabaseEntry(suffix);

      CursorConfig cursorConfig = new CursorConfig();
      cursorConfig.setReadCommitted(true);
      Cursor cursor = dn2id.openCursor(txn, cursorConfig);
      try
      {
        // Initialize the cursor very close to the starting value.
        OperationStatus status = cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);

        // Step forward until the key is greater than the starting value,
        // positioning on the first strict subordinate of the target.
        while (status == OperationStatus.SUCCESS &&
            dn2id.getComparator().compare(key.getData(), suffix) <= 0)
        {
          status = cursor.getNext(key, data, LockMode.DEFAULT);
        }

        // Step forward until we pass the ending value.
        while (status == OperationStatus.SUCCESS)
        {
          int cmp = dn2id.getComparator().compare(key.getData(), end);
          if (cmp >= 0)
          {
            // We have gone past the ending value.
            break;
          }

          // We have found a subordinate entry.
          if (!isSubtreeDelete)
          {
            // The subtree delete control was not specified and
            // the target entry is not a leaf.
            throw new DirectoryException(ResultCode.NOT_ALLOWED_ON_NONLEAF,
                ERR_DELETE_NOT_ALLOWED_ON_NONLEAF.get(entryDN));
          }

          /*
           * Delete this entry which by now must be a leaf because
           * we have been deleting from the bottom of the tree upwards.
           */
          EntryID entryID = new EntryID(data);

          // Invoke any subordinate delete plugins on the entry.
          if (deleteOperation != null
              && !deleteOperation.isSynchronizationOperation())
          {
            Entry subordinateEntry = id2entry.get(txn, entryID, LockMode.DEFAULT);
            SubordinateDelete pluginResult =
                getPluginConfigManager().invokeSubordinateDeletePlugins(
                    deleteOperation, subordinateEntry);

            if (!pluginResult.continueProcessing())
            {
              throw new DirectoryException(
                  DirectoryServer.getServerErrorResultCode(),
                  ERR_DELETE_ABORTED_BY_SUBORDINATE_PLUGIN.get(subordinateEntry.getName()));
            }
          }

          // Subordinates may be deleted directly: pass manageDsaIT=true so
          // referral entries in the subtree are removed, not chased.
          deleteEntry(txn, indexBuffer, true, entryDN, key, entryID);
          subordinateEntriesDeleted++;

          if(deleteOperation != null)
          {
            deleteOperation.checkIfCanceled(false);
          }

          // Get the next DN.
          data = new DatabaseEntry();
          status = cursor.getNext(key, data, LockMode.DEFAULT);
        }
      }
      finally
      {
        cursor.close();
      }

      // draft-armijo-ldap-treedelete, 4.1 Tree Delete Semantics:
      // The server MUST NOT chase referrals stored in the tree. If
      // information about referrals is stored in this section of the
      // tree, this pointer will be deleted.
      deleteEntry(txn, indexBuffer,
          isSubtreeDelete || isManageDsaITOperation(deleteOperation),
          entryDN, null, null);

      indexBuffer.flush(txn);


      if(deleteOperation != null)
      {
        // One last check before committing
        deleteOperation.checkIfCanceled(true);
      }

      // Commit the transaction.
      EntryContainer.transactionCommit(txn);

      if(isSubtreeDelete)
      {
        // isSubtreeDelete implies deleteOperation != null (see above).
        deleteOperation.addAdditionalLogItem(
            unquotedKeyValue(getClass(), "deletedEntries",
                subordinateEntriesDeleted + 1));
      }
    }
    catch (DatabaseException | DirectoryException | CanceledOperationException e)
    {
      EntryContainer.transactionAbort(txn);
      throw e;
    }
    catch (Exception e)
    {
      // Unexpected failure: abort and wrap in a server-error DirectoryException.
      EntryContainer.transactionAbort(txn);

      String msg = e.getMessage();
      if (msg == null)
      {
        msg = stackTraceToSingleLineString(e);
      }
      LocalizableMessage message = ERR_UNCHECKED_EXCEPTION.get(msg);
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          message, e);
    }
  }
-
  /**
   * Deletes a single leaf entry from every database: dn2id, the referral
   * database, id2entry, the attribute indexes, id2children/id2subtree and the
   * entry cache. Index updates are buffered; the caller flushes the buffer.
   *
   * @param txn the database transaction
   * @param indexBuffer buffer of pending index changes, flushed by the caller
   * @param manageDsaIT true if referral entries may be deleted directly
   *                    rather than triggering a referral result
   * @param targetDN the DN of the entry being deleted
   * @param leafDNKey the dn2id key of the entry, or null to derive it from
   *                  targetDN
   * @param leafID the ID of the entry, or null to look it up in dn2id
   * @throws DatabaseException if a JE database error occurs
   * @throws DirectoryException if the entry or its records are missing
   * @throws JebException if a dn2id record for a superior entry is missing
   */
  private void deleteEntry(Transaction txn,
      IndexBuffer indexBuffer,
      boolean manageDsaIT,
      DN targetDN,
      DatabaseEntry leafDNKey,
      EntryID leafID)
      throws DatabaseException, DirectoryException, JebException
  {
    if(leafID == null || leafDNKey == null)
    {
      // Read the entry ID from dn2id.
      if(leafDNKey == null)
      {
        leafDNKey = new DatabaseEntry(dnToDNKey(targetDN, baseDN.size()));
      }
      DatabaseEntry value = new DatabaseEntry();
      // RMW: take a write lock immediately since we are about to delete.
      OperationStatus status = dn2id.read(txn, leafDNKey, value, LockMode.RMW);
      if (status != OperationStatus.SUCCESS)
      {
        throw new DirectoryException(
            ResultCode.NO_SUCH_OBJECT, ERR_DELETE_NO_SUCH_OBJECT.get(leafDNKey), getMatchedDN(baseDN), null);
      }
      leafID = new EntryID(value);
    }

    // Remove from dn2id.
    if (dn2id.delete(txn, leafDNKey) != OperationStatus.SUCCESS)
    {
      // Do not expect to ever come through here.
      LocalizableMessage message = ERR_DELETE_NO_SUCH_OBJECT.get(leafDNKey);
      DN matchedDN = getMatchedDN(baseDN);
      throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, message, matchedDN, null);
    }

    // Check that the entry exists in id2entry and read its contents.
    Entry entry = id2entry.get(txn, leafID, LockMode.RMW);
    if (entry == null)
    {
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          ERR_MISSING_ID2ENTRY_RECORD.get(leafID));
    }

    if (!manageDsaIT)
    {
      // Throws a referral exception if the entry is a referral.
      dn2uri.checkTargetForReferral(entry, null);
    }

    // Update the referral database.
    dn2uri.deleteEntry(txn, entry);

    // Remove from id2entry.
    if (!id2entry.remove(txn, leafID))
    {
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          ERR_MISSING_ID2ENTRY_RECORD.get(leafID));
    }

    // Remove from the indexes, in index config order.
    indexRemoveEntry(indexBuffer, entry, leafID);

    // Remove the id2c and id2s records for this entry.
    final ByteString leafIDKeyBytes = ByteString.valueOfLong(leafID.longValue());
    id2children.delete(indexBuffer, leafIDKeyBytes);
    id2subtree.delete(indexBuffer, leafIDKeyBytes);

    // Iterate up through the superior entries from the target entry.
    // Only the immediate parent's id2children record references this entry;
    // every superior's id2subtree record does.
    boolean isParent = true;
    for (DN parentDN = getParentWithinBase(targetDN); parentDN != null;
        parentDN = getParentWithinBase(parentDN))
    {
      // Read the ID from dn2id.
      EntryID parentID = dn2id.get(txn, parentDN, LockMode.DEFAULT);
      if (parentID == null)
      {
        throw new JebException(ERR_MISSING_DN2ID_RECORD.get(parentDN));
      }

      ByteString parentIDBytes = ByteString.valueOfLong(parentID.longValue());
      // Remove from id2children.
      if (isParent)
      {
        id2children.removeID(indexBuffer, parentIDBytes, leafID);
        isParent = false;
      }
      id2subtree.removeID(indexBuffer, parentIDBytes, leafID);
    }

    // Remove the entry from the entry cache.
    EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.removeEntry(entry.getName());
    }
  }
-
- /**
- * Indicates whether an entry with the specified DN exists.
- *
- * @param entryDN The DN of the entry for which to determine existence.
- *
- * @return <CODE>true</CODE> if the specified entry exists,
- * or <CODE>false</CODE> if it does not.
- *
- * @throws DirectoryException If a problem occurs while trying to make the
- * determination.
- */
- boolean entryExists(DN entryDN) throws DirectoryException
- {
- // Try the entry cache first.
- EntryCache<?> entryCache = DirectoryServer.getEntryCache();
- if (entryCache != null && entryCache.containsEntry(entryDN))
- {
- return true;
- }
-
- try
- {
- return dn2id.get(null, entryDN, LockMode.DEFAULT) != null;
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- return false;
- }
- }
-
- /**
- * Fetch an entry by DN, trying the entry cache first, then the database. Retrieves the requested
- * entry, trying the entry cache first, then the database.
- *
- * @param entryDN
- * The distinguished name of the entry to retrieve.
- * @return The requested entry, or <CODE>null</CODE> if the entry does not exist.
- * @throws DirectoryException
- * If a problem occurs while trying to retrieve the entry.
- * @throws DatabaseException
- * An error occurred during a database operation.
- */
- Entry getEntry(DN entryDN) throws DatabaseException, DirectoryException
- {
- EntryCache<?> entryCache = DirectoryServer.getEntryCache();
- if (entryCache != null)
- {
- Entry entry = entryCache.getEntry(entryDN);
- if (entry != null)
- {
- return entry;
- }
- }
-
- EntryID entryID = dn2id.get(null, entryDN, LockMode.DEFAULT);
- if (entryID == null)
- {
- // The entryDN does not exist. Check for referral entries above the target entry.
- dn2uri.targetEntryReferrals(entryDN, null);
- return null;
- }
-
- Entry entry = id2entry.get(null, entryID, LockMode.DEFAULT);
- if (entry != null && entryCache != null)
- {
- /*
- * Put the entry in the cache making sure not to overwrite a newer copy that may have been
- * inserted since the time we read the cache.
- */
- entryCache.putEntryIfAbsent(entry, backendID, entryID.longValue());
- }
- return entry;
- }
-
  /**
   * The simplest case of replacing an entry in which the entry DN has
   * not changed.
   *
   * @param oldEntry The old contents of the entry
   * @param newEntry The new contents of the entry
   * @param modifyOperation The modify operation with which this action is
   *                        associated. This may be <CODE>null</CODE> for
   *                        modifications performed internally.
   * @throws DatabaseException If an error occurs in the JE database.
   * @throws DirectoryException If a Directory Server error occurs.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void replaceEntry(Entry oldEntry, Entry newEntry,
      ModifyOperation modifyOperation) throws DatabaseException,
      DirectoryException, CanceledOperationException
  {
    Transaction txn = beginTransaction();

    try
    {
      // Read dn2id. RMW takes a write lock up front since we will update.
      EntryID entryID = dn2id.get(txn, newEntry.getName(), LockMode.RMW);
      if (entryID == null)
      {
        // The entry does not exist.
        throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
            ERR_MODIFY_NO_SUCH_OBJECT.get(newEntry.getName()), getMatchedDN(baseDN), null);
      }

      if (!isManageDsaITOperation(modifyOperation))
      {
        // Check if the entry is a referral entry.
        dn2uri.checkTargetForReferral(oldEntry, null);
      }

      // Update the referral database.
      if (modifyOperation != null)
      {
        // In this case we know from the operation what the modifications were.
        List<Modification> mods = modifyOperation.getModifications();
        dn2uri.modifyEntry(txn, oldEntry, newEntry, mods);
      }
      else
      {
        dn2uri.replaceEntry(txn, oldEntry, newEntry);
      }

      // Replace id2entry.
      id2entry.put(txn, entryID, newEntry);

      // Update the indexes.
      final IndexBuffer indexBuffer = new IndexBuffer(this);
      if (modifyOperation != null)
      {
        // In this case we know from the operation what the modifications were,
        // so only the affected indexes need updating.
        List<Modification> mods = modifyOperation.getModifications();
        indexModifications(indexBuffer, oldEntry, newEntry, entryID, mods);
      }
      else
      {
        // The most optimal would be to figure out what the modifications were;
        // without them, remove the old entry and insert the new one wholesale.
        indexRemoveEntry(indexBuffer, oldEntry, entryID);
        indexInsertEntry(indexBuffer, newEntry, entryID);
      }

      indexBuffer.flush(txn);

      if(modifyOperation != null)
      {
        // One last check before committing
        modifyOperation.checkIfCanceled(true);
      }

      // Commit the transaction.
      EntryContainer.transactionCommit(txn);

      // Update the entry cache only after a successful commit.
      EntryCache<?> entryCache = DirectoryServer.getEntryCache();
      if (entryCache != null)
      {
        entryCache.putEntry(newEntry, backendID, entryID.longValue());
      }
    }
    catch (DatabaseException | DirectoryException | CanceledOperationException e)
    {
      EntryContainer.transactionAbort(txn);
      throw e;
    }
    catch (Exception e)
    {
      // Unexpected failure: abort and wrap in a server-error DirectoryException.
      EntryContainer.transactionAbort(txn);

      String msg = e.getMessage();
      if (msg == null)
      {
        msg = stackTraceToSingleLineString(e);
      }
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
          ERR_UNCHECKED_EXCEPTION.get(msg), e);
    }
  }
-
- /**
- * Moves and/or renames the provided entry in this backend, altering any
- * subordinate entries as necessary. This must ensure that an entry already
- * exists with the provided current DN, and that no entry exists with the
- * target DN of the provided entry. The caller must hold write locks on both
- * the current DN and the new DN for the entry.
- *
- * @param currentDN The current DN of the entry to be replaced.
- * @param entry The new content to use for the entry.
- * @param modifyDNOperation The modify DN operation with which this action
- * is associated. This may be <CODE>null</CODE>
- * for modify DN operations performed internally.
- * @throws DirectoryException
- * If a problem occurs while trying to perform the rename.
- * @throws CanceledOperationException
- * If this backend noticed and reacted to a request to cancel
- * or abandon the modify DN operation.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- void renameEntry(DN currentDN, Entry entry, ModifyDNOperation modifyDNOperation)
- throws DatabaseException, DirectoryException, CanceledOperationException
- {
- Transaction txn = beginTransaction();
- DN oldSuperiorDN = getParentWithinBase(currentDN);
- DN newSuperiorDN = getParentWithinBase(entry.getName());
- boolean isApexEntryMoved;
-
- if(oldSuperiorDN != null)
- {
- isApexEntryMoved = ! oldSuperiorDN.equals(newSuperiorDN);
- }
- else if(newSuperiorDN != null)
- {
- isApexEntryMoved = ! newSuperiorDN.equals(oldSuperiorDN);
- }
- else
- {
- isApexEntryMoved = false;
- }
-
- IndexBuffer buffer = new IndexBuffer(EntryContainer.this);
-
- try
- {
- // Check whether the renamed entry already exists.
- if (!currentDN.equals(entry.getName()) &&
- dn2id.get(txn, entry.getName(), LockMode.DEFAULT) != null)
- {
- LocalizableMessage message = ERR_MODIFYDN_ALREADY_EXISTS.get(entry.getName());
- throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS, message);
- }
-
- EntryID oldApexID = dn2id.get(txn, currentDN, LockMode.DEFAULT);
- if (oldApexID == null)
- {
- // Check for referral entries above the target entry.
- dn2uri.targetEntryReferrals(currentDN, null);
-
- throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
- ERR_MODIFYDN_NO_SUCH_OBJECT.get(currentDN), getMatchedDN(baseDN), null);
- }
-
- Entry oldApexEntry = id2entry.get(txn, oldApexID, LockMode.DEFAULT);
- if (oldApexEntry == null)
- {
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
- ERR_MISSING_ID2ENTRY_RECORD.get(oldApexID));
- }
-
- if (!isManageDsaITOperation(modifyDNOperation))
- {
- dn2uri.checkTargetForReferral(oldApexEntry, null);
- }
-
- EntryID newApexID = oldApexID;
- if (newSuperiorDN != null && isApexEntryMoved)
- {
- /*
- * We want to preserve the invariant that the ID of an
- * entry is greater than its parent, since search
- * results are returned in ID order.
- */
- EntryID newSuperiorID = dn2id.get(txn, newSuperiorDN, LockMode.DEFAULT);
- if (newSuperiorID == null)
- {
- throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
- ERR_NEW_SUPERIOR_NO_SUCH_OBJECT.get(newSuperiorDN), getMatchedDN(baseDN), null);
- }
-
- if (newSuperiorID.compareTo(oldApexID) > 0)
- {
- // This move would break the above invariant so we must
- // renumber every entry that moves. This is even more
- // expensive since every entry has to be deleted from
- // and added back into the attribute indexes.
- newApexID = rootContainer.getNextEntryID();
-
- if(logger.isTraceEnabled())
- {
- logger.trace("Move of target entry requires renumbering" +
- "all entries in the subtree. " +
- "Old DN: %s " +
- "New DN: %s " +
- "Old entry ID: %d " +
- "New entry ID: %d " +
- "New Superior ID: %d" +
- oldApexEntry.getName(), entry.getName(),
- oldApexID.longValue(), newApexID.longValue(),
- newSuperiorID.longValue());
- }
- }
- }
-
- MovedEntry head = new MovedEntry(null, null, false);
- MovedEntry current = head;
- // Move or rename the apex entry.
- removeApexEntry(txn, buffer, oldSuperiorDN, oldApexID,
- newApexID, oldApexEntry, entry,isApexEntryMoved, modifyDNOperation,
- current);
- current = current.next;
-
- /*
- * We will iterate forwards through a range of the dn2id keys to
- * find subordinates of the target entry from the top of the tree
- * downwards.
- */
- byte[] currentDNKey = dnToDNKey(currentDN, this.baseDN.size());
- byte special = 0x00;
- byte[] suffix = Arrays.copyOf(currentDNKey, currentDNKey.length+1);
- suffix[suffix.length - 1] = special;
-
- /*
- * Set the ending value to a value of equal length but slightly
- * greater than the suffix.
- */
- byte[] end = Arrays.copyOf(suffix, suffix.length);
- end[end.length - 1] = (byte) (special + 1);
-
- DatabaseEntry data = new DatabaseEntry();
- DatabaseEntry key = new DatabaseEntry(suffix);
-
- CursorConfig cursorConfig = new CursorConfig();
- cursorConfig.setReadCommitted(true);
- Cursor cursor = dn2id.openCursor(txn, cursorConfig);
- try
- {
- // Initialize the cursor very close to the starting value.
- OperationStatus status = cursor.getSearchKeyRange(key, data, LockMode.DEFAULT);
-
- // Step forward until the key is greater than the starting value.
- while (status == OperationStatus.SUCCESS &&
- dn2id.getComparator().compare(key.getData(), suffix) <= 0)
- {
- status = cursor.getNext(key, data, LockMode.DEFAULT);
- }
-
- // Step forward until we pass the ending value.
- while (status == OperationStatus.SUCCESS)
- {
- int cmp = dn2id.getComparator().compare(key.getData(), end);
- if (cmp >= 0)
- {
- // We have gone past the ending value.
- break;
- }
-
- // We have found a subordinate entry.
- EntryID oldID = new EntryID(data);
- Entry oldEntry = id2entry.get(txn, oldID, LockMode.DEFAULT);
-
- // Construct the new DN of the entry.
- DN newDN = modDN(oldEntry.getName(),
- currentDN.size(),
- entry.getName());
-
- // Assign a new entry ID if we are renumbering.
- EntryID newID = oldID;
- if (!newApexID.equals(oldApexID))
- {
- newID = rootContainer.getNextEntryID();
-
- if(logger.isTraceEnabled())
- {
- logger.trace("Move of subordinate entry requires " +
- "renumbering. " +
- "Old DN: %s " +
- "New DN: %s " +
- "Old entry ID: %d " +
- "New entry ID: %d",
- oldEntry.getName(), newDN, oldID.longValue(),
- newID.longValue());
- }
- }
-
- // Move this entry.
- removeSubordinateEntry(txn, buffer, oldSuperiorDN,
- oldID, newID, oldEntry, newDN, isApexEntryMoved,
- modifyDNOperation, current);
- current = current.next;
-
- if(modifyDNOperation != null)
- {
- modifyDNOperation.checkIfCanceled(false);
- }
-
- // Get the next DN.
- data = new DatabaseEntry();
- status = cursor.getNext(key, data, LockMode.DEFAULT);
- }
- }
- finally
- {
- cursor.close();
- }
-
- // Set current to the first moved entry and null out the head. This will
- // allow processed moved entries to be GCed.
- current = head.next;
- head = null;
- while(current != null)
- {
- addRenamedEntry(txn, buffer, current.entryID, current.entry,
- isApexEntryMoved, current.renumbered,
- modifyDNOperation);
- current = current.next;
- }
- buffer.flush(txn);
-
- if(modifyDNOperation != null)
- {
- // One last check before committing
- modifyDNOperation.checkIfCanceled(true);
- }
-
- // Commit the transaction.
- EntryContainer.transactionCommit(txn);
- }
- catch (DatabaseException | DirectoryException | CanceledOperationException e)
- {
- EntryContainer.transactionAbort(txn);
- throw e;
- }
- catch (Exception e)
- {
- EntryContainer.transactionAbort(txn);
-
- String msg = e.getMessage();
- if (msg == null)
- {
- msg = stackTraceToSingleLineString(e);
- }
- LocalizableMessage message = ERR_UNCHECKED_EXCEPTION.get(msg);
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
- message, e);
- }
- }
-
- /** Represents an renamed entry that was deleted from JE but yet to be added back. */
- private static final class MovedEntry
- {
- private EntryID entryID;
- private Entry entry;
- private MovedEntry next;
- private boolean renumbered;
-
- private MovedEntry(EntryID entryID, Entry entry, boolean renumbered)
- {
- this.entryID = entryID;
- this.entry = entry;
- this.renumbered = renumbered;
- }
- }
-
- private void addRenamedEntry(Transaction txn, IndexBuffer buffer,
- EntryID newID,
- Entry newEntry,
- boolean isApexEntryMoved,
- boolean renumbered,
- ModifyDNOperation modifyDNOperation)
- throws DirectoryException, DatabaseException
- {
- if (!dn2id.insert(txn, newEntry.getName(), newID))
- {
- throw new DirectoryException(
- ResultCode.ENTRY_ALREADY_EXISTS, ERR_MODIFYDN_ALREADY_EXISTS.get(newEntry.getName()));
- }
- id2entry.put(txn, newID, newEntry);
- dn2uri.addEntry(txn, newEntry);
-
- if (renumbered || modifyDNOperation == null)
- {
- // Reindex the entry with the new ID.
- indexInsertEntry(buffer, newEntry, newID);
- }
-
- // Add the new ID to id2children and id2subtree of new apex parent entry.
- if(isApexEntryMoved)
- {
- boolean isParent = true;
- for (DN dn = getParentWithinBase(newEntry.getName()); dn != null;
- dn = getParentWithinBase(dn))
- {
- EntryID parentID = dn2id.get(txn, dn, LockMode.DEFAULT);
- ByteString parentIDKeyBytes = ByteString.valueOfLong(parentID.longValue());
- if(isParent)
- {
- id2children.insertID(buffer, parentIDKeyBytes, newID);
- isParent = false;
- }
- id2subtree.insertID(buffer, parentIDKeyBytes, newID);
- }
- }
- }
-
- private void removeApexEntry(Transaction txn, IndexBuffer buffer,
- DN oldSuperiorDN,
- EntryID oldID, EntryID newID,
- Entry oldEntry, Entry newEntry,
- boolean isApexEntryMoved,
- ModifyDNOperation modifyDNOperation,
- MovedEntry tail)
- throws DirectoryException, DatabaseException
- {
- DN oldDN = oldEntry.getName();
-
- // Remove the old DN from dn2id.
- dn2id.remove(txn, oldDN);
-
- // Remove old ID from id2entry and put the new entry
- // (old entry with new DN) in id2entry.
- if (!newID.equals(oldID))
- {
- id2entry.remove(txn, oldID);
- }
-
- // Update any referral records.
- dn2uri.deleteEntry(txn, oldEntry);
-
- tail.next = new MovedEntry(newID, newEntry, !newID.equals(oldID));
-
- // Remove the old ID from id2children and id2subtree of
- // the old apex parent entry.
- if(oldSuperiorDN != null && isApexEntryMoved)
- {
- boolean isParent = true;
- for (DN dn = oldSuperiorDN; dn != null; dn = getParentWithinBase(dn))
- {
- EntryID parentID = dn2id.get(txn, dn, LockMode.DEFAULT);
- ByteString parentIDKeyBytes = ByteString.valueOfLong(parentID.longValue());
- if(isParent)
- {
- id2children.removeID(buffer, parentIDKeyBytes, oldID);
- isParent = false;
- }
- id2subtree.removeID(buffer, parentIDKeyBytes, oldID);
- }
- }
-
- if (!newID.equals(oldID) || modifyDNOperation == null)
- {
- // All the subordinates will be renumbered so we have to rebuild
- // id2c and id2s with the new ID.
- ByteString oldIDKeyBytes = ByteString.valueOfLong(oldID.longValue());
- id2children.delete(buffer, oldIDKeyBytes);
- id2subtree.delete(buffer, oldIDKeyBytes);
-
- // Reindex the entry with the new ID.
- indexRemoveEntry(buffer, oldEntry, oldID);
- }
- else
- {
- // Update the indexes if needed.
- indexModifications(buffer, oldEntry, newEntry, oldID,
- modifyDNOperation.getModifications());
- }
-
- // Remove the entry from the entry cache.
- EntryCache<?> entryCache = DirectoryServer.getEntryCache();
- if (entryCache != null)
- {
- entryCache.removeEntry(oldDN);
- }
- }
-
- private void removeSubordinateEntry(Transaction txn, IndexBuffer buffer,
- DN oldSuperiorDN,
- EntryID oldID, EntryID newID,
- Entry oldEntry, DN newDN,
- boolean isApexEntryMoved,
- ModifyDNOperation modifyDNOperation,
- MovedEntry tail)
- throws DirectoryException, DatabaseException
- {
- DN oldDN = oldEntry.getName();
- Entry newEntry = oldEntry.duplicate(false);
- newEntry.setDN(newDN);
- List<Modification> modifications =
- Collections.unmodifiableList(new ArrayList<Modification>(0));
-
- // Create a new entry that is a copy of the old entry but with the new DN.
- // Also invoke any subordinate modify DN plugins on the entry.
- // FIXME -- At the present time, we don't support subordinate modify DN
- // plugins that make changes to subordinate entries and therefore
- // provide an unmodifiable list for the modifications element.
- // FIXME -- This will need to be updated appropriately if we decided that
- // these plugins should be invoked for synchronization
- // operations.
- if (modifyDNOperation != null && !modifyDNOperation.isSynchronizationOperation())
- {
- SubordinateModifyDN pluginResult =
- getPluginConfigManager().invokeSubordinateModifyDNPlugins(
- modifyDNOperation, oldEntry, newEntry, modifications);
-
- if (!pluginResult.continueProcessing())
- {
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
- ERR_MODIFYDN_ABORTED_BY_SUBORDINATE_PLUGIN.get(oldDN, newDN));
- }
-
- if (! modifications.isEmpty())
- {
- LocalizableMessageBuilder invalidReason = new LocalizableMessageBuilder();
- if (! newEntry.conformsToSchema(null, false, false, false,
- invalidReason))
- {
- throw new DirectoryException(
- DirectoryServer.getServerErrorResultCode(),
- ERR_MODIFYDN_ABORTED_BY_SUBORDINATE_SCHEMA_ERROR.get(oldDN, newDN, invalidReason));
- }
- }
- }
-
- // Remove the old DN from dn2id.
- dn2id.remove(txn, oldDN);
-
- // Remove old ID from id2entry and put the new entry
- // (old entry with new DN) in id2entry.
- if (!newID.equals(oldID))
- {
- id2entry.remove(txn, oldID);
- }
-
- // Update any referral records.
- dn2uri.deleteEntry(txn, oldEntry);
-
- tail.next = new MovedEntry(newID, newEntry, !newID.equals(oldID));
-
- if(isApexEntryMoved)
- {
- // Remove the old ID from id2subtree of old apex superior entries.
- for (DN dn = oldSuperiorDN; dn != null; dn = getParentWithinBase(dn))
- {
- EntryID parentID = dn2id.get(txn, dn, LockMode.DEFAULT);
- ByteString parentIDKeyBytes = ByteString.valueOfLong(parentID.longValue());
- id2subtree.removeID(buffer, parentIDKeyBytes, oldID);
- }
- }
-
- if (!newID.equals(oldID))
- {
- // All the subordinates will be renumbered so we have to rebuild
- // id2c and id2s with the new ID.
- ByteString oldIDKeyBytes = ByteString.valueOfLong(oldID.longValue());
- id2children.delete(buffer, oldIDKeyBytes);
- id2subtree.delete(buffer, oldIDKeyBytes);
-
- // Reindex the entry with the new ID.
- indexRemoveEntry(buffer, oldEntry, oldID);
- }
- else if (!modifications.isEmpty())
- {
- // Update the indexes.
- indexModifications(buffer, oldEntry, newEntry, oldID, modifications);
- }
-
- // Remove the entry from the entry cache.
- EntryCache<?> entryCache = DirectoryServer.getEntryCache();
- if (entryCache != null)
- {
- entryCache.removeEntry(oldDN);
- }
- }
-
- /**
- * Make a new DN for a subordinate entry of a renamed or moved entry.
- *
- * @param oldDN The current DN of the subordinate entry.
- * @param oldSuffixLen The current DN length of the renamed or moved entry.
- * @param newSuffixDN The new DN of the renamed or moved entry.
- * @return The new DN of the subordinate entry.
- */
- static DN modDN(DN oldDN, int oldSuffixLen, DN newSuffixDN)
- {
- int oldDNNumComponents = oldDN.size();
- int oldDNKeepComponents = oldDNNumComponents - oldSuffixLen;
- int newSuffixDNComponents = newSuffixDN.size();
-
- RDN[] newDNComponents = new RDN[oldDNKeepComponents+newSuffixDNComponents];
- for (int i=0; i < oldDNKeepComponents; i++)
- {
- newDNComponents[i] = oldDN.getRDN(i);
- }
-
- for (int i=oldDNKeepComponents, j=0; j < newSuffixDNComponents; i++,j++)
- {
- newDNComponents[i] = newSuffixDN.getRDN(j);
- }
-
- return new DN(newDNComponents);
- }
-
- /**
- * Insert a new entry into the attribute indexes.
- *
- * @param buffer The index buffer used to buffer up the index changes.
- * @param entry The entry to be inserted into the indexes.
- * @param entryID The ID of the entry to be inserted into the indexes.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- private void indexInsertEntry(IndexBuffer buffer, Entry entry, EntryID entryID)
- throws DatabaseException, DirectoryException
- {
- for (AttributeIndex index : attrIndexMap.values())
- {
- index.addEntry(buffer, entryID, entry);
- }
-
- for (VLVIndex vlvIndex : vlvIndexMap.values())
- {
- vlvIndex.addEntry(buffer, entryID, entry);
- }
- }
-
- /**
- * Remove an entry from the attribute indexes.
- *
- * @param buffer The index buffer used to buffer up the index changes.
- * @param entry The entry to be removed from the indexes.
- * @param entryID The ID of the entry to be removed from the indexes.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- private void indexRemoveEntry(IndexBuffer buffer, Entry entry, EntryID entryID)
- throws DatabaseException, DirectoryException
- {
- for (AttributeIndex index : attrIndexMap.values())
- {
- index.removeEntry(buffer, entryID, entry);
- }
-
- for (VLVIndex vlvIndex : vlvIndexMap.values())
- {
- vlvIndex.removeEntry(buffer, entryID, entry);
- }
- }
-
- /**
- * Update the attribute indexes to reflect the changes to the
- * attributes of an entry resulting from a sequence of modifications.
- *
- * @param buffer The index buffer used to buffer up the index changes.
- * @param oldEntry The contents of the entry before the change.
- * @param newEntry The contents of the entry after the change.
- * @param entryID The ID of the entry that was changed.
- * @param mods The sequence of modifications made to the entry.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- private void indexModifications(IndexBuffer buffer, Entry oldEntry, Entry newEntry,
- EntryID entryID, List<Modification> mods)
- throws DatabaseException, DirectoryException
- {
- // Process in index configuration order.
- for (AttributeIndex index : attrIndexMap.values())
- {
- // Check whether any modifications apply to this indexed attribute.
- if (isAttributeModified(index, mods))
- {
- index.modifyEntry(buffer, entryID, oldEntry, newEntry, mods);
- }
- }
-
- for(VLVIndex vlvIndex : vlvIndexMap.values())
- {
- vlvIndex.modifyEntry(buffer, entryID, oldEntry, newEntry, mods);
- }
- }
-
- /**
- * Get a count of the number of entries stored in this entry container.
- *
- * @return The number of entries stored in this entry container.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public long getEntryCount() throws DatabaseException
- {
- EntryID entryID = dn2id.get(null, baseDN, LockMode.DEFAULT);
- if (entryID != null)
- {
- DatabaseEntry key = new DatabaseEntry(entryIDToDatabase(entryID.longValue()));
- EntryIDSet entryIDSet = id2subtree.readKey(key, null, LockMode.DEFAULT);
-
- long count = entryIDSet.size();
- if(count != Long.MAX_VALUE)
- {
- // Add the base entry itself
- return ++count;
- }
- else
- {
- // The count is not maintained. Fall back to the slow method
- return id2entry.getRecordCount();
- }
- }
- else
- {
- // Base entry doesn't not exist so this entry container
- // must not have any entries
- return 0;
- }
- }
-
- /**
- * Get the number of values for which the entry limit has been exceeded
- * since the entry container was opened.
- * @return The number of values for which the entry limit has been exceeded.
- */
- public int getEntryLimitExceededCount()
- {
- int count = 0;
- count += id2children.getEntryLimitExceededCount();
- count += id2subtree.getEntryLimitExceededCount();
- for (AttributeIndex index : attrIndexMap.values())
- {
- count += index.getEntryLimitExceededCount();
- }
- return count;
- }
-
-
- /**
- * Get a list of the databases opened by the entryContainer.
- * @param dbList A list of database containers.
- */
- public void listDatabases(List<DatabaseContainer> dbList)
- {
- dbList.add(dn2id);
- dbList.add(id2entry);
- dbList.add(dn2uri);
- if (config.isSubordinateIndexesEnabled())
- {
- dbList.add(id2children);
- dbList.add(id2subtree);
- }
- dbList.add(state);
-
- for(AttributeIndex index : attrIndexMap.values())
- {
- index.listDatabases(dbList);
- }
-
- dbList.addAll(vlvIndexMap.values());
- }
-
- /**
- * Determine whether the provided operation has the ManageDsaIT request
- * control.
- * @param operation The operation for which the determination is to be made.
- * @return true if the operation has the ManageDsaIT request control, or false
- * if not.
- */
- private static boolean isManageDsaITOperation(Operation operation)
- {
- if(operation != null)
- {
- List<Control> controls = operation.getRequestControls();
- if (controls != null)
- {
- for (Control control : controls)
- {
- if (ServerConstants.OID_MANAGE_DSAIT_CONTROL.equals(control.getOID()))
- {
- return true;
- }
- }
- }
- }
- return false;
- }
-
- /**
- * Begin a leaf transaction using the default configuration.
- * Provides assertion debug logging.
- * @return A JE transaction handle.
- * @throws DatabaseException If an error occurs while attempting to begin
- * a new transaction.
- */
- public Transaction beginTransaction()
- throws DatabaseException
- {
- Transaction parentTxn = null;
- TransactionConfig txnConfig = null;
- Transaction txn = env.beginTransaction(parentTxn, txnConfig);
- if (logger.isTraceEnabled())
- {
- logger.trace("beginTransaction", "begin txnid=" + txn.getId());
- }
- return txn;
- }
-
- /**
- * Commit a transaction.
- * Provides assertion debug logging.
- * @param txn The JE transaction handle.
- * @throws DatabaseException If an error occurs while attempting to commit
- * the transaction.
- */
- public static void transactionCommit(Transaction txn)
- throws DatabaseException
- {
- if (txn != null)
- {
- txn.commit();
- if (logger.isTraceEnabled())
- {
- logger.trace("commit txnid=%d", txn.getId());
- }
- }
- }
-
- /**
- * Abort a transaction.
- * Provides assertion debug logging.
- * @param txn The JE transaction handle.
- * @throws DatabaseException If an error occurs while attempting to abort the
- * transaction.
- */
- public static void transactionAbort(Transaction txn)
- throws DatabaseException
- {
- if (txn != null)
- {
- txn.abort();
- if (logger.isTraceEnabled())
- {
- logger.trace("abort txnid=%d", txn.getId());
- }
- }
- }
-
- /**
- * Delete this entry container from disk. The entry container should be
- * closed before calling this method.
- *
- * @throws DatabaseException If an error occurs while removing the entry
- * container.
- */
- void delete() throws DatabaseException
- {
- List<DatabaseContainer> databases = new ArrayList<>();
- listDatabases(databases);
-
- if(env.getConfig().getTransactional())
- {
- Transaction txn = beginTransaction();
-
- try
- {
- for(DatabaseContainer db : databases)
- {
- env.removeDatabase(txn, db.getName());
- }
-
- transactionCommit(txn);
- }
- catch(DatabaseException de)
- {
- transactionAbort(txn);
- throw de;
- }
- }
- else
- {
- for(DatabaseContainer db : databases)
- {
- env.removeDatabase(null, db.getName());
- }
- }
- }
-
- /**
- * Remove a database from disk.
- *
- * @param database The database container to remove.
- * @throws DatabaseException If an error occurs while attempting to delete the
- * database.
- */
- void deleteDatabase(DatabaseContainer database)
- throws DatabaseException
- {
- if(database == state)
- {
- // The state database can not be removed individually.
- return;
- }
-
- database.close();
- if(env.getConfig().getTransactional())
- {
- Transaction txn = beginTransaction();
- try
- {
- env.removeDatabase(txn, database.getName());
- if(database instanceof Index)
- {
- state.removeIndexTrustState(txn, database);
- }
- transactionCommit(txn);
- }
- catch(DatabaseException de)
- {
- transactionAbort(txn);
- throw de;
- }
- }
- else
- {
- env.removeDatabase(null, database.getName());
- if(database instanceof Index)
- {
- state.removeIndexTrustState(null, database);
- }
- }
- }
-
- /**
- * Removes a attribute index from disk.
- *
- * @param attributeIndex The attribute index to remove.
- * @throws DatabaseException If an JE database error occurs while attempting
- * to delete the index.
- */
- private void deleteAttributeIndex(AttributeIndex attributeIndex)
- throws DatabaseException
- {
- attributeIndex.close();
- Transaction txn = env.getConfig().getTransactional()
- ? beginTransaction() : null;
- try
- {
- for (Index index : attributeIndex.getAllIndexes())
- {
- env.removeDatabase(txn, index.getName());
- state.removeIndexTrustState(txn, index);
- }
- if (txn != null)
- {
- transactionCommit(txn);
- }
- }
- catch(DatabaseException de)
- {
- if (txn != null)
- {
- transactionAbort(txn);
- }
- throw de;
- }
- }
-
- /**
- * This method constructs a container name from a base DN. Only alphanumeric
- * characters are preserved, all other characters are replaced with an
- * underscore.
- *
- * @return The container name for the base DN.
- */
- public String getDatabasePrefix()
- {
- return databasePrefix;
- }
-
- /**
- * Sets a new database prefix for this entry container and rename all
- * existing databases in use by this entry container.
- *
- * @param newDatabasePrefix The new database prefix to use.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws JebException If an error occurs in the JE backend.
- */
- public void setDatabasePrefix(String newDatabasePrefix)
- throws DatabaseException, JebException
-
- {
- List<DatabaseContainer> databases = new ArrayList<>();
- listDatabases(databases);
-
- newDatabasePrefix = preparePrefix(newDatabasePrefix);
-
- // close the containers.
- for(DatabaseContainer db : databases)
- {
- db.close();
- }
-
- try
- {
- if(env.getConfig().getTransactional())
- {
- //Rename under transaction
- Transaction txn = beginTransaction();
- try
- {
- for(DatabaseContainer db : databases)
- {
- String oldName = db.getName();
- String newName = oldName.replace(databasePrefix, newDatabasePrefix);
- env.renameDatabase(txn, oldName, newName);
- }
-
- transactionCommit(txn);
-
- for(DatabaseContainer db : databases)
- {
- String oldName = db.getName();
- String newName = oldName.replace(databasePrefix, newDatabasePrefix);
- db.setName(newName);
- }
-
- // Update the prefix.
- this.databasePrefix = newDatabasePrefix;
- }
- catch(Exception e)
- {
- transactionAbort(txn);
-
- String msg = e.getMessage();
- if (msg == null)
- {
- msg = stackTraceToSingleLineString(e);
- }
- throw new JebException(ERR_UNCHECKED_EXCEPTION.get(msg), e);
- }
- }
- else
- {
- for(DatabaseContainer db : databases)
- {
- String oldName = db.getName();
- String newName = oldName.replace(databasePrefix, newDatabasePrefix);
- env.renameDatabase(null, oldName, newName);
- db.setName(newName);
- }
-
- // Update the prefix.
- this.databasePrefix = newDatabasePrefix;
- }
- }
- finally
- {
- // Open the containers backup.
- for(DatabaseContainer db : databases)
- {
- db.open();
- }
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public DN getBaseDN()
- {
- return baseDN;
- }
-
- /**
- * Get the parent of a DN in the scope of the base DN.
- *
- * @param dn A DN which is in the scope of the base DN.
- * @return The parent DN, or null if the given DN is the base DN.
- */
- DN getParentWithinBase(DN dn)
- {
- if (dn.equals(baseDN))
- {
- return null;
- }
- return dn.parent();
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationChangeAcceptable(
- LocalDBBackendCfg cfg, List<LocalizableMessage> unacceptableReasons)
- {
- // This is always true because only all config attributes used
- // by the entry container should be validated by the admin framework.
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationChange(LocalDBBackendCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- exclusiveLock.lock();
- try
- {
- if (config.isSubordinateIndexesEnabled() != cfg.isSubordinateIndexesEnabled())
- {
- if (cfg.isSubordinateIndexesEnabled())
- {
- // Re-enabling subordinate indexes.
- openSubordinateIndexes();
- }
- else
- {
- // Disabling subordinate indexes. Use a null index and ensure that
- // future attempts to use the real indexes will fail.
- id2children.close();
- id2children = new NullIndex(databasePrefix + "_"
- + ID2CHILDREN_DATABASE_NAME, new ID2CIndexer(), state, env, this);
- state.putIndexTrustState(null, id2children, false);
- id2children.open(); // No-op
-
- id2subtree.close();
- id2subtree = new NullIndex(databasePrefix + "_"
- + ID2SUBTREE_DATABASE_NAME, new ID2SIndexer(), state, env, this);
- state.putIndexTrustState(null, id2subtree, false);
- id2subtree.open(); // No-op
-
- logger.info(NOTE_JEB_SUBORDINATE_INDEXES_DISABLED, cfg.getBackendId());
- }
- }
-
- if (config.getIndexEntryLimit() != cfg.getIndexEntryLimit())
- {
- if (id2children.setIndexEntryLimit(cfg.getIndexEntryLimit()))
- {
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(id2children.getName()));
- }
-
- if (id2subtree.setIndexEntryLimit(cfg.getIndexEntryLimit()))
- {
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(id2subtree.getName()));
- }
- }
-
- DataConfig entryDataConfig = new DataConfig(cfg.isEntriesCompressed(),
- cfg.isCompactEncoding(), rootContainer.getCompressedSchema());
- id2entry.setDataConfig(entryDataConfig);
-
- this.config = cfg;
- }
- catch (DatabaseException e)
- {
- ccr.addMessage(LocalizableMessage.raw(stackTraceToSingleLineString(e)));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- }
- finally
- {
- exclusiveLock.unlock();
- }
-
- return ccr;
- }
-
- /**
- * Get the environment config of the JE environment used in this entry
- * container.
- *
- * @return The environment config of the JE environment.
- * @throws DatabaseException If an error occurs while retrieving the
- * configuration object.
- */
- public EnvironmentConfig getEnvironmentConfig() throws DatabaseException
- {
- return env.getConfig();
- }
-
- /**
- * Clear the contents of this entry container.
- *
- * @throws DatabaseException If an error occurs while removing the entry
- * container.
- */
- public void clear() throws DatabaseException
- {
- List<DatabaseContainer> databases = new ArrayList<>();
- listDatabases(databases);
-
- for(DatabaseContainer db : databases)
- {
- db.close();
- }
- try
- {
- if(env.getConfig().getTransactional())
- {
- Transaction txn = beginTransaction();
-
- try
- {
- for(DatabaseContainer db : databases)
- {
- env.truncateDatabase(txn, db.getName(), false);
- }
-
- transactionCommit(txn);
- }
- catch(DatabaseException de)
- {
- transactionAbort(txn);
- throw de;
- }
- }
- else
- {
- for(DatabaseContainer db : databases)
- {
- env.truncateDatabase(null, db.getName(), false);
- }
- }
- }
- finally
- {
- for(DatabaseContainer db : databases)
- {
- db.open();
- }
-
- Transaction txn = null;
- try
- {
- if(env.getConfig().getTransactional()) {
- txn = beginTransaction();
- }
- for(DatabaseContainer db : databases)
- {
- if (db instanceof Index)
- {
- Index index = (Index)db;
- index.setTrusted(txn, true);
- }
- }
- if(env.getConfig().getTransactional()) {
- transactionCommit(txn);
- }
- }
- catch(Exception de)
- {
- logger.traceException(de);
-
- // This is mainly used during the unit tests, so it's not essential.
- try
- {
- if (txn != null)
- {
- transactionAbort(txn);
- }
- }
- catch (Exception e)
- {
- logger.traceException(de);
- }
- }
- }
- }
-
- /**
- * Clear the contents for a database from disk.
- *
- * @param database The database to clear.
- * @throws DatabaseException if a JE database error occurs.
- */
- public void clearDatabase(DatabaseContainer database)
- throws DatabaseException
- {
- database.close();
- try
- {
- if(env.getConfig().getTransactional())
- {
- Transaction txn = beginTransaction();
- try
- {
- env.removeDatabase(txn, database.getName());
- transactionCommit(txn);
- }
- catch(DatabaseException de)
- {
- transactionAbort(txn);
- throw de;
- }
- }
- else
- {
- env.removeDatabase(null, database.getName());
- }
- }
- finally
- {
- database.open();
- }
- if(logger.isTraceEnabled())
- {
- logger.trace("Cleared the database %s", database.getName());
- }
- }
-
-
- /**
- * Finds an existing entry whose DN is the closest ancestor of a given baseDN.
- *
- * @param baseDN the DN for which we are searching a matched DN.
- * @return the DN of the closest ancestor of the baseDN.
- * @throws DirectoryException If an error prevented the check of an
- * existing entry from being performed.
- */
- private DN getMatchedDN(DN baseDN) throws DirectoryException
- {
- DN parentDN = baseDN.getParentDNInSuffix();
- while (parentDN != null && parentDN.isDescendantOf(getBaseDN()))
- {
- if (entryExists(parentDN))
- {
- return parentDN;
- }
- parentDN = parentDN.getParentDNInSuffix();
- }
- return null;
- }
-
- /**
- * Opens the id2children and id2subtree indexes.
- */
- private void openSubordinateIndexes()
- {
- id2children = newIndex(ID2CHILDREN_DATABASE_NAME, new ID2CIndexer());
- id2subtree = newIndex(ID2SUBTREE_DATABASE_NAME, new ID2SIndexer());
- }
-
- private Index newIndex(String name, Indexer indexer)
- {
- final Index index = new Index(databasePrefix + "_" + name,
- indexer, state, config.getIndexEntryLimit(), 0, true, env, this);
- index.open();
- if (!index.isTrusted())
- {
- logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, index.getName());
- }
- return index;
- }
-
- /**
- * Creates a new index for an attribute.
- *
- * @param indexName the name to give to the new index
- * @param indexer the indexer to use when inserting data into the index
- * @param indexEntryLimit the index entry limit
- * @return a new index
- */
- Index newIndexForAttribute(String indexName, Indexer indexer, int indexEntryLimit)
- {
- final int cursorEntryLimit = 100000;
- return new Index(indexName, indexer, state, indexEntryLimit, cursorEntryLimit, false, env, this);
- }
-
-
- /**
- * Checks if any modifications apply to this indexed attribute.
- * @param index the indexed attributes.
- * @param mods the modifications to check for.
- * @return true if any apply, false otherwise.
- */
- private boolean isAttributeModified(AttributeIndex index,
- List<Modification> mods)
- {
- AttributeType indexAttributeType = index.getAttributeType();
- List<AttributeType> subTypes =
- DirectoryServer.getSchema().getSubTypes(indexAttributeType);
-
- for (Modification mod : mods)
- {
- Attribute modAttr = mod.getAttribute();
- AttributeType modAttrType = modAttr.getAttributeType();
- if (modAttrType.equals(indexAttributeType)
- || subTypes.contains(modAttrType))
- {
- return true;
- }
- }
- return false;
- }
-
-
- /**
- * Fetch the base Entry of the EntryContainer.
- * @param baseDN the DN for the base entry
- * @param searchScope the scope under which this is fetched.
- * Scope is used for referral processing.
- * @return the Entry matching the baseDN.
- * @throws DirectoryException if the baseDN doesn't exist.
- */
- private Entry fetchBaseEntry(DN baseDN, SearchScope searchScope)
- throws DirectoryException
- {
- // Fetch the base entry.
- Entry baseEntry = null;
- try
- {
- baseEntry = getEntry(baseDN);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- }
-
- // The base entry must exist for a successful result.
- if (baseEntry == null)
- {
- // Check for referral entries above the base entry.
- dn2uri.targetEntryReferrals(baseDN, searchScope);
-
- throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
- ERR_SEARCH_NO_SUCH_OBJECT.get(baseDN), getMatchedDN(baseDN), null);
- }
-
- return baseEntry;
- }
-
-
- /**
- * Transform a database prefix string to one usable by the DB.
- * @param databasePrefix the database prefix
- * @return a new string when non letter or digit characters
- * have been replaced with underscore
- */
- private String preparePrefix(String databasePrefix)
- {
- StringBuilder builder = new StringBuilder(databasePrefix.length());
- for (int i = 0; i < databasePrefix.length(); i++)
- {
- char ch = databasePrefix.charAt(i);
- if (Character.isLetterOrDigit(ch))
- {
- builder.append(ch);
- }
- else
- {
- builder.append('_');
- }
- }
- return builder.toString();
- }
-
- /** Get the exclusive lock. */
- public void lock() {
- exclusiveLock.lock();
- }
-
- /** Unlock the exclusive lock. */
- public void unlock() {
- exclusiveLock.unlock();
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString() {
- return databasePrefix;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryID.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryID.java
deleted file mode 100644
index 620673d..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryID.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import com.sleepycat.je.DatabaseEntry;
-
-/**
- * An integer identifier assigned to each entry in the JE backend.
- * An entry ID is implemented by this class as a long.
- * There are static methods to assign monotonically increasing entry IDs,
- * starting from 1.
- */
-public class EntryID implements Comparable<EntryID>
-{
- /** The identifier integer value. */
- private final long id;
- /** The value in database format, created when necessary. */
- private DatabaseEntry data;
-
- /**
- * Create a new entry ID object from a given long value.
- * @param id The long value of the ID.
- */
- public EntryID(long id)
- {
- this.id = id;
- }
-
- /**
- * Create a new entry ID object from a value in database format.
- * @param databaseEntry The database value of the ID.
- */
- public EntryID(DatabaseEntry databaseEntry)
- {
- data = databaseEntry;
- id = JebFormat.entryIDFromDatabase(data.getData());
- }
-
- /**
- * Get the value of the entry ID as a long.
- * @return The entry ID.
- */
- public long longValue()
- {
- return id;
- }
-
- /**
- * Get the value of the ID in database format.
- * @return The value of the ID in database format.
- */
- public DatabaseEntry getDatabaseEntry()
- {
- if (data == null)
- {
- data = new DatabaseEntry();
- data.setData(JebFormat.entryIDToDatabase(id));
- }
- return data;
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less
- * than, equal to, or greater than the specified object.<p>
- * <p/>
- *
- * @param that the Object to be compared.
- * @return a negative integer, zero, or a positive integer as this object
- * is less than, equal to, or greater than the specified object.
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this Object.
- */
- @Override
- public int compareTo(EntryID that) throws ClassCastException
- {
- final long result = this.id - that.id;
- if (result < 0)
- {
- return -1;
- }
- else if (result > 0)
- {
- return 1;
- }
- return 0;
- }
-
- /**
- * Indicates whether some other object is "equal to" this one.
- *
- * @param that the reference object with which to compare.
- * @return <code>true</code> if this object is the same as the obj
- * argument; <code>false</code> otherwise.
- * @see #hashCode()
- * @see java.util.Hashtable
- */
- @Override
- public boolean equals(Object that)
- {
- if (this == that)
- {
- return true;
- }
- if (!(that instanceof EntryID))
- {
- return false;
- }
- return this.id == ((EntryID) that).id;
- }
-
- /**
- * Returns a hash code value for the object. This method is
- * supported for the benefit of hashtables such as those provided by
- * <code>java.util.Hashtable</code>.
- *
- * @return a hash code value for this object.
- * @see java.lang.Object#equals(java.lang.Object)
- * @see java.util.Hashtable
- */
- @Override
- public int hashCode()
- {
- return (int) id;
- }
-
- /**
- * Get a string representation of this object.
- * @return A string representation of this object.
- */
- @Override
- public String toString()
- {
- return Long.toString(id);
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryIDSet.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryIDSet.java
deleted file mode 100644
index ceeb121..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryIDSet.java
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-
-import org.forgerock.opendj.ldap.ByteString;
-
-/**
- * Represents a set of Entry IDs. It can represent a set where the IDs are
- * not defined, for example when the index entry limit has been exceeded.
- */
-public class EntryIDSet implements Iterable<EntryID>
-{
-
- /**
- * The IDs are stored here in an array in ascending order.
- * A null array implies not defined, rather than zero IDs.
- */
- private long[] values;
-
- /**
- * The size of the set when it is not defined. This value is only maintained
- * when the set is undefined.
- */
- private long undefinedSize = Long.MAX_VALUE;
-
- /**
- * The database key containing this set, if the set was constructed
- * directly from the database.
- */
- private final ByteString keyBytes;
-
- /** Create a new undefined set. */
- public EntryIDSet()
- {
- this.keyBytes = null;
- this.undefinedSize = Long.MAX_VALUE;
- }
-
- /**
- * Create a new undefined set with a initial size.
- *
- * @param size The undefined size for this set.
- */
- public EntryIDSet(long size)
- {
- this.keyBytes = null;
- this.undefinedSize = size;
- }
-
- /**
- * Create a new entry ID set from the raw database value.
- *
- * @param keyBytes The database key that contains this value.
- * @param bytes The database value, or null if there are no entry IDs.
- */
- public EntryIDSet(byte[] keyBytes, byte[] bytes)
- {
- this(keyBytes != null ? ByteString.wrap(keyBytes) : null,
- bytes != null ? ByteString.wrap(bytes) : null);
- }
-
- /**
- * Create a new entry ID set from the raw database value.
- *
- * @param keyBytes
- * The database key that contains this value.
- * @param bytes
- * The database value, or null if there are no entry IDs.
- */
- public EntryIDSet(ByteString keyBytes, ByteString bytes)
- {
- this.keyBytes = keyBytes;
-
- if (bytes == null)
- {
- values = new long[0];
- return;
- }
-
- if (bytes.length() == 0)
- {
- // Entry limit has exceeded and there is no encoded undefined set size.
- undefinedSize = Long.MAX_VALUE;
- }
- else if ((bytes.byteAt(0) & 0x80) == 0x80)
- {
- // Entry limit has exceeded and there is an encoded undefined set size.
- undefinedSize =
- JebFormat.entryIDUndefinedSizeFromDatabase(bytes.toByteArray());
- }
- else
- {
- // Seems like entry limit has not been exceeded and the bytes is a
- // list of entry IDs.
- values = JebFormat.entryIDListFromDatabase(bytes.toByteArray());
- }
- }
-
- /**
- * Construct an EntryIDSet from an array of longs.
- *
- * @param values The array of IDs represented as longs.
- * @param pos The position of the first ID to take from the array.
- * @param len the number of IDs to take from the array.
- */
- EntryIDSet(long[] values, int pos, int len)
- {
- this.keyBytes = null;
- this.values = new long[len];
- System.arraycopy(values, pos, this.values, 0, len);
- }
-
- /**
- * Create a new set of entry IDs that is the union of several entry ID sets.
- *
- * @param sets A list of entry ID sets.
- * @param allowDuplicates true if duplicate IDs are allowed in the resulting
- * set, or if the provided sets are sure not to overlap; false if
- * duplicates should be eliminated.
- * @return The union of the provided entry ID sets.
- */
- public static EntryIDSet unionOfSets(ArrayList<EntryIDSet> sets,
- boolean allowDuplicates)
- {
- int count = 0;
-
- boolean undefined = false;
- for (EntryIDSet l : sets)
- {
- if (!l.isDefined())
- {
- if(l.undefinedSize == Long.MAX_VALUE)
- {
- return new EntryIDSet();
- }
- undefined = true;
- }
- count += l.size();
- }
-
- if(undefined)
- {
- return new EntryIDSet(count);
- }
-
- boolean needSort = false;
- long[] n = new long[count];
- int pos = 0;
- for (EntryIDSet l : sets)
- {
- if (l.values.length != 0)
- {
- if (!needSort && pos > 0 && l.values[0] < n[pos-1])
- {
- needSort = true;
- }
- System.arraycopy(l.values, 0, n, pos, l.values.length);
- pos += l.values.length;
- }
- }
- if (needSort)
- {
- Arrays.sort(n);
- }
- if (allowDuplicates)
- {
- EntryIDSet ret = new EntryIDSet();
- ret.values = n;
- return ret;
- }
- long[] n1 = new long[n.length];
- long last = -1;
- int j = 0;
- for (long l : n)
- {
- if (l != last)
- {
- last = n1[j++] = l;
- }
- }
- if (j == n1.length)
- {
- EntryIDSet ret = new EntryIDSet();
- ret.values = n1;
- return ret;
- }
- else
- {
- return new EntryIDSet(n1, 0, j);
- }
- }
-
- /**
- * Get the size of this entry ID set.
- *
- * @return The number of IDs in the set.
- */
- public long size()
- {
- if (values != null)
- {
- return values.length;
- }
- return undefinedSize;
- }
-
- /**
- * Get a string representation of this object.
- * @return A string representation of this object.
- */
- @Override
- public String toString()
- {
- StringBuilder buffer = new StringBuilder(16);
- toString(buffer);
- return buffer.toString();
- }
-
- /**
- * Convert to a short string to aid with debugging.
- *
- * @param buffer The string is appended to this string builder.
- */
- public void toString(StringBuilder buffer)
- {
- if (!isDefined())
- {
- if (keyBytes != null)
- {
- // The index entry limit was exceeded
- if(undefinedSize == Long.MAX_VALUE)
- {
- buffer.append("[LIMIT-EXCEEDED]");
- }
- else
- {
- buffer.append("[LIMIT-EXCEEDED:");
- buffer.append(undefinedSize);
- buffer.append("]");
- }
- }
- else
- {
- // Not indexed
- buffer.append("[NOT-INDEXED]");
- }
- }
- else
- {
- buffer.append("[COUNT:");
- buffer.append(size());
- buffer.append("]");
- }
- }
-
- /**
- * Determine whether this set of IDs is defined.
- *
- * @return true if the set of IDs is defined.
- */
- public boolean isDefined()
- {
- return values != null;
- }
-
- /**
- * Get a database representation of this object.
- * @return A database representation of this object as a byte array.
- */
- public byte[] toDatabase()
- {
- if(isDefined())
- {
- return JebFormat.entryIDListToDatabase(values);
- }
- else
- {
- return JebFormat.entryIDUndefinedSizeToDatabase(undefinedSize);
- }
- }
-
- /**
- * Insert an ID into this set.
- *
- * @param entryID The ID to be inserted.
- * @return true if the set was changed, false if it was not changed,
- * for example if the set is undefined or the ID was already present.
- */
- public boolean add(EntryID entryID)
- {
- if (values == null)
- {
- if(undefinedSize != Long.MAX_VALUE)
- {
- undefinedSize++;
- }
- return true;
- }
-
- long id = entryID.longValue();
- if (values.length == 0)
- {
- values = new long[] { id };
- return true;
- }
-
- if (id > values[values.length-1])
- {
- long[] updatedValues = Arrays.copyOf(values, values.length + 1);
- updatedValues[values.length] = id;
- values = updatedValues;
- }
- else
- {
- int pos = Arrays.binarySearch(values, id);
- if (pos >= 0)
- {
- // The ID is already present.
- return false;
- }
-
- // For a negative return value r, the index -(r+1) gives the array
- // index at which the specified value can be inserted to maintain
- // the sorted order of the array.
- pos = -(pos+1);
-
- long[] updatedValues = new long[values.length+1];
- System.arraycopy(values, 0, updatedValues, 0, pos);
- System.arraycopy(values, pos, updatedValues, pos+1, values.length-pos);
- updatedValues[pos] = id;
- values = updatedValues;
- }
-
- return true;
- }
-
- /**
- * Remove an ID from this set.
- *
- * @param entryID The ID to be removed
- * @return true if the set was changed, false if it was not changed,
- * for example if the set was undefined or the ID was not present.
- */
- public boolean remove(EntryID entryID)
- {
- if (values == null)
- {
- if(undefinedSize != Long.MAX_VALUE)
- {
- undefinedSize--;
- }
- return true;
- }
-
- if (values.length == 0)
- {
- return false;
- }
-
- // Binary search to locate the ID.
- long id = entryID.longValue();
- int pos = Arrays.binarySearch(values, id);
- if (pos >= 0)
- {
- // Found it.
- long[] updatedValues = new long[values.length-1];
- System.arraycopy(values, 0, updatedValues, 0, pos);
- System.arraycopy(values, pos+1, updatedValues, pos, values.length-pos-1);
- values = updatedValues;
- return true;
- }
- // Not found.
- return false;
- }
-
- /**
- * Check whether this set of entry IDs contains a given ID.
- *
- * @param entryID The ID to be checked.
- * @return true if this set contains the given ID,
- * or if the set is undefined.
- */
- public boolean contains(EntryID entryID)
- {
- if (values == null)
- {
- return true;
- }
-
- final long id = entryID.longValue();
- return values.length != 0
- && id <= values[values.length - 1]
- && Arrays.binarySearch(values, id) >= 0;
- }
-
- /**
- * Takes the intersection of this set with another.
- * Retain those IDs that appear in the given set.
- *
- * @param that The set of IDs that are to be retained from this object.
- */
- public void retainAll(EntryIDSet that)
- {
- if (!isDefined())
- {
- this.values = that.values;
- this.undefinedSize = that.undefinedSize;
- return;
- }
-
- if (!that.isDefined())
- {
- return;
- }
-
- // TODO Perhaps Arrays.asList and retainAll list method are more efficient?
-
- long[] a = this.values;
- long[] b = that.values;
-
- int ai = 0, bi = 0, ci = 0;
- long[] c = new long[Math.min(a.length,b.length)];
- while (ai < a.length && bi < b.length)
- {
- if (a[ai] == b[bi])
- {
- c[ci] = a[ai];
- ai++;
- bi++;
- ci++;
- }
- else if (a[ai] > b[bi])
- {
- bi++;
- }
- else
- {
- ai++;
- }
- }
- if (ci < c.length)
- {
- values = Arrays.copyOf(c, ci);
- }
- else
- {
- values = c;
- }
- }
-
- /**
- * Add all the IDs from a given set that are not already present.
- *
- * @param that The set of IDs to be added. It MUST be defined
- */
- public void addAll(EntryIDSet that)
- {
- if(!that.isDefined())
- {
- return;
- }
-
- if (!isDefined())
- {
- // Assume there are no overlap between IDs in that set with this set
- if(undefinedSize != Long.MAX_VALUE)
- {
- undefinedSize += that.size();
- }
- return;
- }
-
- long[] a = this.values;
- long[] b = that.values;
-
- if (a.length == 0)
- {
- values = b;
- return;
- }
-
- if (b.length == 0)
- {
- return;
- }
-
- // Optimize for case where the two sets are sure to have no overlap.
- if (b[0] > a[a.length-1])
- {
- // All IDs in 'b' are greater than those in 'a'.
- long[] n = new long[a.length + b.length];
- System.arraycopy(a, 0, n, 0, a.length);
- System.arraycopy(b, 0, n, a.length, b.length);
- values = n;
- return;
- }
-
- if (a[0] > b[b.length-1])
- {
- // All IDs in 'a' are greater than those in 'b'.
- long[] n = new long[a.length + b.length];
- System.arraycopy(b, 0, n, 0, b.length);
- System.arraycopy(a, 0, n, b.length, a.length);
- values = n;
- return;
- }
-
- long[] n;
- if ( b.length < a.length ) {
- n = a;
- a = b;
- b = n;
- }
-
- n = new long[a.length + b.length];
-
- int ai, bi, ni;
- for ( ni = 0, ai = 0, bi = 0; ai < a.length && bi < b.length; ) {
- if ( a[ai] < b[bi] ) {
- n[ni++] = a[ai++];
- } else if ( b[bi] < a[ai] ) {
- n[ni++] = b[bi++];
- } else {
- n[ni++] = a[ai];
- ai++;
- bi++;
- }
- }
-
- // Copy any remainder from the first array.
- int aRemain = a.length - ai;
- if (aRemain > 0)
- {
- System.arraycopy(a, ai, n, ni, aRemain);
- ni += aRemain;
- }
-
- // Copy any remainder from the second array.
- int bRemain = b.length - bi;
- if (bRemain > 0)
- {
- System.arraycopy(b, bi, n, ni, bRemain);
- ni += bRemain;
- }
-
- if (ni < n.length)
- {
- values = Arrays.copyOf(n, ni);
- }
- else
- {
- values = n;
- }
- }
-
- /**
- * Delete all IDs in this set that are in a given set.
- *
- * @param that The set of IDs to be deleted. It MUST be defined.
- */
- public void deleteAll(EntryIDSet that)
- {
- if(!that.isDefined())
- {
- return;
- }
-
- if (!isDefined())
- {
- // Assume all IDs in the given set exists in this set.
- if(undefinedSize != Long.MAX_VALUE)
- {
- undefinedSize -= that.size();
- }
- return;
- }
-
- long[] a = this.values;
- long[] b = that.values;
-
- if (a.length == 0 || b.length == 0
- // Optimize for cases where the two sets are sure to have no overlap.
- || b[0] > a[a.length-1]
- || a[0] > b[b.length-1])
- {
- return;
- }
-
- long[] n = new long[a.length];
-
- int ai, bi, ni;
- for ( ni = 0, ai = 0, bi = 0; ai < a.length && bi < b.length; ) {
- if ( a[ai] < b[bi] ) {
- n[ni++] = a[ai++];
- } else if ( b[bi] < a[ai] ) {
- bi++;
- } else {
- ai++;
- bi++;
- }
- }
-
- System.arraycopy(a, ai, n, ni, a.length - ai);
- ni += a.length - ai;
-
- if (ni < a.length)
- {
- values = Arrays.copyOf(n, ni);
- }
- else
- {
- values = n;
- }
- }
-
- /**
- * Create an iterator over the set or an empty iterator
- * if the set is not defined.
- *
- * @return An EntryID iterator.
- */
- @Override
- public Iterator<EntryID> iterator()
- {
- return iterator(null);
- }
-
- /**
- * Create an iterator over the set or an empty iterator
- * if the set is not defined.
- *
- * @param begin The entry ID of the first entry to return in the list.
- *
- * @return An EntryID iterator.
- */
- public Iterator<EntryID> iterator(EntryID begin)
- {
- if (values != null)
- {
- // The set is defined.
- return new IDSetIterator(values, begin);
- }
- // The set is not defined.
- return new IDSetIterator(new long[0]);
- }
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryIDSetSorter.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryIDSetSorter.java
deleted file mode 100644
index af1031d..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EntryIDSetSorter.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2008 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.SearchScope;
-import org.opends.server.controls.VLVRequestControl;
-import org.opends.server.controls.VLVResponseControl;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.core.SearchOperation;
-import org.opends.server.protocols.ldap.LDAPResultCode;
-import org.opends.server.types.*;
-
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.util.StaticUtils.*;
-
-/**
- * This class provides a mechanism for sorting the contents of an entry ID set
- * based on a given sort order.
- */
-public class EntryIDSetSorter
-{
- /**
- * Creates a new entry ID set which is a sorted representation of the provided
- * set using the given sort order.
- *
- * @param suffixContainer The suffix container with which the ID list is associated.
- * @param entryIDSet The entry ID set to be sorted.
- * @param searchOperation The search operation being processed.
- * @param sortOrder The sort order to use for the entry ID set.
- * @param vlvRequest The VLV request control included in the search
- * request, or {@code null} if there was none.
- *
- * @return A new entry ID set which is a sorted representation of the
- * provided set using the given sort order.
- *
- * @throws DirectoryException If an error occurs while performing the sort.
- */
- public static EntryIDSet sort(EntryContainer suffixContainer,
- EntryIDSet entryIDSet,
- SearchOperation searchOperation,
- SortOrder sortOrder,
- VLVRequestControl vlvRequest)
- throws DirectoryException
- {
- if (! entryIDSet.isDefined())
- {
- return new EntryIDSet();
- }
-
- DN baseDN = searchOperation.getBaseDN();
- SearchScope scope = searchOperation.getScope();
- SearchFilter filter = searchOperation.getFilter();
-
- TreeMap<SortValues,EntryID> sortMap = new TreeMap<>();
- for (EntryID id : entryIDSet)
- {
- try
- {
- Entry e = suffixContainer.getEntry(id);
- if (e.matchesBaseAndScope(baseDN, scope) && filter.matchesEntry(e))
- {
- sortMap.put(new SortValues(id, e, sortOrder), id);
- }
- }
- catch (Exception e)
- {
- LocalizableMessage message = ERR_ENTRYIDSORTER_CANNOT_EXAMINE_ENTRY.get(id, getExceptionMessage(e));
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e);
- }
- }
-
-
- // See if there is a VLV request to further pare down the set of results,
- // and if there is where it should be processed by offset or assertion value.
- long[] sortedIDs;
- if (vlvRequest != null)
- {
- int beforeCount = vlvRequest.getBeforeCount();
- int afterCount = vlvRequest.getAfterCount();
-
- if (vlvRequest.getTargetType() == VLVRequestControl.TYPE_TARGET_BYOFFSET)
- {
- int targetOffset = vlvRequest.getOffset();
- if (targetOffset < 0)
- {
- // The client specified a negative target offset. This should never be allowed.
- searchOperation.addResponseControl(
- new VLVResponseControl(targetOffset, sortMap.size(),
- LDAPResultCode.OFFSET_RANGE_ERROR));
-
- LocalizableMessage message = ERR_ENTRYIDSORTER_NEGATIVE_START_POS.get();
- throw new DirectoryException(ResultCode.VIRTUAL_LIST_VIEW_ERROR,
- message);
- }
- else if (targetOffset == 0)
- {
- // This is an easy mistake to make, since VLV offsets start at 1
- // instead of 0. We'll assume the client meant to use 1.
- targetOffset = 1;
- }
-
- int listOffset = targetOffset - 1; // VLV offsets start at 1, not 0.
- int startPos = listOffset - beforeCount;
- if (startPos < 0)
- {
- // This can happen if beforeCount >= offset, and in this case we'll
- // just adjust the start position to ignore the range of beforeCount
- // that doesn't exist.
- startPos = 0;
- beforeCount = listOffset;
- }
- else if (startPos >= sortMap.size())
- {
- // The start position is beyond the end of the list. In this case,
- // we'll assume that the start position was one greater than the
- // size of the list and will only return the beforeCount entries.
- targetOffset = sortMap.size() + 1;
- listOffset = sortMap.size();
- startPos = listOffset - beforeCount;
- afterCount = 0;
- }
-
- int count = 1 + beforeCount + afterCount;
- sortedIDs = new long[count];
-
- int treePos = 0;
- int arrayPos = 0;
- for (EntryID id : sortMap.values())
- {
- if (treePos++ < startPos)
- {
- continue;
- }
-
- sortedIDs[arrayPos++] = id.longValue();
- if (arrayPos >= count)
- {
- break;
- }
- }
-
- if (arrayPos < count)
- {
- // We don't have enough entries in the set to meet the requested
- // page size, so we'll need to shorten the array.
- long[] newIDArray = new long[arrayPos];
- System.arraycopy(sortedIDs, 0, newIDArray, 0, arrayPos);
- sortedIDs = newIDArray;
- }
-
- searchOperation.addResponseControl(
- new VLVResponseControl(targetOffset, sortMap.size(),
- LDAPResultCode.SUCCESS));
- }
- else
- {
- ByteString assertionValue = vlvRequest.getGreaterThanOrEqualAssertion();
-
- boolean targetFound = false;
- int targetOffset = 0;
- int includedBeforeCount = 0;
- int includedAfterCount = 0;
- int listSize = 0;
- LinkedList<EntryID> idList = new LinkedList<>();
- for (Map.Entry<SortValues, EntryID> entry : sortMap.entrySet())
- {
- SortValues sortValues = entry.getKey();
- EntryID id = entry.getValue();
-
- if (targetFound)
- {
- idList.add(id);
- listSize++;
- includedAfterCount++;
- if (includedAfterCount >= afterCount)
- {
- break;
- }
- }
- else
- {
- targetFound = sortValues.compareTo(assertionValue) >= 0;
- targetOffset++;
-
- if (targetFound)
- {
- idList.add(id);
- listSize++;
- }
- else if (beforeCount > 0)
- {
- idList.add(id);
- includedBeforeCount++;
- if (includedBeforeCount > beforeCount)
- {
- idList.removeFirst();
- includedBeforeCount--;
- }
- else
- {
- listSize++;
- }
- }
- }
- }
-
- if (! targetFound)
- {
- // No entry was found to be greater than or equal to the sort key, so
- // the target offset will be one greater than the content count.
- targetOffset = sortMap.size() + 1;
- }
-
- sortedIDs = new long[listSize];
- Iterator<EntryID> idIterator = idList.iterator();
- for (int i=0; i < listSize; i++)
- {
- sortedIDs[i] = idIterator.next().longValue();
- }
-
- searchOperation.addResponseControl(
- new VLVResponseControl(targetOffset, sortMap.size(),
- LDAPResultCode.SUCCESS));
- }
- }
- else
- {
- sortedIDs = new long[sortMap.size()];
- int i=0;
- for (EntryID id : sortMap.values())
- {
- sortedIDs[i++] = id.longValue();
- }
- }
-
- return new EntryIDSet(sortedIDs, 0, sortedIDs.length);
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EnvManager.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EnvManager.java
deleted file mode 100644
index 0c47aec..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EnvManager.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2009 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-import org.forgerock.i18n.LocalizableMessage;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import static org.opends.messages.BackendMessages.*;
-
-import java.io.File;
-import java.io.FilenameFilter;
-
-/**
- * A singleton class to manage the life-cycle of a JE database environment.
- */
-public class EnvManager
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
-
- /**
- * A filename filter to match all kinds of JE files.
- */
- private static final FilenameFilter jeAllFilesFilter;
-
- static
- {
- // A filename filter to match all kinds of JE files.
- // JE has a com.sleepycat.je.log.JEFileFilter that would be useful
- // here but is not public.
- jeAllFilesFilter = new FilenameFilter()
- {
- public boolean accept(File d, String name)
- {
- return name.endsWith(".jdb") ||
- name.endsWith(".del") ||
- name.startsWith("je.");
- }
- };
- }
-
- /**
- * Creates the environment home directory, deleting any existing data files
- * if the directory already exists.
- * The environment must not be open.
- *
- * @param homeDir The backend home directory.
- * @throws JebException If an error occurs in the JE backend.
- */
- public static void createHomeDir(String homeDir)
- throws JebException
- {
- File dir = new File(homeDir);
-
- if (dir.exists())
- {
- if (!dir.isDirectory())
- {
- throw new JebException(ERR_DIRECTORY_INVALID.get(homeDir));
- }
- removeFiles(homeDir);
- }
- else
- {
- try
- {
- dir.mkdir();
- }
- catch (Exception e)
- {
- logger.traceException(e);
- throw new JebException(ERR_CREATE_FAIL.get(e.getMessage()), e);
- }
- }
- }
-
- /**
- * Deletes all the data files associated with the environment.
- * The environment must not be open.
- *
- * @param homeDir The backend home directory
- * @throws JebException If an error occurs in the JE backend or if the
- * specified home directory does not exist.
- */
- public static void removeFiles(String homeDir)
- throws JebException
- {
- File dir = new File(homeDir);
- if (!dir.exists())
- {
- LocalizableMessage message = ERR_DIRECTORY_DOES_NOT_EXIST.get(homeDir);
- throw new JebException(message);
- }
- if (!dir.isDirectory())
- {
- throw new JebException(ERR_DIRECTORY_INVALID.get(homeDir));
- }
-
- try
- {
- File[] jdbFiles = dir.listFiles(jeAllFilesFilter);
- for (File f : jdbFiles)
- {
- f.delete();
- }
- }
- catch (Exception e)
- {
- logger.traceException(e);
- throw new JebException(ERR_REMOVE_FAIL.get(e.getMessage()), e);
- }
- }
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EqualityIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EqualityIndexer.java
deleted file mode 100644
index 0a06dfc..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/EqualityIndexer.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.Collection;
-
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.schema.MatchingRule;
-import org.forgerock.opendj.ldap.schema.Schema;
-import org.forgerock.opendj.ldap.spi.Indexer;
-import org.opends.server.types.AttributeType;
-
-/**
- * An implementation of an Indexer for attribute equality.
- */
-public class EqualityIndexer implements Indexer
-{
-
- /**
- * The attribute type equality matching rule which is also the
- * comparator for the index keys generated by this class.
- */
- private final MatchingRule equalityRule;
-
- /**
- * Create a new attribute equality indexer for the given index configuration.
- * @param attributeType The attribute type for which an indexer is
- * required.
- */
- public EqualityIndexer(AttributeType attributeType)
- {
- this.equalityRule = attributeType.getEqualityMatchingRule();
- }
-
- /** {@inheritDoc} */
- @Override
- public String getIndexID()
- {
- return "equality";
- }
-
- /** {@inheritDoc} */
- @Override
- public void createKeys(Schema schema, ByteSequence value, Collection<ByteString> keys) throws DecodeException
- {
- keys.add(equalityRule.normalizeAttributeValue(value));
- }
-
- @Override
- public String keyToHumanReadableString(ByteSequence key)
- {
- return key.toString();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ExportJob.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ExportJob.java
deleted file mode 100644
index 83243eb..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ExportJob.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2012-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-import com.sleepycat.je.Cursor;
-import com.sleepycat.je.CursorConfig;
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.LockMode;
-import com.sleepycat.je.OperationStatus;
-
-import org.opends.server.util.LDIFException;
-import org.opends.server.util.StaticUtils;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.opends.server.types.*;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import static org.opends.messages.BackendMessages.*;
-
-/**
- * Export a JE backend to LDIF.
- */
-public class ExportJob
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
-
- /**
- * The requested LDIF export configuration.
- */
- private LDIFExportConfig exportConfig;
-
- /**
- * The number of milliseconds between job progress reports.
- */
- private long progressInterval = 10000;
-
- /**
- * The current number of entries exported.
- */
- private long exportedCount;
-
- /**
- * The current number of entries skipped.
- */
- private long skippedCount;
-
- /**
- * Create a new export job.
- *
- * @param exportConfig The requested LDIF export configuration.
- */
- public ExportJob(LDIFExportConfig exportConfig)
- {
- this.exportConfig = exportConfig;
- }
-
- /**
- * Export entries from the backend to an LDIF file.
- * @param rootContainer The root container to export.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws IOException If an I/O error occurs while writing an entry.
- * @throws JebException If an error occurs in the JE backend.
- * @throws LDIFException If an error occurs while trying to determine whether
- * to write an entry.
- */
- public void exportLDIF(RootContainer rootContainer)
- throws IOException, LDIFException, DatabaseException, JebException
- {
- List<DN> includeBranches = exportConfig.getIncludeBranches();
- ArrayList<EntryContainer> exportContainers = new ArrayList<>();
-
- for (EntryContainer entryContainer : rootContainer.getEntryContainers())
- {
- // Skip containers that are not covered by the include branches.
- DN baseDN = entryContainer.getBaseDN();
-
- if (includeBranches == null || includeBranches.isEmpty())
- {
- exportContainers.add(entryContainer);
- }
- else
- {
- for (DN includeBranch : includeBranches)
- {
- if (includeBranch.isDescendantOf(baseDN) ||
- includeBranch.isAncestorOf(baseDN))
- {
- exportContainers.add(entryContainer);
- break;
- }
- }
- }
- }
-
- // Make a note of the time we started.
- long startTime = System.currentTimeMillis();
-
- // Start a timer for the progress report.
- Timer timer = new Timer();
- TimerTask progressTask = new ProgressTask();
- timer.scheduleAtFixedRate(progressTask, progressInterval,
- progressInterval);
-
- // Iterate through the containers.
- try
- {
- for (EntryContainer exportContainer : exportContainers)
- {
- if (exportConfig.isCancelled())
- {
- break;
- }
-
- exportContainer.sharedLock.lock();
- try
- {
- exportContainer(exportContainer);
- }
- finally
- {
- exportContainer.sharedLock.unlock();
- }
- }
- }
- finally
- {
- timer.cancel();
- }
-
-
- long finishTime = System.currentTimeMillis();
- long totalTime = finishTime - startTime;
-
- float rate = 0;
- if (totalTime > 0)
- {
- rate = 1000f*exportedCount / totalTime;
- }
-
- logger.info(NOTE_EXPORT_FINAL_STATUS, exportedCount, skippedCount, totalTime/1000, rate);
-
- }
-
- /**
- * Export the entries in a single entry entryContainer, in other words from
- * one of the base DNs.
- * @param entryContainer The entry container that holds the entries to be
- * exported.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws IOException If an error occurs while writing an entry.
- * @throws LDIFException If an error occurs while trying to determine
- * whether to write an entry.
- */
- private void exportContainer(EntryContainer entryContainer)
- throws DatabaseException, IOException, LDIFException
- {
- ID2Entry id2entry = entryContainer.getID2Entry();
-
- Cursor cursor = id2entry.openCursor(null, new CursorConfig());
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- OperationStatus status;
- for (status = cursor.getFirst(key, data, LockMode.DEFAULT);
- status == OperationStatus.SUCCESS;
- status = cursor.getNext(key, data, LockMode.DEFAULT))
- {
- if (exportConfig.isCancelled())
- {
- break;
- }
-
- EntryID entryID = null;
- try
- {
- entryID = new EntryID(key);
- }
- catch (Exception e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Malformed id2entry ID %s.%n",
- StaticUtils.bytesToHex(key.getData()));
- }
- skippedCount++;
- continue;
- }
-
- if (entryID.longValue() == 0)
- {
- // This is the stored entry count.
- continue;
- }
-
- Entry entry = null;
- try
- {
- entry = ID2Entry.entryFromDatabase(ByteString.wrap(data.getData()),
- entryContainer.getRootContainer().getCompressedSchema());
- }
- catch (Exception e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Malformed id2entry record for ID %d:%n%s%n",
- entryID.longValue(),
- StaticUtils.bytesToHex(data.getData()));
- }
- skippedCount++;
- continue;
- }
-
- if (entry.toLDIF(exportConfig))
- {
- exportedCount++;
- }
- else
- {
- skippedCount++;
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * This class reports progress of the export job at fixed intervals.
- */
- class ProgressTask extends TimerTask
- {
- /**
- * The number of entries that had been exported at the time of the
- * previous progress report.
- */
- private long previousCount;
-
- /**
- * The time in milliseconds of the previous progress report.
- */
- private long previousTime;
-
- /**
- * Create a new export progress task.
- */
- public ProgressTask()
- {
- previousTime = System.currentTimeMillis();
- }
-
- /**
- * The action to be performed by this timer task.
- */
- public void run()
- {
- long latestCount = exportedCount;
- long deltaCount = latestCount - previousCount;
- long latestTime = System.currentTimeMillis();
- long deltaTime = latestTime - previousTime;
-
- if (deltaTime == 0)
- {
- return;
- }
-
- float rate = 1000f*deltaCount / deltaTime;
-
- logger.info(NOTE_EXPORT_PROGRESS_REPORT, latestCount, skippedCount, rate);
-
- previousCount = latestCount;
- previousTime = latestTime;
- }
- }
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2CIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2CIndexer.java
deleted file mode 100644
index 54d3a52..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2CIndexer.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.*;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-import com.sleepycat.je.DatabaseEntry;
-
-/**
- * Implementation of an Indexer for the children index.
- */
-public class ID2CIndexer extends Indexer
-{
- /**
- * Create a new indexer for a children index.
- */
- public ID2CIndexer()
- {
- // No implementation required.
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return "id2children";
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> addKeys)
- {
- // The superior entry IDs are in the entry attachment.
- ArrayList<EntryID> ids = (ArrayList<EntryID>) entry.getAttachment();
-
- // Skip the entry's own ID.
- Iterator<EntryID> iter = ids.iterator();
- iter.next();
-
- // Get the parent ID.
- if (iter.hasNext())
- {
- DatabaseEntry nodeIDData = iter.next().getDatabaseEntry();
- addKeys.add(ByteString.wrap(nodeIDData.getData()));
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods,
- Map<ByteString, Boolean> modifiedKeys)
- {
- // Nothing to do.
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2Entry.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2Entry.java
deleted file mode 100644
index 792f1b3..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2Entry.java
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2012-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.OperationStatus.*;
-
-import static org.forgerock.util.Utils.*;
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.core.DirectoryServer.*;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.zip.DataFormatException;
-import java.util.zip.DeflaterOutputStream;
-import java.util.zip.InflaterOutputStream;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.io.ASN1;
-import org.forgerock.opendj.io.ASN1Reader;
-import org.forgerock.opendj.io.ASN1Writer;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.opends.server.api.CompressedSchema;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.Entry;
-import org.opends.server.types.LDAPException;
-
-import com.sleepycat.je.*;
-
-/**
- * Represents the database containing the LDAP entries. The database key is
- * the entry ID and the value is the entry contents.
- */
-public class ID2Entry extends DatabaseContainer
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** Parameters for compression and encryption. */
- private DataConfig dataConfig;
-
- /** Cached encoding buffers. */
- private static final ThreadLocal<EntryCodec> ENTRY_CODEC_CACHE = new ThreadLocal<EntryCodec>()
- {
- @Override
- protected EntryCodec initialValue()
- {
- return new EntryCodec();
- }
- };
-
- private static EntryCodec acquireEntryCodec()
- {
- EntryCodec codec = ENTRY_CODEC_CACHE.get();
- if (codec.maxBufferSize != getMaxInternalBufferSize())
- {
- // Setting has changed, so recreate the codec.
- codec = new EntryCodec();
- ENTRY_CODEC_CACHE.set(codec);
- }
- return codec;
- }
-
- /**
- * A cached set of ByteStringBuilder buffers and ASN1Writer used to encode
- * entries.
- */
- private static final class EntryCodec
- {
- private static final int BUFFER_INIT_SIZE = 512;
-
- private final ByteStringBuilder encodedBuffer = new ByteStringBuilder();
- private final ByteStringBuilder entryBuffer = new ByteStringBuilder();
- private final ByteStringBuilder compressedEntryBuffer = new ByteStringBuilder();
- private final ASN1Writer writer;
- private final int maxBufferSize;
-
- private EntryCodec()
- {
- this.maxBufferSize = getMaxInternalBufferSize();
- this.writer = ASN1.getWriter(encodedBuffer, maxBufferSize);
- }
-
- private void release()
- {
- closeSilently(writer);
- encodedBuffer.clearAndTruncate(maxBufferSize, BUFFER_INIT_SIZE);
- entryBuffer.clearAndTruncate(maxBufferSize, BUFFER_INIT_SIZE);
- compressedEntryBuffer.clearAndTruncate(maxBufferSize, BUFFER_INIT_SIZE);
- }
-
- private Entry decode(ByteString bytes, CompressedSchema compressedSchema)
- throws DirectoryException, DecodeException, LDAPException,
- DataFormatException, IOException
- {
- // Get the format version.
- byte formatVersion = bytes.byteAt(0);
- if(formatVersion != JebFormat.FORMAT_VERSION)
- {
- throw DecodeException.error(ERR_INCOMPATIBLE_ENTRY_VERSION.get(formatVersion));
- }
-
- // Read the ASN1 sequence.
- ASN1Reader reader = ASN1.getReader(bytes.subSequence(1, bytes.length()));
- reader.readStartSequence();
-
- // See if it was compressed.
- int uncompressedSize = (int)reader.readInteger();
- if(uncompressedSize > 0)
- {
- // It was compressed.
- reader.readOctetString(compressedEntryBuffer);
-
- OutputStream decompressor = null;
- try
- {
- // TODO: Should handle the case where uncompress fails
- decompressor = new InflaterOutputStream(entryBuffer.asOutputStream());
- compressedEntryBuffer.copyTo(decompressor);
- }
- finally {
- closeSilently(decompressor);
- }
-
- // Since we are used the cached buffers (ByteStringBuilders),
- // the decoded attribute values will not refer back to the
- // original buffer.
- return Entry.decode(entryBuffer.asReader(), compressedSchema);
- }
- else
- {
- // Since we don't have to do any decompression, we can just decode
- // the entry directly.
- ByteString encodedEntry = reader.readOctetString();
- return Entry.decode(encodedEntry.asReader(), compressedSchema);
- }
- }
-
- private ByteString encodeCopy(Entry entry, DataConfig dataConfig)
- throws DirectoryException
- {
- encodeVolatile(entry, dataConfig);
- return encodedBuffer.toByteString();
- }
-
- private DatabaseEntry encodeInternal(Entry entry, DataConfig dataConfig)
- throws DirectoryException
- {
- encodeVolatile(entry, dataConfig);
- return new DatabaseEntry(encodedBuffer.getBackingArray(), 0, encodedBuffer.length());
- }
-
- private void encodeVolatile(Entry entry, DataConfig dataConfig) throws DirectoryException
- {
- // Encode the entry for later use.
- entry.encode(entryBuffer, dataConfig.getEntryEncodeConfig());
-
- // First write the DB format version byte.
- encodedBuffer.appendByte(JebFormat.FORMAT_VERSION);
-
- try
- {
- // Then start the ASN1 sequence.
- writer.writeStartSequence(JebFormat.TAG_DATABASE_ENTRY);
-
- if (dataConfig.isCompressed())
- {
- OutputStream compressor = null;
- try {
- compressor = new DeflaterOutputStream(compressedEntryBuffer.asOutputStream());
- entryBuffer.copyTo(compressor);
- }
- finally {
- closeSilently(compressor);
- }
-
- // Compression needed and successful.
- writer.writeInteger(entryBuffer.length());
- writer.writeOctetString(compressedEntryBuffer);
- }
- else
- {
- writer.writeInteger(0);
- writer.writeOctetString(entryBuffer);
- }
-
- writer.writeEndSequence();
- }
- catch(IOException ioe)
- {
- // TODO: This should never happen with byte buffer.
- logger.traceException(ioe);
- }
- }
- }
-
- /**
- * Create a new ID2Entry object.
- *
- * @param name The name of the entry database.
- * @param dataConfig The desired compression and encryption options for data
- * stored in the entry database.
- * @param env The JE Environment.
- * @param entryContainer The entryContainer of the entry database.
- * @throws DatabaseException If an error occurs in the JE database.
- *
- */
- ID2Entry(String name, DataConfig dataConfig, Environment env, EntryContainer entryContainer)
- throws DatabaseException
- {
- super(name, env, entryContainer);
- this.dataConfig = dataConfig;
- this.dbConfig = JEBUtils.toDatabaseConfigNoDuplicates(env);
- }
-
- /**
- * Decodes an entry from its database representation.
- * <p>
- * An entry on disk is ASN1 encoded in this format:
- *
- * <pre>
- * DatabaseEntry ::= [APPLICATION 0] IMPLICIT SEQUENCE {
- * uncompressedSize INTEGER, -- A zero value means not compressed.
- * dataBytes OCTET STRING -- Optionally compressed encoding of
- * the data bytes.
- * }
- *
- * ID2EntryValue ::= DatabaseEntry
- * -- Where dataBytes contains an encoding of DirectoryServerEntry.
- *
- * DirectoryServerEntry ::= [APPLICATION 1] IMPLICIT SEQUENCE {
- * dn LDAPDN,
- * objectClasses SET OF LDAPString,
- * userAttributes AttributeList,
- * operationalAttributes AttributeList
- * }
- * </pre>
- *
- * @param bytes A byte array containing the encoded database value.
- * @param compressedSchema The compressed schema manager to use when decoding.
- * @return The decoded entry.
- * @throws DecodeException If the data is not in the expected ASN.1 encoding
- * format.
- * @throws LDAPException If the data is not in the expected ASN.1 encoding
- * format.
- * @throws DataFormatException If an error occurs while trying to decompress
- * compressed data.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws IOException if an error occurs while reading the ASN1 sequence.
- */
- public static Entry entryFromDatabase(ByteString bytes,
- CompressedSchema compressedSchema) throws DirectoryException,
- DecodeException, LDAPException, DataFormatException, IOException
- {
- EntryCodec codec = acquireEntryCodec();
- try
- {
- return codec.decode(bytes, compressedSchema);
- }
- finally
- {
- codec.release();
- }
- }
-
- /**
- * Encodes an entry to the raw database format, with optional compression.
- *
- * @param entry The entry to encode.
- * @param dataConfig Compression and cryptographic options.
- * @return A ByteSTring containing the encoded database value.
- *
- * @throws DirectoryException If a problem occurs while attempting to encode
- * the entry.
- */
- static ByteString entryToDatabase(Entry entry, DataConfig dataConfig) throws DirectoryException
- {
- EntryCodec codec = acquireEntryCodec();
- try
- {
- return codec.encodeCopy(entry, dataConfig);
- }
- finally
- {
- codec.release();
- }
- }
-
-
-
- /**
- * Insert a record into the entry database.
- *
- * @param txn The database transaction or null if none.
- * @param id The entry ID which forms the key.
- * @param entry The LDAP entry.
- * @return true if the entry was inserted, false if a record with that
- * ID already existed.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a problem occurs while attempting to encode
- * the entry.
- */
- boolean insert(Transaction txn, EntryID id, Entry entry)
- throws DatabaseException, DirectoryException
- {
- DatabaseEntry key = id.getDatabaseEntry();
- EntryCodec codec = acquireEntryCodec();
- try
- {
- DatabaseEntry data = codec.encodeInternal(entry, dataConfig);
- return insert(txn, key, data) == SUCCESS;
- }
- finally
- {
- codec.release();
- }
- }
-
- /**
- * Write a record in the entry database.
- *
- * @param txn The database transaction or null if none.
- * @param id The entry ID which forms the key.
- * @param entry The LDAP entry.
- * @return true if the entry was written, false if it was not.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a problem occurs while attempting to encode
- * the entry.
- */
- public boolean put(Transaction txn, EntryID id, Entry entry)
- throws DatabaseException, DirectoryException
- {
- DatabaseEntry key = id.getDatabaseEntry();
- EntryCodec codec = acquireEntryCodec();
- try
- {
- DatabaseEntry data = codec.encodeInternal(entry, dataConfig);
- return put(txn, key, data) == SUCCESS;
- }
- finally
- {
- codec.release();
- }
- }
-
- /**
- * Remove a record from the entry database.
- *
- * @param txn The database transaction or null if none.
- * @param id The entry ID which forms the key.
- * @return true if the entry was removed, false if it was not.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- boolean remove(Transaction txn, EntryID id) throws DatabaseException
- {
- DatabaseEntry key = id.getDatabaseEntry();
- return delete(txn, key) == SUCCESS;
- }
-
- /**
- * Fetch a record from the entry database.
- *
- * @param txn The database transaction or null if none.
- * @param id The desired entry ID which forms the key.
- * @param lockMode The JE locking mode to be used for the read.
- * @return The requested entry, or null if there is no such record.
- * @throws DirectoryException If a problem occurs while getting the entry.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public Entry get(Transaction txn, EntryID id, LockMode lockMode)
- throws DirectoryException, DatabaseException
- {
- DatabaseEntry key = id.getDatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- if (read(txn, key, data, lockMode) != SUCCESS)
- {
- return null;
- }
-
- try
- {
- Entry entry = entryFromDatabase(ByteString.wrap(data.getData()),
- entryContainer.getRootContainer().getCompressedSchema());
- entry.processVirtualAttributes();
- return entry;
- }
- catch (Exception e)
- {
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_ENTRY_DATABASE_CORRUPT.get(id));
- }
- }
-
- /**
- * Set the desired compression and encryption options for data
- * stored in the entry database.
- *
- * @param dataConfig The desired compression and encryption options for data
- * stored in the entry database.
- */
- public void setDataConfig(DataConfig dataConfig)
- {
- this.dataConfig = dataConfig;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2SIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2SIndexer.java
deleted file mode 100644
index c672282..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ID2SIndexer.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.*;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-import com.sleepycat.je.DatabaseEntry;
-
-/**
- * Implementation of an Indexer for the subtree index.
- */
-public class ID2SIndexer extends Indexer
-{
- /**
- * Create a new indexer for a subtree index.
- */
- public ID2SIndexer()
- {
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return "id2subtree";
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> addKeys)
- {
- // The superior entry IDs are in the entry attachment.
- ArrayList<EntryID> ids = (ArrayList<EntryID>) entry.getAttachment();
-
- // Skip the entry's own ID.
- Iterator<EntryID> iter = ids.iterator();
- iter.next();
-
- // Iterate through the superior IDs.
- while (iter.hasNext())
- {
- DatabaseEntry nodeIDData = iter.next().getDatabaseEntry();
- addKeys.add(ByteString.wrap(nodeIDData.getData()));
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods,
- Map<ByteString, Boolean> modifiedKeys)
- {
- // Nothing to do.
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IDSetIterator.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IDSetIterator.java
deleted file mode 100644
index 6c9343c..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IDSetIterator.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- */
-package org.opends.server.backends.jeb;
-
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-/**
- * Iterator for a set of Entry IDs. It must return values in order of ID.
- */
-public class IDSetIterator implements Iterator<EntryID>
-{
- /**
- * An array of ID values in order of ID.
- */
- private long[] entryIDList;
-
- /**
- * Current position of the iterator as an index into the array of IDs.
- */
- private int i;
-
- /**
- * Create a new iterator for a given array of entry IDs.
- * @param entryIDList An array of IDs in order or ID.
- */
- public IDSetIterator(long[] entryIDList)
- {
- this.entryIDList = entryIDList;
- }
-
- /**
- * Create a new iterator for a given array of entry IDs.
- * @param entryIDList An array of IDs in order or ID.
- * @param begin The entry ID of the first entry that should be returned, or
- * {@code null} if it should start at the beginning of the list.
- */
- public IDSetIterator(long[] entryIDList, EntryID begin)
- {
- this.entryIDList = entryIDList;
-
- if (begin == null)
- {
- i = 0;
- }
- else
- {
- for (i=0; i < entryIDList.length; i++)
- {
- if (entryIDList[i] == begin.longValue())
- {
- break;
- }
- }
-
- if (i >= entryIDList.length)
- {
- i = 0;
- }
- }
- }
-
- /**
- * Returns <tt>true</tt> if the iteration has more elements. (In other
- * words, returns <tt>true</tt> if <tt>next</tt> would return an element
- * rather than throwing an exception.)
- *
- * @return <tt>true</tt> if the iterator has more elements.
- */
- public boolean hasNext()
- {
- return i < entryIDList.length;
- }
-
- /**
- * Returns the next element in the iteration. Calling this method
- * repeatedly until the {@link #hasNext()} method returns false will
- * return each element in the underlying collection exactly once.
- *
- * @return the next element in the iteration.
- * @throws java.util.NoSuchElementException
- * iteration has no more elements.
- */
- public EntryID next()
- throws NoSuchElementException
- {
- if (i < entryIDList.length)
- {
- return new EntryID(entryIDList[i++]);
- }
- throw new NoSuchElementException();
- }
-
- /**
- *
- * Removes from the underlying collection the last element returned by the
- * iterator (optional operation). This method can be called only once per
- * call to <tt>next</tt>. The behavior of an iterator is unspecified if
- * the underlying collection is modified while the iteration is in
- * progress in any way other than by calling this method.
- *
- * @exception UnsupportedOperationException if the <tt>remove</tt>
- * operation is not supported by this Iterator.
- */
- public void remove() throws UnsupportedOperationException
- {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ImportIDSet.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ImportIDSet.java
deleted file mode 100644
index cf942e4..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ImportIDSet.java
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.nio.ByteBuffer;
-
-import org.forgerock.util.Reject;
-
-/**
- * This class manages the set of ID that are to be eventually added to an index
- * database. It is responsible for determining if the number of IDs is above
- * the configured ID limit. If the limit it reached, the class stops tracking
- * individual IDs and marks the set as undefined. This class is not thread safe.
- */
-class ImportIDSet {
-
- /** The internal array where elements are stored. */
- private long[] array;
- /** The number of valid elements in the array. */
- private int count;
- /** Boolean to keep track if the instance is defined or not. */
- private boolean isDefined = true;
- /** Size of the undefined if count is kept. */
- private long undefinedSize;
- /** Key related to an ID set. */
- private ByteBuffer key;
- /** The entry limit size. */
- private final int limit;
- /** Set to true if a count of ids above the entry limit should be kept. */
- private final boolean doCount;
-
- /**
- * Create an import ID set of the specified size, index limit and index
- * maintain count, plus an extra 128 slots.
- *
- * @param size The size of the the underlying array, plus some extra space.
- * @param limit The index entry limit.
- * @param doCount The index maintain count.
- */
- public ImportIDSet(int size, int limit, boolean doCount)
- {
- this.array = new long[size + 128];
- // A limit of 0 means unlimited.
- this.limit = limit == 0 ? Integer.MAX_VALUE : limit;
- this.doCount = doCount;
- }
-
- /**
- * Clear the set so it can be reused again. The boolean indexParam specifies
- * if the index parameters should be cleared also.
- */
- public void clear()
- {
- undefinedSize = 0;
- isDefined = true;
- count = 0;
- }
-
- /**
- * Return if an import ID set is defined or not.
- *
- * @return <CODE>True</CODE> if an import ID set is defined.
- */
- public boolean isDefined()
- {
- return isDefined;
- }
-
- /** Set an import ID set to undefined. */
- void setUndefined() {
- array = null;
- isDefined = false;
- }
-
- /**
- * Add the specified entry id to an import ID set.
- *
- * @param entryID The entry ID to add to an import ID set.
- */
- void addEntryID(EntryID entryID) {
- addEntryID(entryID.longValue());
- }
-
- /**
- * Add the specified long value to an import ID set.
- *
- * @param entryID The long value to add to an import ID set.
- */
- void addEntryID(long entryID) {
- Reject.ifTrue(entryID < 0, "entryID must always be positive");
-
- if(!isDefined()) {
- if(doCount) {
- undefinedSize++;
- }
- return;
- }
- if (isDefined() && count + 1 > limit)
- {
- setUndefined();
- if(doCount) {
- undefinedSize = count + 1;
- } else {
- undefinedSize = Long.MAX_VALUE;
- }
- count = 0;
- } else {
- add(entryID);
- }
- }
-
- private boolean mergeCount(byte[] dBbytes, ImportIDSet importIdSet) {
- boolean incrementLimitCount=false;
- boolean dbUndefined = isDBUndefined(dBbytes);
-
- if (dbUndefined && !importIdSet.isDefined()) {
- undefinedSize = JebFormat.entryIDUndefinedSizeFromDatabase(dBbytes) +
- importIdSet.undefinedSize;
- isDefined=false;
- } else if (dbUndefined && importIdSet.isDefined()) {
- undefinedSize = JebFormat.entryIDUndefinedSizeFromDatabase(dBbytes) +
- importIdSet.size();
- isDefined=false;
- } else if(!importIdSet.isDefined()) {
- int dbSize = JebFormat.entryIDListFromDatabase(dBbytes).length;
- undefinedSize = dbSize + importIdSet.undefinedSize;
- isDefined = false;
- incrementLimitCount = true;
- } else {
- array = JebFormat.entryIDListFromDatabase(dBbytes);
- if(array.length + importIdSet.size() > limit) {
- undefinedSize = array.length + importIdSet.size();
- isDefined=false;
- incrementLimitCount=true;
- } else {
- count = array.length;
- addAll(importIdSet);
- }
- }
- return incrementLimitCount;
- }
-
- /**
- * Remove the specified import ID set from the byte array read from the DB.
- *
- * @param bytes The byte array read from JEB.
- * @param importIdSet The import ID set to delete.
- */
- public void remove(byte[] bytes, ImportIDSet importIdSet)
- {
- if (isDBUndefined(bytes)) {
- isDefined=false;
- importIdSet.setUndefined();
- undefinedSize = Long.MAX_VALUE;
- } else if(!importIdSet.isDefined()) {
- isDefined=false;
- undefinedSize = Long.MAX_VALUE;
- } else {
- array = JebFormat.entryIDListFromDatabase(bytes);
- if(array.length - importIdSet.size() > limit) {
- isDefined=false;
- count = 0;
- importIdSet.setUndefined();
- undefinedSize = Long.MAX_VALUE;
- } else {
- count = array.length;
- removeAll(importIdSet);
- }
- }
- }
-
- /**
- * Merge the specified byte array read from a DB, with the specified import
- * ID set. The specified limit and maintain count parameters define
- * if the newly merged set is defined or not.
- *
- * @param bytes The byte array of IDs read from a DB.
- * @param importIdSet The import ID set to merge the byte array with.
- * @return <CODE>True</CODE> if the import ID set started keeping a count as
- * a result of the merge.
- */
- public boolean merge(byte[] bytes, ImportIDSet importIdSet)
- {
- boolean incrementLimitCount=false;
- if(doCount) {
- incrementLimitCount = mergeCount(bytes, importIdSet);
- } else if (isDBUndefined(bytes)) {
- isDefined = false;
- importIdSet.setUndefined();
- undefinedSize = Long.MAX_VALUE;
- count = 0;
- } else if(!importIdSet.isDefined()) {
- isDefined = false;
- incrementLimitCount = true;
- undefinedSize = Long.MAX_VALUE;
- count = 0;
- } else {
- array = JebFormat.entryIDListFromDatabase(bytes);
- if (array.length + importIdSet.size() > limit) {
- isDefined = false;
- incrementLimitCount = true;
- count = 0;
- importIdSet.setUndefined();
- undefinedSize = Long.MAX_VALUE;
- } else {
- count = array.length;
- addAll(importIdSet);
- }
- }
- return incrementLimitCount;
- }
-
- private boolean isDBUndefined(byte[] bytes)
- {
- return (bytes[0] & 0x80) == 0x80;
- }
-
- private void removeAll(ImportIDSet that) {
- long[] newArray = new long[array.length];
- int c = 0;
- for(int i=0; i < count; i++)
- {
- if(binarySearch(that.array, that.count, array[i]) < 0)
- {
- newArray[c++] = array[i];
- }
- }
- array = newArray;
- count = c;
- }
-
- private void addAll(ImportIDSet that) {
- resize(this.count+that.count);
-
- if (that.count == 0)
- {
- return;
- }
-
- // Optimize for the case where the two sets are sure to have no overlap.
- if (this.count == 0 || that.array[0] > this.array[this.count-1])
- {
- System.arraycopy(that.array, 0, this.array, this.count, that.count);
- count += that.count;
- return;
- }
-
- if (this.array[0] > that.array[that.count-1])
- {
- System.arraycopy(this.array, 0, this.array, that.count, this.count);
- System.arraycopy(that.array, 0, this.array, 0, that.count);
- count += that.count;
- return;
- }
-
- int destPos = binarySearch(this.array, this.count, that.array[0]);
- if (destPos < 0)
- {
- destPos = -(destPos+1);
- }
-
- // Make space for the copy.
- int aCount = this.count - destPos;
- int aPos = destPos + that.count;
- int aEnd = aPos + aCount;
- System.arraycopy(this.array, destPos, this.array, aPos, aCount);
-
- // Optimize for the case where there is no overlap.
- if (this.array[aPos] > that.array[that.count-1])
- {
- System.arraycopy(that.array, 0, this.array, destPos, that.count);
- count += that.count;
- return;
- }
-
- int bPos;
- for ( bPos = 0; aPos < aEnd && bPos < that.count; )
- {
- if ( this.array[aPos] < that.array[bPos] )
- {
- this.array[destPos++] = this.array[aPos++];
- }
- else if ( this.array[aPos] > that.array[bPos] )
- {
- this.array[destPos++] = that.array[bPos++];
- }
- else
- {
- this.array[destPos++] = this.array[aPos++];
- bPos++;
- }
- }
-
- // Copy any remainder.
- int aRemain = aEnd - aPos;
- if (aRemain > 0)
- {
- System.arraycopy(this.array, aPos, this.array, destPos, aRemain);
- destPos += aRemain;
- }
-
- int bRemain = that.count - bPos;
- if (bRemain > 0)
- {
- System.arraycopy(that.array, bPos, this.array, destPos, bRemain);
- destPos += bRemain;
- }
-
- count = destPos;
- }
-
- /**
- * Return the number of IDs in an import ID set.
- *
- * @return The current size of an import ID set.
- */
- public int size()
- {
- return count;
- }
-
- private boolean add(long entryID)
- {
- resize(count+1);
-
- if (count == 0 || entryID > array[count-1])
- {
- array[count++] = entryID;
- return true;
- }
-
- int pos = binarySearch(array, count, entryID);
- if (pos >=0)
- {
- return false;
- }
-
- // For a negative return value r, the index -(r+1) gives the array
- // index at which the specified value can be inserted to maintain
- // the sorted order of the array.
- pos = -(pos+1);
-
- System.arraycopy(array, pos, array, pos+1, count-pos);
- array[pos] = entryID;
- count++;
- return true;
- }
-
- private static int binarySearch(long[] a, int count, long key)
- {
- int low = 0;
- int high = count-1;
-
- while (low <= high)
- {
- int mid = low + high >> 1;
- long midVal = a[mid];
-
- if (midVal < key)
- {
- low = mid + 1;
- }
- else if (midVal > key)
- {
- high = mid - 1;
- }
- else
- {
- return mid; // key found
- }
- }
- return -(low + 1); // key not found.
- }
-
- private void resize(int size)
- {
- if (array == null)
- {
- array = new long[size];
- }
- else if (array.length < size)
- {
- // Expand the size of the array in powers of two.
- int newSize = array.length == 0 ? 1 : array.length;
- do
- {
- newSize *= 2;
- } while (newSize < size);
-
- long[] newBytes = new long[newSize];
- System.arraycopy(array, 0, newBytes, 0, count);
- array = newBytes;
- }
- }
-
- /**
- * Create a byte array suitable to write to a JEB DB from an import ID set.
- *
- * @return A byte array suitable for writing to a JEB DB.
- */
- public byte[] toDatabase()
- {
- if(isDefined) {
- return encodeDefined();
- } else {
- return JebFormat.entryIDUndefinedSizeToDatabase(undefinedSize);
- }
- }
-
- private byte[] encodeDefined()
- {
- final int encodedSize = count * 8;
- final byte[] bytes = new byte[encodedSize];
- int pos = 0;
- for (int i = 0; i < count; i++) {
- final long id = array[i] & 0x00ffffffffL; // JNR: why is this necessary?
-
- // encode the entryID
- bytes[pos++] = (byte) ((id >>> 56) & 0xFF);
- bytes[pos++] = (byte) ((id >>> 48) & 0xFF);
- bytes[pos++] = (byte) ((id >>> 40) & 0xFF);
- bytes[pos++] = (byte) ((id >>> 32) & 0xFF);
- bytes[pos++] = (byte) ((id >>> 24) & 0xFF);
- bytes[pos++] = (byte) ((id >>> 16) & 0xFF);
- bytes[pos++] = (byte) ((id >>> 8) & 0xFF);
- bytes[pos++] = (byte) (id & 0xFF);
- }
- return bytes;
- }
-
- /**
- * Set the DB key related to an import ID set.
- *
- * @param key Byte array containing the key.
- */
- public void setKey(ByteBuffer key)
- {
- this.key = key;
- }
-
- /**
- * Return the DB key related to an import ID set.
- *
- * @return The byte array containing the key.
- */
- public ByteBuffer getKey()
- {
- return key;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ImportLDIFReader.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ImportLDIFReader.java
deleted file mode 100644
index f4f6c1a..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ImportLDIFReader.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- * Copyright 2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.UtilityMessages.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.LocalizableMessageBuilder;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.util.Reject;
-import org.opends.server.api.plugin.PluginResult;
-import org.opends.server.types.AttributeBuilder;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.DN;
-import org.opends.server.types.Entry;
-import org.opends.server.types.LDIFImportConfig;
-import org.opends.server.types.ObjectClass;
-import org.opends.server.util.LDIFException;
-import org.opends.server.util.LDIFReader;
-
-/**
- * This class specializes the LDIFReader for imports.
- */
-final class ImportLDIFReader extends LDIFReader
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- private final RootContainer rootContainer;
-
- /**
- * Creates a new LDIF reader that will read information from the specified file.
- *
- * @param importConfig
- * The import configuration for this LDIF reader. It must not be <CODE>null</CODE>.
- * @param rootContainer
- * The root container needed to get the next entry ID.
- * @throws IOException
- * If a problem occurs while opening the LDIF file for reading.
- */
- public ImportLDIFReader(LDIFImportConfig importConfig, RootContainer rootContainer) throws IOException
- {
- super(importConfig);
- Reject.ifNull(importConfig, rootContainer);
- this.rootContainer = rootContainer;
- }
-
- /**
- * Reads the next entry from the LDIF source.
- *
- * @return The next entry read from the LDIF source, or <CODE>null</CODE> if the end of the LDIF
- * data is reached.
- * @param suffixesMap
- * A map of suffixes instances.
- * @param entryInfo
- * A object to hold information about the entry ID and what suffix was selected.
- * @throws IOException
- * If an I/O problem occurs while reading from the file.
- * @throws LDIFException
- * If the information read cannot be parsed as an LDIF entry.
- */
- public final Entry readEntry(Map<DN, Suffix> suffixesMap, Importer.EntryInformation entryInfo) throws IOException,
- LDIFException
- {
- final boolean checkSchema = importConfig.validateSchema();
- while (true)
- {
- LinkedList<StringBuilder> lines;
- DN entryDN;
- EntryID entryID;
- Suffix suffix;
- synchronized (this)
- {
- // Read the set of lines that make up the next entry.
- lines = readEntryLines();
- if (lines == null)
- {
- return null;
- }
- lastEntryBodyLines = lines;
- lastEntryHeaderLines = new LinkedList<>();
-
- // Read the DN of the entry and see if it is one that should be included
- // in the import.
- try
- {
- entryDN = readDN(lines);
- }
- catch (LDIFException e)
- {
- logger.traceException(e);
- continue;
- }
-
- if (entryDN == null)
- {
- // This should only happen if the LDIF starts with the "version:" line
- // and has a blank line immediately after that. In that case, simply
- // read and return the next entry.
- continue;
- }
- else if (!importConfig.includeEntry(entryDN))
- {
- logger.trace("Skipping entry %s because the DN is not one that "
- + "should be included based on the include and exclude branches.", entryDN);
- entriesRead.incrementAndGet();
- logToSkipWriter(lines, ERR_LDIF_SKIP.get(entryDN));
- continue;
- }
- suffix = Importer.getMatchSuffix(entryDN, suffixesMap);
- if (suffix == null)
- {
- logger.trace("Skipping entry %s because the DN is not one that "
- + "should be included based on a suffix match check.", entryDN);
- entriesRead.incrementAndGet();
- logToSkipWriter(lines, ERR_LDIF_SKIP.get(entryDN));
- continue;
- }
- entriesRead.incrementAndGet();
- entryID = rootContainer.getNextEntryID();
- suffix.addPending(entryDN);
- }
-
- // Create the entry and see if it is one that should be included in the import
- final Entry entry = createEntry(lines, entryDN, checkSchema, suffix);
- if (entry == null
- || !isIncludedInImport(entry, suffix, lines)
- || !invokeImportPlugins(entry, suffix, lines)
- || (checkSchema && !isValidAgainstSchema(entry, suffix, lines)))
- {
- continue;
- }
- entryInfo.setEntryID(entryID);
- entryInfo.setSuffix(suffix);
- // The entry should be included in the import, so return it.
- return entry;
- }
- }
-
- private Entry createEntry(List<StringBuilder> lines, DN entryDN, boolean checkSchema, Suffix suffix)
- {
- // Read the set of attributes from the entry.
- Map<ObjectClass, String> objectClasses = new HashMap<>();
- Map<AttributeType, List<AttributeBuilder>> userAttrBuilders = new HashMap<>();
- Map<AttributeType, List<AttributeBuilder>> operationalAttrBuilders = new HashMap<>();
- try
- {
- for (StringBuilder line : lines)
- {
- readAttribute(lines, line, entryDN, objectClasses, userAttrBuilders, operationalAttrBuilders, checkSchema);
- }
- }
- catch (LDIFException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("Skipping entry %s because reading" + "its attributes failed.", entryDN);
- }
- logToSkipWriter(lines, ERR_LDIF_READ_ATTR_SKIP.get(entryDN, e.getMessage()));
- suffix.removePending(entryDN);
- return null;
- }
-
- final Entry entry = new Entry(entryDN, objectClasses,
- toAttributesMap(userAttrBuilders), toAttributesMap(operationalAttrBuilders));
- logger.trace("readEntry(), created entry: %s", entry);
- return entry;
- }
-
- private boolean isIncludedInImport(Entry entry, Suffix suffix, LinkedList<StringBuilder> lines)
- {
- final DN entryDN = entry.getName();
- try
- {
- if (!importConfig.includeEntry(entry))
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("Skipping entry %s because the DN is not one that "
- + "should be included based on the include and exclude filters.", entryDN);
- }
- logToSkipWriter(lines, ERR_LDIF_SKIP.get(entryDN));
- suffix.removePending(entryDN);
- return false;
- }
- }
- catch (Exception e)
- {
- logger.traceException(e);
- suffix.removePending(entryDN);
- logToSkipWriter(lines,
- ERR_LDIF_COULD_NOT_EVALUATE_FILTERS_FOR_IMPORT.get(entry.getName(), lastEntryLineNumber, e));
- suffix.removePending(entryDN);
- return false;
- }
- return true;
- }
-
- private boolean invokeImportPlugins(final Entry entry, Suffix suffix, LinkedList<StringBuilder> lines)
- {
- if (importConfig.invokeImportPlugins())
- {
- PluginResult.ImportLDIF pluginResult = pluginConfigManager.invokeLDIFImportPlugins(importConfig, entry);
- if (!pluginResult.continueProcessing())
- {
- final DN entryDN = entry.getName();
- LocalizableMessage m;
- LocalizableMessage rejectMessage = pluginResult.getErrorMessage();
- if (rejectMessage == null)
- {
- m = ERR_LDIF_REJECTED_BY_PLUGIN_NOMESSAGE.get(entryDN);
- }
- else
- {
- m = ERR_LDIF_REJECTED_BY_PLUGIN.get(entryDN, rejectMessage);
- }
-
- logToRejectWriter(lines, m);
- suffix.removePending(entryDN);
- return false;
- }
- }
- return true;
- }
-
- private boolean isValidAgainstSchema(Entry entry, Suffix suffix, LinkedList<StringBuilder> lines)
- {
- final DN entryDN = entry.getName();
- addRDNAttributesIfNecessary(entryDN, entry.getUserAttributes(), entry.getOperationalAttributes());
- // Add any superior objectclass(s) missing in the objectclass map.
- addSuperiorObjectClasses(entry.getObjectClasses());
-
- LocalizableMessageBuilder invalidReason = new LocalizableMessageBuilder();
- if (!entry.conformsToSchema(null, false, true, false, invalidReason))
- {
- LocalizableMessage message = ERR_LDIF_SCHEMA_VIOLATION.get(entryDN, lastEntryLineNumber, invalidReason);
- logToRejectWriter(lines, message);
- suffix.removePending(entryDN);
- return false;
- }
- return true;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Importer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Importer.java
deleted file mode 100644
index e1284d6..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Importer.java
+++ /dev/null
@@ -1,4336 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2008-2010 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.EnvironmentConfig.*;
-
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.admin.std.meta.LocalDBIndexCfgDefn.IndexType.*;
-import static org.opends.server.backends.pluggable.SuffixContainer.*;
-import static org.opends.server.util.DynamicConstants.*;
-import static org.opends.server.util.ServerConstants.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.LocalizableMessageDescriptor.Arg2;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.util.Utils;
-import org.opends.server.admin.std.meta.LocalDBIndexCfgDefn.IndexType;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
-import org.opends.server.admin.std.server.LocalDBIndexCfg;
-import org.opends.server.api.DiskSpaceMonitorHandler;
-import org.opends.server.backends.RebuildConfig;
-import org.opends.server.backends.RebuildConfig.RebuildMode;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.core.ServerContext;
-import org.opends.server.extensions.DiskSpaceMonitor;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.DN;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.Entry;
-import org.opends.server.types.InitializationException;
-import org.opends.server.types.LDIFImportConfig;
-import org.opends.server.types.LDIFImportResult;
-import org.opends.server.util.Platform;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.Cursor;
-import com.sleepycat.je.CursorConfig;
-import com.sleepycat.je.Database;
-import com.sleepycat.je.DatabaseConfig;
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.DiskOrderedCursor;
-import com.sleepycat.je.DiskOrderedCursorConfig;
-import com.sleepycat.je.Environment;
-import com.sleepycat.je.EnvironmentConfig;
-import com.sleepycat.je.EnvironmentStats;
-import com.sleepycat.je.LockMode;
-import com.sleepycat.je.OperationStatus;
-import com.sleepycat.je.StatsConfig;
-import com.sleepycat.je.Transaction;
-import com.sleepycat.util.PackedInteger;
-
-/**
- * This class provides the engine that performs both importing of LDIF files and
- * the rebuilding of indexes.
- */
-final class Importer implements DiskSpaceMonitorHandler
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- private static final int TIMER_INTERVAL = 10000;
- private static final String DEFAULT_TMP_DIR = "import-tmp";
- private static final String TMPENV_DIR = "tmp-env";
-
- /** Defaults for DB cache. */
- private static final int MAX_DB_CACHE_SIZE = 8 * MB;
- private static final int MAX_DB_LOG_SIZE = 10 * MB;
- private static final int MIN_DB_CACHE_SIZE = 4 * MB;
-
- /**
- * Defaults for LDIF reader buffers, min memory required to import and default
- * size for byte buffers.
- */
- private static final int READER_WRITER_BUFFER_SIZE = 8 * KB;
- private static final int MIN_DB_CACHE_MEMORY = MAX_DB_CACHE_SIZE
- + MAX_DB_LOG_SIZE;
- private static final int BYTE_BUFFER_CAPACITY = 128;
-
- /** Max size of phase one buffer. */
- private static final int MAX_BUFFER_SIZE = 2 * MB;
- /** Min size of phase one buffer. */
- private static final int MIN_BUFFER_SIZE = 4 * KB;
- /** Min size of phase two read-ahead cache. */
- private static final int MIN_READ_AHEAD_CACHE_SIZE = 2 * KB;
- /** Small heap threshold used to give more memory to JVM to attempt OOM errors. */
- private static final int SMALL_HEAP_SIZE = 256 * MB;
- /** Minimum memory needed for import */
- private static final int MINIMUM_AVAILABLE_MEMORY = 32 * MB;
-
- /** The DN attribute type. */
- private static final AttributeType dnType = DirectoryServer.getAttributeTypeOrDefault("dn");
- static final IndexOutputBuffer.IndexComparator indexComparator =
- new IndexOutputBuffer.IndexComparator();
-
- /** Phase one buffer count. */
- private final AtomicInteger bufferCount = new AtomicInteger(0);
- /** Phase one imported entries count. */
- private final AtomicLong importCount = new AtomicLong(0);
-
- /** Phase one buffer size in bytes. */
- private int bufferSize;
-
- /** Temp scratch directory. */
- private final File tempDir;
-
- /** Index count. */
- private final int indexCount;
- /** Thread count. */
- private int threadCount;
-
- /** Set to true when validation is skipped. */
- private final boolean skipDNValidation;
-
- /** Temporary environment used when DN validation is done in first phase. */
- private final TmpEnv tmpEnv;
-
- /** Root container. */
- private RootContainer rootContainer;
-
- /** Import configuration. */
- private final LDIFImportConfig importConfiguration;
- /** Backend configuration. */
- private final LocalDBBackendCfg backendConfiguration;
-
- /** LDIF reader. */
- private ImportLDIFReader reader;
-
- /** Migrated entry count. */
- private int migratedCount;
-
- /** Size in bytes of temporary env. */
- private long tmpEnvCacheSize;
- /** Available memory at the start of the import. */
- private long availableMemory;
- /** Size in bytes of DB cache. */
- private long dbCacheSize;
-
- /** The executor service used for the buffer sort tasks. */
- private ExecutorService bufferSortService;
- /** The executor service used for the scratch file processing tasks. */
- private ExecutorService scratchFileWriterService;
-
- /** Queue of free index buffers -- used to re-cycle index buffers. */
- private final BlockingQueue<IndexOutputBuffer> freeBufferQueue = new LinkedBlockingQueue<>();
-
- /**
- * Map of index keys to index buffers. Used to allocate sorted index buffers
- * to a index writer thread.
- */
- private final Map<IndexKey, BlockingQueue<IndexOutputBuffer>> indexKeyQueueMap = new ConcurrentHashMap<>();
-
- /** Map of DB containers to index managers. Used to start phase 2. */
- private final List<IndexManager> indexMgrList = new LinkedList<>();
- /** Map of DB containers to DN-based index managers. Used to start phase 2. */
- private final List<IndexManager> DNIndexMgrList = new LinkedList<>();
-
- /**
- * Futures used to indicate when the index file writers are done flushing
- * their work queues and have exited. End of phase one.
- */
- private final List<Future<Void>> scratchFileWriterFutures;
- /**
- * List of index file writer tasks. Used to signal stopScratchFileWriters to
- * the index file writer tasks when the LDIF file has been done.
- */
- private final List<ScratchFileWriterTask> scratchFileWriterList;
-
- /** Map of DNs to Suffix objects. */
- private final Map<DN, Suffix> dnSuffixMap = new LinkedHashMap<>();
- /** Map of container ids to database containers. */
- private final ConcurrentHashMap<Integer, Index> idContainerMap = new ConcurrentHashMap<>();
- /** Map of container ids to entry containers. */
- private final ConcurrentHashMap<Integer, Suffix> idSuffixMap = new ConcurrentHashMap<>();
-
- /** Used to synchronize when a scratch file index writer is first setup. */
- private final Object synObj = new Object();
-
- /** Rebuild index manager used when rebuilding indexes. */
- private final RebuildIndexManager rebuildManager;
-
- /** Set to true if the backend was cleared. */
- private final boolean clearedBackend;
-
- /** Used to shutdown import if an error occurs in phase one. */
- private volatile boolean isCanceled;
- private volatile boolean isPhaseOneDone;
-
- /** Number of phase one buffers. */
- private int phaseOneBufferCount;
-
- private final DiskSpaceMonitor diskSpaceMonitor;
-
- /**
- * Create a new import job with the specified rebuild index config.
- *
- * @param rebuildConfig
- * The rebuild index configuration.
- * @param cfg
- * The local DB back-end configuration.
- * @param envConfig
- * The JEB environment config.
- * @param serverContext
- * The ServerContext for this Directory Server instance
- * @throws InitializationException
- * If a problem occurs during initialization.
- * @throws JebException
- * If an error occurred when opening the DB.
- * @throws ConfigException
- * If a problem occurs during initialization.
- */
- public Importer(RebuildConfig rebuildConfig, LocalDBBackendCfg cfg, EnvironmentConfig envConfig,
- ServerContext serverContext) throws InitializationException,
- JebException, ConfigException
- {
- this.importConfiguration = null;
- this.backendConfiguration = cfg;
- this.tmpEnv = null;
- this.threadCount = 1;
- this.diskSpaceMonitor = serverContext.getDiskSpaceMonitor();
- this.rebuildManager = new RebuildIndexManager(rebuildConfig, cfg);
- this.indexCount = rebuildManager.getIndexCount();
- this.clearedBackend = false;
- this.scratchFileWriterList = new ArrayList<>(indexCount);
- this.scratchFileWriterFutures = new CopyOnWriteArrayList<>();
-
- this.tempDir = getTempDir(cfg, rebuildConfig.getTmpDirectory());
- recursiveDelete(tempDir);
- if (!tempDir.exists() && !tempDir.mkdirs())
- {
- throw new InitializationException(ERR_IMPORT_CREATE_TMPDIR_ERROR.get(tempDir));
- }
- this.skipDNValidation = true;
- initializeDBEnv(envConfig);
- }
-
- /**
- * Create a new import job with the specified ldif import config.
- *
- * @param importConfiguration
- * The LDIF import configuration.
- * @param localDBBackendCfg
- * The local DB back-end configuration.
- * @param envConfig
- * The JEB environment config.
- * @param serverContext
- * The ServerContext for this Directory Server instance
- * @throws InitializationException
- * If a problem occurs during initialization.
- * @throws ConfigException
- * If a problem occurs reading the configuration.
- * @throws DatabaseException
- * If an error occurred when opening the DB.
- */
- public Importer(LDIFImportConfig importConfiguration, LocalDBBackendCfg localDBBackendCfg,
- EnvironmentConfig envConfig, ServerContext serverContext)
- throws InitializationException, ConfigException, DatabaseException
- {
- this.rebuildManager = null;
- this.importConfiguration = importConfiguration;
- this.backendConfiguration = localDBBackendCfg;
- this.diskSpaceMonitor = serverContext.getDiskSpaceMonitor();
-
- if (importConfiguration.getThreadCount() == 0)
- {
- this.threadCount = Runtime.getRuntime().availableProcessors() * 2;
- }
- else
- {
- this.threadCount = importConfiguration.getThreadCount();
- }
-
- // Determine the number of indexes.
- this.indexCount = getTotalIndexCount(localDBBackendCfg);
-
- this.clearedBackend = mustClearBackend(importConfiguration, localDBBackendCfg);
- this.scratchFileWriterList = new ArrayList<>(indexCount);
- this.scratchFileWriterFutures = new CopyOnWriteArrayList<>();
-
- this.tempDir = getTempDir(localDBBackendCfg, importConfiguration.getTmpDirectory());
- recursiveDelete(tempDir);
- if (!tempDir.exists() && !tempDir.mkdirs())
- {
- throw new InitializationException(ERR_IMPORT_CREATE_TMPDIR_ERROR.get(tempDir));
- }
- skipDNValidation = importConfiguration.getSkipDNValidation();
- initializeDBEnv(envConfig);
-
- // Set up temporary environment.
- if (!skipDNValidation)
- {
- File envPath = new File(tempDir, TMPENV_DIR);
- envPath.mkdirs();
- this.tmpEnv = new TmpEnv(envPath);
- }
- else
- {
- this.tmpEnv = null;
- }
- }
-
- /**
- * Returns whether the backend must be cleared.
- *
- * @param importCfg the import configuration object
- * @param backendCfg the backend configuration object
- * @return true if the backend must be cleared, false otherwise
- */
- public static boolean mustClearBackend(LDIFImportConfig importCfg, LocalDBBackendCfg backendCfg)
- {
- return !importCfg.appendToExistingData()
- && (importCfg.clearBackend() || backendCfg.getBaseDN().size() <= 1);
- }
-
- private File getTempDir(LocalDBBackendCfg localDBBackendCfg, String tmpDirectory)
- {
- File parentDir;
- if (tmpDirectory != null)
- {
- parentDir = getFileForPath(tmpDirectory);
- }
- else
- {
- parentDir = getFileForPath(DEFAULT_TMP_DIR);
- }
- return new File(parentDir, localDBBackendCfg.getBackendId());
- }
-
- private int getTotalIndexCount(LocalDBBackendCfg localDBBackendCfg)
- throws ConfigException
- {
- int indexes = 2; // dn2id, dn2uri
- for (String indexName : localDBBackendCfg.listLocalDBIndexes())
- {
- LocalDBIndexCfg index = localDBBackendCfg.getLocalDBIndex(indexName);
- SortedSet<IndexType> types = index.getIndexType();
- if (types.contains(IndexType.EXTENSIBLE))
- {
- indexes += types.size() - 1 + index.getIndexExtensibleMatchingRule().size();
- }
- else
- {
- indexes += types.size();
- }
- }
- return indexes;
- }
-
- /**
- * Return the suffix instance in the specified map that matches the specified
- * DN.
- *
- * @param dn
- * The DN to search for.
- * @param map
- * The map to search.
- * @return The suffix instance that matches the DN, or null if no match is
- * found.
- */
- public static Suffix getMatchSuffix(DN dn, Map<DN, Suffix> map)
- {
- Suffix suffix = null;
- DN nodeDN = dn;
-
- while (suffix == null && nodeDN != null)
- {
- suffix = map.get(nodeDN);
- if (suffix == null)
- {
- nodeDN = nodeDN.getParentDNInSuffix();
- }
- }
- return suffix;
- }
-
- /**
- * Calculate buffer sizes and initialize JEB properties based on memory.
- *
- * @param envConfig
- * The environment config to use in the calculations.
- * @throws InitializationException
- * If a problem occurs during calculation.
- */
- private void initializeDBEnv(EnvironmentConfig envConfig) throws InitializationException
- {
- // Calculate amount of usable memory. This will need to take into account
- // various fudge factors, including the number of IO buffers used by the
- // scratch writers (1 per index).
- calculateAvailableMemory();
-
- final long usableMemory = availableMemory - (indexCount * READER_WRITER_BUFFER_SIZE);
-
- // We need caching when doing DN validation or rebuilding indexes.
- if (!skipDNValidation || rebuildManager != null)
- {
- // No DN validation: calculate memory for DB cache, DN2ID temporary cache,
- // and buffers.
- if (System.getProperty(PROPERTY_RUNNING_UNIT_TESTS) != null)
- {
- dbCacheSize = 500 * KB;
- tmpEnvCacheSize = 500 * KB;
- }
- else if (usableMemory < (MIN_DB_CACHE_MEMORY + MIN_DB_CACHE_SIZE))
- {
- dbCacheSize = MIN_DB_CACHE_SIZE;
- tmpEnvCacheSize = MIN_DB_CACHE_SIZE;
- }
- else if (!clearedBackend)
- {
- // Appending to existing data so reserve extra memory for the DB cache
- // since it will be needed for dn2id queries.
- dbCacheSize = usableMemory * 33 / 100;
- tmpEnvCacheSize = usableMemory * 33 / 100;
- }
- else
- {
- dbCacheSize = MAX_DB_CACHE_SIZE;
- tmpEnvCacheSize = usableMemory * 66 / 100;
- }
- }
- else
- {
- // No DN validation: calculate memory for DB cache and buffers.
-
- // No need for DN2ID cache.
- tmpEnvCacheSize = 0;
-
- if (System.getProperty(PROPERTY_RUNNING_UNIT_TESTS) != null)
- {
- dbCacheSize = 500 * KB;
- }
- else if (usableMemory < MIN_DB_CACHE_MEMORY)
- {
- dbCacheSize = MIN_DB_CACHE_SIZE;
- }
- else
- {
- // No need to differentiate between append/clear backend, since dn2id is
- // not being queried.
- dbCacheSize = MAX_DB_CACHE_SIZE;
- }
- }
-
- final long phaseOneBufferMemory = usableMemory - dbCacheSize - tmpEnvCacheSize;
- final int oldThreadCount = threadCount;
- if (indexCount != 0) // Avoid / by zero
- {
- while (true)
- {
- phaseOneBufferCount = 2 * indexCount * threadCount;
-
- // Scratch writers allocate 4 buffers per index as well.
- final int totalPhaseOneBufferCount = phaseOneBufferCount + (4 * indexCount);
- long longBufferSize = phaseOneBufferMemory / totalPhaseOneBufferCount;
- // We need (2 * bufferSize) to fit in an int for the insertByteStream
- // and deleteByteStream constructors.
- bufferSize = (int) Math.min(longBufferSize, Integer.MAX_VALUE / 2);
-
- if (bufferSize > MAX_BUFFER_SIZE)
- {
- if (!skipDNValidation)
- {
- // The buffers are big enough: the memory is best used for the DN2ID temp DB
- bufferSize = MAX_BUFFER_SIZE;
-
- final long extraMemory = phaseOneBufferMemory - (totalPhaseOneBufferCount * bufferSize);
- if (!clearedBackend)
- {
- dbCacheSize += extraMemory / 2;
- tmpEnvCacheSize += extraMemory / 2;
- }
- else
- {
- tmpEnvCacheSize += extraMemory;
- }
- }
-
- break;
- }
- else if (bufferSize > MIN_BUFFER_SIZE)
- {
- // This is acceptable.
- break;
- }
- else if (threadCount > 1)
- {
- // Retry using less threads.
- threadCount--;
- }
- else
- {
- // Not enough memory.
- final long minimumPhaseOneBufferMemory = totalPhaseOneBufferCount * MIN_BUFFER_SIZE;
- LocalizableMessage message =
- ERR_IMPORT_LDIF_LACK_MEM.get(usableMemory,
- minimumPhaseOneBufferMemory + dbCacheSize + tmpEnvCacheSize);
- throw new InitializationException(message);
- }
- }
- }
-
- if (oldThreadCount != threadCount)
- {
- logger.info(NOTE_IMPORT_ADJUST_THREAD_COUNT, oldThreadCount, threadCount);
- }
-
- logger.info(NOTE_IMPORT_LDIF_TOT_MEM_BUF, availableMemory, phaseOneBufferCount);
- if (tmpEnvCacheSize > 0)
- {
- logger.info(NOTE_IMPORT_LDIF_TMP_ENV_MEM, tmpEnvCacheSize);
- }
- envConfig.setConfigParam(MAX_MEMORY, Long.toString(dbCacheSize));
- logger.info(NOTE_IMPORT_LDIF_DB_MEM_BUF_INFO, dbCacheSize, bufferSize);
- }
-
- /**
- * Calculates the amount of available memory which can be used by this import,
- * taking into account whether or not the import is running offline or online
- * as a task.
- */
- private void calculateAvailableMemory()
- {
- final long totalAvailableMemory;
- if (DirectoryServer.isRunning())
- {
- // Online import/rebuild.
- Runtime runTime = Runtime.getRuntime();
- // call twice gc to ensure finalizers are called
- // and young to old gen references are properly gc'd
- runTime.gc();
- runTime.gc();
- final long usedMemory = runTime.totalMemory() - runTime.freeMemory();
- final long maxUsableMemory = Platform.getUsableMemoryForCaching();
- final long usableMemory = maxUsableMemory - usedMemory;
-
- final long configuredMemory;
- if (backendConfiguration.getDBCacheSize() > 0)
- {
- configuredMemory = backendConfiguration.getDBCacheSize();
- }
- else
- {
- configuredMemory = backendConfiguration.getDBCachePercent() * Runtime.getRuntime().maxMemory() / 100;
- }
-
- // Round up to minimum of 32MB (e.g. unit tests only use a small cache).
- totalAvailableMemory = Math.max(Math.min(usableMemory, configuredMemory), MINIMUM_AVAILABLE_MEMORY);
- }
- else
- {
- // Offline import/rebuild.
- totalAvailableMemory = Platform.getUsableMemoryForCaching();
- }
-
- // Now take into account various fudge factors.
- int importMemPct = 90;
- if (totalAvailableMemory <= SMALL_HEAP_SIZE)
- {
- // Be pessimistic when memory is low.
- importMemPct -= 25;
- }
- if (rebuildManager != null)
- {
- // Rebuild seems to require more overhead.
- importMemPct -= 15;
- }
-
- availableMemory = totalAvailableMemory * importMemPct / 100;
- }
-
- private void initializeIndexBuffers()
- {
- for (int i = 0; i < phaseOneBufferCount; i++)
- {
- freeBufferQueue.add(new IndexOutputBuffer(bufferSize));
- }
- }
-
- private void initializeSuffixes() throws DatabaseException, ConfigException,
- InitializationException
- {
- for (EntryContainer ec : rootContainer.getEntryContainers())
- {
- Suffix suffix = getSuffix(ec);
- if (suffix != null)
- {
- dnSuffixMap.put(ec.getBaseDN(), suffix);
- }
- }
- }
-
- /**
- * Mainly used to support multiple suffixes. Each index in each suffix gets an
- * unique ID to identify which DB it needs to go to in phase two processing.
- */
- private void generateIndexID(Suffix suffix)
- {
- for (AttributeIndex attributeIndex : suffix.getAttributeIndexes())
- {
- for(Index index : attributeIndex.getAllIndexes()) {
- putInIdContainerMap(index);
- }
- }
- }
-
- private void putInIdContainerMap(Index index)
- {
- if (index != null)
- {
- idContainerMap.putIfAbsent(getIndexID(index), index);
- }
- }
-
- private static int getIndexID(DatabaseContainer index)
- {
- return System.identityHashCode(index);
- }
-
- private Suffix getSuffix(EntryContainer entryContainer)
- throws ConfigException, InitializationException
- {
- DN baseDN = entryContainer.getBaseDN();
- EntryContainer sourceEntryContainer = null;
- List<DN> includeBranches = new ArrayList<>();
- List<DN> excludeBranches = new ArrayList<>();
-
- if (!importConfiguration.appendToExistingData()
- && !importConfiguration.clearBackend())
- {
- for (DN dn : importConfiguration.getExcludeBranches())
- {
- if (baseDN.equals(dn))
- {
- // This entire base DN was explicitly excluded. Skip.
- return null;
- }
- if (baseDN.isAncestorOf(dn))
- {
- excludeBranches.add(dn);
- }
- }
-
- if (!importConfiguration.getIncludeBranches().isEmpty())
- {
- for (DN dn : importConfiguration.getIncludeBranches())
- {
- if (baseDN.isAncestorOf(dn))
- {
- includeBranches.add(dn);
- }
- }
-
- if (includeBranches.isEmpty())
- {
- /*
- * There are no branches in the explicitly defined include list under
- * this base DN. Skip this base DN all together.
- */
- return null;
- }
-
- // Remove any overlapping include branches.
- Iterator<DN> includeBranchIterator = includeBranches.iterator();
- while (includeBranchIterator.hasNext())
- {
- DN includeDN = includeBranchIterator.next();
- if (!isAnyNotEqualAndAncestorOf(includeBranches, includeDN))
- {
- includeBranchIterator.remove();
- }
- }
-
- // Remove any exclude branches that are not are not under a include
- // branch since they will be migrated as part of the existing entries
- // outside of the include branches anyways.
- Iterator<DN> excludeBranchIterator = excludeBranches.iterator();
- while (excludeBranchIterator.hasNext())
- {
- DN excludeDN = excludeBranchIterator.next();
- if (!isAnyAncestorOf(includeBranches, excludeDN))
- {
- excludeBranchIterator.remove();
- }
- }
-
- if (excludeBranches.isEmpty()
- && includeBranches.size() == 1
- && includeBranches.get(0).equals(baseDN))
- {
- // This entire base DN is explicitly included in the import with
- // no exclude branches that we need to migrate. Just clear the entry
- // container.
- clearSuffix(entryContainer);
- }
- else
- {
- // Create a temp entry container
- sourceEntryContainer = entryContainer;
- entryContainer = rootContainer.openEntryContainer(baseDN, baseDN.toNormalizedUrlSafeString()
- + "_importTmp");
- }
- }
- }
- return new Suffix(entryContainer, sourceEntryContainer, includeBranches, excludeBranches);
- }
-
- private void clearSuffix(EntryContainer entryContainer)
- {
- entryContainer.lock();
- entryContainer.clear();
- entryContainer.unlock();
- }
-
- private boolean isAnyNotEqualAndAncestorOf(List<DN> dns, DN childDN)
- {
- for (DN dn : dns)
- {
- if (!dn.equals(childDN) && dn.isAncestorOf(childDN))
- {
- return false;
- }
- }
- return true;
- }
-
- private boolean isAnyAncestorOf(List<DN> dns, DN childDN)
- {
- for (DN dn : dns)
- {
- if (dn.isAncestorOf(childDN))
- {
- return true;
- }
- }
- return false;
- }
-
- /**
- * Rebuild the indexes using the specified root container.
- *
- * @param rootContainer
- * The root container to rebuild indexes in.
- * @throws ConfigException
- * If a configuration error occurred.
- * @throws InitializationException
- * If an initialization error occurred.
- * @throws JebException
- * If the JEB database had an error.
- * @throws InterruptedException
- * If an interrupted error occurred.
- * @throws ExecutionException
- * If an execution error occurred.
- */
- public void rebuildIndexes(RootContainer rootContainer)
- throws ConfigException, InitializationException, JebException,
- InterruptedException, ExecutionException
- {
- this.rootContainer = rootContainer;
- long startTime = System.currentTimeMillis();
-
- updateDiskMonitor(tempDir, "backend index rebuild tmp directory");
- File parentDirectory = getFileForPath(backendConfiguration.getDBDirectory());
- File backendDirectory = new File(parentDirectory, backendConfiguration.getBackendId());
- updateDiskMonitor(backendDirectory, "backend index rebuild DB directory");
-
- try
- {
- rebuildManager.initialize();
- rebuildManager.printStartMessage();
- rebuildManager.rebuildIndexes();
- recursiveDelete(tempDir);
- rebuildManager.printStopMessage(startTime);
- }
- finally
- {
- diskSpaceMonitor.deregisterMonitoredDirectory(tempDir, this);
- diskSpaceMonitor.deregisterMonitoredDirectory(backendDirectory, this);
- }
- }
-
- /**
- * Import a LDIF using the specified root container.
- *
- * @param rootContainer
- * The root container to use during the import.
- * @return A LDIF result.
- * @throws ConfigException
- * If the import failed because of an configuration error.
- * @throws InitializationException
- * If the import failed because of an initialization error.
- * @throws JebException
- * If the import failed due to a database error.
- * @throws InterruptedException
- * If the import failed due to an interrupted error.
- * @throws ExecutionException
- * If the import failed due to an execution error.
- */
- public LDIFImportResult processImport(RootContainer rootContainer)
- throws ConfigException, InitializationException, JebException,
- InterruptedException, ExecutionException
- {
- this.rootContainer = rootContainer;
- File parentDirectory = getFileForPath(backendConfiguration.getDBDirectory());
- File backendDirectory = new File(parentDirectory, backendConfiguration.getBackendId());
- try {
- try
- {
- reader = new ImportLDIFReader(importConfiguration, rootContainer);
- }
- catch (IOException ioe)
- {
- throw new InitializationException(ERR_IMPORT_LDIF_READER_IO_ERROR.get(), ioe);
- }
-
- updateDiskMonitor(tempDir, "backend import tmp directory");
- updateDiskMonitor(backendDirectory, "backend import DB directory");
-
- logger.info(NOTE_IMPORT_STARTING, DirectoryServer.getVersionString(), BUILD_ID, REVISION);
- logger.info(NOTE_IMPORT_THREAD_COUNT, threadCount);
- initializeSuffixes();
- setupIndexesForImport();
-
- final long startTime = System.currentTimeMillis();
- phaseOne();
- isPhaseOneDone = true;
- final long phaseOneFinishTime = System.currentTimeMillis();
-
- if (!skipDNValidation)
- {
- tmpEnv.shutdown();
- }
- if (isCanceled)
- {
- throw new InterruptedException("Import processing canceled.");
- }
-
- final long phaseTwoTime = System.currentTimeMillis();
- phaseTwo();
- if (isCanceled)
- {
- throw new InterruptedException("Import processing canceled.");
- }
- final long phaseTwoFinishTime = System.currentTimeMillis();
-
- setIndexesTrusted();
- switchEntryContainers();
- recursiveDelete(tempDir);
- final long finishTime = System.currentTimeMillis();
- final long importTime = finishTime - startTime;
- logger.info(NOTE_IMPORT_PHASE_STATS, importTime / 1000,
- (phaseOneFinishTime - startTime) / 1000,
- (phaseTwoFinishTime - phaseTwoTime) / 1000);
- float rate = 0;
- if (importTime > 0)
- {
- rate = 1000f * reader.getEntriesRead() / importTime;
- }
- logger.info(NOTE_IMPORT_FINAL_STATUS, reader.getEntriesRead(), importCount.get(),
- reader.getEntriesIgnored(), reader.getEntriesRejected(),
- migratedCount, importTime / 1000, rate);
- return new LDIFImportResult(reader.getEntriesRead(),
- reader.getEntriesRejected(), reader.getEntriesIgnored());
- }
- finally
- {
- close(reader);
- if (!skipDNValidation)
- {
- try
- {
- tmpEnv.shutdown();
- }
- catch (Exception ignored)
- {
- // Do nothing.
- }
- }
- diskSpaceMonitor.deregisterMonitoredDirectory(tempDir, this);
- diskSpaceMonitor.deregisterMonitoredDirectory(backendDirectory, this);
- }
- }
-
- private void updateDiskMonitor(File dir, String backendSuffix)
- {
- diskSpaceMonitor.registerMonitoredDirectory(backendConfiguration.getBackendId() + " " + backendSuffix, dir,
- backendConfiguration.getDiskLowThreshold(), backendConfiguration.getDiskFullThreshold(), this);
- }
-
- private void recursiveDelete(File dir)
- {
- if (dir.listFiles() != null)
- {
- for (File f : dir.listFiles())
- {
- if (f.isDirectory())
- {
- recursiveDelete(f);
- }
- f.delete();
- }
- }
- dir.delete();
- }
-
- private void switchEntryContainers() throws DatabaseException, JebException, InitializationException
- {
- for (Suffix suffix : dnSuffixMap.values())
- {
- DN baseDN = suffix.getBaseDN();
- EntryContainer entryContainer = suffix.getSrcEntryContainer();
- if (entryContainer != null)
- {
- final EntryContainer toDelete = rootContainer.unregisterEntryContainer(baseDN);
- toDelete.lock();
- toDelete.close();
- toDelete.delete();
- toDelete.unlock();
-
- final EntryContainer replacement = suffix.getEntryContainer();
- replacement.lock();
- replacement.setDatabasePrefix(baseDN.toNormalizedUrlSafeString());
- replacement.unlock();
- rootContainer.registerEntryContainer(baseDN, replacement);
- }
- }
- }
-
- private void setIndexesTrusted() throws JebException
- {
- try
- {
- for (Suffix s : dnSuffixMap.values())
- {
- s.setIndexesTrusted();
- }
- }
- catch (DatabaseException ex)
- {
- throw new JebException(NOTE_IMPORT_LDIF_TRUSTED_FAILED.get(ex.getMessage()));
- }
- }
-
- private void setupIndexesForImport()
- {
- for (Suffix s : dnSuffixMap.values())
- {
- s.setIndexesNotTrusted(importConfiguration.appendToExistingData());
- generateIndexID(s);
- }
- }
-
- private void phaseOne() throws InterruptedException, ExecutionException
- {
- initializeIndexBuffers();
-
- final ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor(1);
- scheduleAtFixedRate(timerService, new FirstPhaseProgressTask());
- scratchFileWriterService = Executors.newFixedThreadPool(2 * indexCount);
- bufferSortService = Executors.newFixedThreadPool(threadCount);
- final ExecutorService execService = Executors.newFixedThreadPool(threadCount);
-
- final List<Callable<Void>> tasks = new ArrayList<>(threadCount);
- tasks.add(new MigrateExistingTask());
- getAll(execService.invokeAll(tasks));
- tasks.clear();
-
- if (importConfiguration.appendToExistingData()
- && importConfiguration.replaceExistingEntries())
- {
- for (int i = 0; i < threadCount; i++)
- {
- tasks.add(new AppendReplaceTask());
- }
- }
- else
- {
- for (int i = 0; i < threadCount; i++)
- {
- tasks.add(new ImportTask());
- }
- }
- getAll(execService.invokeAll(tasks));
- tasks.clear();
-
- tasks.add(new MigrateExcludedTask());
- getAll(execService.invokeAll(tasks));
-
- stopScratchFileWriters();
- getAll(scratchFileWriterFutures);
-
- shutdownAll(timerService, execService, bufferSortService, scratchFileWriterService);
-
- // Try to clear as much memory as possible.
- clearAll(scratchFileWriterList, scratchFileWriterFutures, freeBufferQueue);
- indexKeyQueueMap.clear();
- }
-
- private void scheduleAtFixedRate(ScheduledThreadPoolExecutor timerService, Runnable task)
- {
- timerService.scheduleAtFixedRate(task, TIMER_INTERVAL, TIMER_INTERVAL, TimeUnit.MILLISECONDS);
- }
-
- private void shutdownAll(ExecutorService... executorServices) throws InterruptedException
- {
- for (ExecutorService executorService : executorServices)
- {
- executorService.shutdown();
- }
- for (ExecutorService executorService : executorServices)
- {
- executorService.awaitTermination(30, TimeUnit.SECONDS);
- }
- }
-
- private void clearAll(Collection<?>... cols)
- {
- for (Collection<?> col : cols)
- {
- col.clear();
- }
- }
-
- private void phaseTwo() throws InterruptedException, ExecutionException
- {
- ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor(1);
- scheduleAtFixedRate(timerService, new SecondPhaseProgressTask(reader.getEntriesRead()));
- try
- {
- processIndexFiles();
- }
- finally
- {
- shutdownAll(timerService);
- }
- }
-
- private void processIndexFiles() throws InterruptedException, ExecutionException
- {
- if (bufferCount.get() == 0)
- {
- return;
- }
- int dbThreads = Runtime.getRuntime().availableProcessors();
- if (dbThreads < 4)
- {
- dbThreads = 4;
- }
-
- // Calculate memory / buffer counts.
- final long usableMemory = availableMemory - dbCacheSize;
- int readAheadSize;
- int buffers;
- while (true)
- {
- final List<IndexManager> allIndexMgrs = new ArrayList<>(DNIndexMgrList);
- allIndexMgrs.addAll(indexMgrList);
- Collections.sort(allIndexMgrs, Collections.reverseOrder());
-
- buffers = 0;
- final int limit = Math.min(dbThreads, allIndexMgrs.size());
- for (int i = 0; i < limit; i++)
- {
- buffers += allIndexMgrs.get(i).numberOfBuffers;
- }
-
- readAheadSize = (int) (usableMemory / buffers);
- if (readAheadSize > bufferSize)
- {
- // Cache size is never larger than the buffer size.
- readAheadSize = bufferSize;
- break;
- }
- else if (readAheadSize > MIN_READ_AHEAD_CACHE_SIZE)
- {
- // This is acceptable.
- break;
- }
- else if (dbThreads > 1)
- {
- // Reduce thread count.
- dbThreads--;
- }
- else
- {
- // Not enough memory - will need to do batching for the biggest indexes.
- readAheadSize = MIN_READ_AHEAD_CACHE_SIZE;
- buffers = (int) (usableMemory / readAheadSize);
-
- logger.warn(WARN_IMPORT_LDIF_LACK_MEM_PHASE_TWO, usableMemory);
- break;
- }
- }
-
- // Ensure that there are minimum two threads available for parallel
- // processing of smaller indexes.
- dbThreads = Math.max(2, dbThreads);
-
- logger.info(NOTE_IMPORT_LDIF_PHASE_TWO_MEM_REPORT, availableMemory, readAheadSize, buffers);
-
- // Start indexing tasks.
- List<Future<Void>> futures = new LinkedList<>();
- ExecutorService dbService = Executors.newFixedThreadPool(dbThreads);
- Semaphore permits = new Semaphore(buffers);
-
- // Start DN processing first.
- submitIndexDBWriteTasks(DNIndexMgrList, dbService, permits, buffers, readAheadSize, futures);
- submitIndexDBWriteTasks(indexMgrList, dbService, permits, buffers, readAheadSize, futures);
- getAll(futures);
- shutdownAll(dbService);
- }
-
- private void submitIndexDBWriteTasks(List<IndexManager> indexMgrs, ExecutorService dbService, Semaphore permits,
- int buffers, int readAheadSize, List<Future<Void>> futures)
- {
- for (IndexManager indexMgr : indexMgrs)
- {
- futures.add(dbService.submit(new IndexDBWriteTask(indexMgr, permits, buffers, readAheadSize)));
- }
- }
-
- private <T> void getAll(List<Future<T>> futures) throws InterruptedException, ExecutionException
- {
- for (Future<?> result : futures)
- {
- result.get();
- }
- }
-
- private void stopScratchFileWriters()
- {
- final IndexOutputBuffer stopProcessing = IndexOutputBuffer.poison();
- for (ScratchFileWriterTask task : scratchFileWriterList)
- {
- task.queue.add(stopProcessing);
- }
- }
-
- /** Task used to migrate excluded branch. */
- private final class MigrateExcludedTask extends ImportTask
- {
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception
- {
- for (Suffix suffix : dnSuffixMap.values())
- {
- EntryContainer entryContainer = suffix.getSrcEntryContainer();
- if (entryContainer != null && !suffix.getExcludeBranches().isEmpty())
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- LockMode lockMode = LockMode.DEFAULT;
- logger.info(NOTE_IMPORT_MIGRATION_START, "excluded", suffix.getBaseDN());
- Cursor cursor = entryContainer.getDN2ID().openCursor(null, CursorConfig.READ_COMMITTED);
- Comparator<byte[]> comparator = entryContainer.getDN2ID().getComparator();
- try
- {
- for (DN excludedDN : suffix.getExcludeBranches())
- {
- byte[] bytes = JebFormat.dnToDNKey(excludedDN, suffix.getBaseDN().size());
- key.setData(bytes);
- OperationStatus status = cursor.getSearchKeyRange(key, data, lockMode);
- if (status == OperationStatus.SUCCESS
- && Arrays.equals(key.getData(), bytes))
- {
- // This is the base entry for a branch that was excluded in the
- // import so we must migrate all entries in this branch over to
- // the new entry container.
- byte[] end = Arrays.copyOf(bytes, bytes.length + 1);
- end[end.length - 1] = 0x01;
-
- while (status == OperationStatus.SUCCESS
- && comparator.compare(key.getData(), end) < 0
- && !importConfiguration.isCancelled() && !isCanceled)
- {
- EntryID id = new EntryID(data);
- Entry entry = entryContainer.getID2Entry().get(null, id, LockMode.DEFAULT);
- processEntry(entry, rootContainer.getNextEntryID(), suffix);
- migratedCount++;
- status = cursor.getNext(key, data, lockMode);
- }
- }
- }
- flushIndexBuffers();
- }
- catch (Exception e)
- {
- logger.error(ERR_IMPORT_LDIF_MIGRATE_EXCLUDED_TASK_ERR, e.getMessage());
- isCanceled = true;
- throw e;
- }
- finally
- {
- close(cursor);
- }
- }
- }
- return null;
- }
- }
-
- /** Task to migrate existing entries. */
- private final class MigrateExistingTask extends ImportTask
- {
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception
- {
- for (Suffix suffix : dnSuffixMap.values())
- {
- EntryContainer entryContainer = suffix.getSrcEntryContainer();
- if (entryContainer != null && !suffix.getIncludeBranches().isEmpty())
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- LockMode lockMode = LockMode.DEFAULT;
- logger.info(NOTE_IMPORT_MIGRATION_START, "existing", suffix.getBaseDN());
- Cursor cursor = entryContainer.getDN2ID().openCursor(null, null);
- try
- {
- final List<byte[]> includeBranches = includeBranchesAsBytes(suffix);
- OperationStatus status = cursor.getFirst(key, data, lockMode);
- while (status == OperationStatus.SUCCESS
- && !importConfiguration.isCancelled() && !isCanceled)
- {
- if (!find(includeBranches, key.getData()))
- {
- EntryID id = new EntryID(data);
- Entry entry = entryContainer.getID2Entry().get(null, id, LockMode.DEFAULT);
- processEntry(entry, rootContainer.getNextEntryID(), suffix);
- migratedCount++;
- status = cursor.getNext(key, data, lockMode);
- }
- else
- {
- // This is the base entry for a branch that will be included
- // in the import so we don't want to copy the branch to the
- // new entry container.
-
- /*
- * Advance the cursor to next entry at the same level in the DIT
- * skipping all the entries in this branch. Set the next
- * starting value to a value of equal length but slightly
- * greater than the previous DN. Since keys are compared in
- * reverse order we must set the first byte (the comma). No
- * possibility of overflow here.
- */
- byte[] begin = Arrays.copyOf(key.getData(), key.getSize() + 1);
- begin[begin.length - 1] = 0x01;
- key.setData(begin);
- status = cursor.getSearchKeyRange(key, data, lockMode);
- }
- }
- flushIndexBuffers();
- }
- catch (Exception e)
- {
- logger.error(ERR_IMPORT_LDIF_MIGRATE_EXISTING_TASK_ERR, e.getMessage());
- isCanceled = true;
- throw e;
- }
- finally
- {
- close(cursor);
- }
- }
- }
- return null;
- }
-
- private List<byte[]> includeBranchesAsBytes(Suffix suffix)
- {
- List<byte[]> includeBranches = new ArrayList<>(suffix.getIncludeBranches().size());
- for (DN includeBranch : suffix.getIncludeBranches())
- {
- if (includeBranch.isDescendantOf(suffix.getBaseDN()))
- {
- includeBranches.add(JebFormat.dnToDNKey(includeBranch, suffix.getBaseDN().size()));
- }
- }
- return includeBranches;
- }
-
- private boolean find(List<byte[]> arrays, byte[] arrayToFind)
- {
- for (byte[] array : arrays)
- {
- if (Arrays.equals(array, arrayToFind))
- {
- return true;
- }
- }
- return false;
- }
- }
-
- /**
- * Task to perform append/replace processing.
- */
- private class AppendReplaceTask extends ImportTask
- {
- private final Set<ByteString> insertKeySet = new HashSet<>();
- private final Set<ByteString> deleteKeySet = new HashSet<>();
- private final EntryInformation entryInfo = new EntryInformation();
- private Entry oldEntry;
- private EntryID entryID;
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception
- {
- try
- {
- while (true)
- {
- if (importConfiguration.isCancelled() || isCanceled)
- {
- freeBufferQueue.add(IndexOutputBuffer.poison());
- return null;
- }
- oldEntry = null;
- Entry entry = reader.readEntry(dnSuffixMap, entryInfo);
- if (entry == null)
- {
- break;
- }
- entryID = entryInfo.getEntryID();
- Suffix suffix = entryInfo.getSuffix();
- processEntry(entry, suffix);
- }
- flushIndexBuffers();
- return null;
- }
- catch (Exception e)
- {
- logger.error(ERR_IMPORT_LDIF_APPEND_REPLACE_TASK_ERR, e.getMessage());
- isCanceled = true;
- throw e;
- }
- }
-
- void processEntry(Entry entry, Suffix suffix)
- throws DatabaseException, DirectoryException, JebException, InterruptedException
- {
- DN entryDN = entry.getName();
- DN2ID dn2id = suffix.getDN2ID();
- EntryID oldID = dn2id.get(null, entryDN, LockMode.DEFAULT);
- if (oldID != null)
- {
- oldEntry = suffix.getID2Entry().get(null, oldID, LockMode.DEFAULT);
- }
- if (oldEntry == null)
- {
- if (!skipDNValidation && !dnSanityCheck(entryDN, entry, suffix))
- {
- suffix.removePending(entryDN);
- return;
- }
- suffix.removePending(entryDN);
- processDN2ID(suffix, entryDN, entryID);
- }
- else
- {
- suffix.removePending(entryDN);
- entryID = oldID;
- }
- processDN2URI(suffix, oldEntry, entry);
- suffix.getID2Entry().put(null, entryID, entry);
- if (oldEntry != null)
- {
- processAllIndexes(suffix, entry, entryID);
- }
- else
- {
- processIndexes(suffix, entry, entryID);
- }
- importCount.getAndIncrement();
- }
-
- void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID)
- throws DatabaseException, DirectoryException, JebException, InterruptedException
- {
- for (AttributeIndex attrIndex : suffix.getAttributeIndexes())
- {
- fillIndexKey(suffix, attrIndex, entry, attrIndex.getAttributeType(), entryID);
- }
- }
-
- @Override
- void processAttribute(Index index, Entry entry, EntryID entryID, IndexKey indexKey)
- throws DatabaseException, InterruptedException
- {
- if (oldEntry != null)
- {
- deleteKeySet.clear();
- index.indexEntry(oldEntry, deleteKeySet);
- for (ByteString delKey : deleteKeySet)
- {
- processKey(index, delKey.toByteArray(), entryID, indexKey, false);
- }
- }
- insertKeySet.clear();
- index.indexEntry(entry, insertKeySet);
- for (ByteString key : insertKeySet)
- {
- processKey(index, key.toByteArray(), entryID, indexKey, true);
- }
- }
- }
-
- /**
- * This task performs phase reading and processing of the entries read from
- * the LDIF file(s). This task is used if the append flag wasn't specified.
- */
- private class ImportTask implements Callable<Void>
- {
- private final Map<IndexKey, IndexOutputBuffer> indexBufferMap = new HashMap<>();
- private final Set<ByteString> insertKeySet = new HashSet<>();
- private final EntryInformation entryInfo = new EntryInformation();
- private final IndexKey dnIndexKey = new IndexKey(dnType, ImportIndexType.DN.toString(), 1);
- private DatabaseEntry keyEntry = new DatabaseEntry();
- private DatabaseEntry valEntry = new DatabaseEntry();
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception
- {
- try
- {
- while (true)
- {
- if (importConfiguration.isCancelled() || isCanceled)
- {
- freeBufferQueue.add(IndexOutputBuffer.poison());
- return null;
- }
- Entry entry = reader.readEntry(dnSuffixMap, entryInfo);
- if (entry == null)
- {
- break;
- }
- EntryID entryID = entryInfo.getEntryID();
- Suffix suffix = entryInfo.getSuffix();
- processEntry(entry, entryID, suffix);
- }
- flushIndexBuffers();
- return null;
- }
- catch (Exception e)
- {
- logger.error(ERR_IMPORT_LDIF_IMPORT_TASK_ERR, e.getMessage());
- isCanceled = true;
- throw e;
- }
- }
-
- void processEntry(Entry entry, EntryID entryID, Suffix suffix)
- throws DatabaseException, DirectoryException, JebException, InterruptedException
- {
- DN entryDN = entry.getName();
- if (!skipDNValidation && !dnSanityCheck(entryDN, entry, suffix))
- {
- suffix.removePending(entryDN);
- return;
- }
- suffix.removePending(entryDN);
- processDN2ID(suffix, entryDN, entryID);
- processDN2URI(suffix, null, entry);
- processIndexes(suffix, entry, entryID);
- suffix.getID2Entry().put(null, entryID, entry);
- importCount.getAndIncrement();
- }
-
- /** Examine the DN for duplicates and missing parents. */
- boolean dnSanityCheck(DN entryDN, Entry entry, Suffix suffix)
- throws JebException, InterruptedException
- {
- //Perform parent checking.
- DN parentDN = suffix.getEntryContainer().getParentWithinBase(entryDN);
- if (parentDN != null && !suffix.isParentProcessed(parentDN, tmpEnv, clearedBackend))
- {
- reader.rejectEntry(entry, ERR_IMPORT_PARENT_NOT_FOUND.get(parentDN));
- return false;
- }
- //If the backend was not cleared, then the dn2id needs to checked first
- //for DNs that might not exist in the DN cache. If the DN is not in
- //the suffixes dn2id DB, then the dn cache is used.
- if (!clearedBackend)
- {
- EntryID id = suffix.getDN2ID().get(null, entryDN, LockMode.DEFAULT);
- if (id != null || !tmpEnv.insert(entryDN, keyEntry, valEntry))
- {
- reader.rejectEntry(entry, WARN_IMPORT_ENTRY_EXISTS.get());
- return false;
- }
- }
- else if (!tmpEnv.insert(entryDN, keyEntry, valEntry))
- {
- reader.rejectEntry(entry, WARN_IMPORT_ENTRY_EXISTS.get());
- return false;
- }
- return true;
- }
-
- void processIndexes(Suffix suffix, Entry entry, EntryID entryID)
- throws DatabaseException, DirectoryException, JebException, InterruptedException
- {
- for (AttributeIndex attrIndex : suffix.getAttributeIndexes())
- {
- AttributeType attributeType = attrIndex.getAttributeType();
- if (entry.hasAttribute(attributeType))
- {
- fillIndexKey(suffix, attrIndex, entry, attributeType, entryID);
- }
- }
- }
-
- void fillIndexKey(Suffix suffix, AttributeIndex attrIndex, Entry entry, AttributeType attrType, EntryID entryID)
- throws DatabaseException, InterruptedException, DirectoryException, JebException
- {
- for(Index index : attrIndex.getAllIndexes()) {
- processAttribute(index, entry, attrType, entryID);
- }
-
- for (VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes())
- {
- Transaction transaction = null;
- vlvIdx.addEntry(transaction, entryID, entry);
- }
- }
-
- private void processAttribute(Index index, Entry entry, AttributeType attributeType, EntryID entryID)
- throws InterruptedException
- {
- if (index != null)
- {
- processAttribute(index, entry, entryID,
- new IndexKey(attributeType, index.getName(), index.getIndexEntryLimit()));
- }
- }
-
- private void processAttributes(Collection<Index> indexes, Entry entry, AttributeType attributeType, EntryID entryID)
- throws InterruptedException
- {
- if (indexes != null)
- {
- for (Index index : indexes)
- {
- processAttribute(index, entry, entryID,
- new IndexKey(attributeType, index.getName(), index.getIndexEntryLimit()));
- }
- }
- }
-
- void processAttribute(Index index, Entry entry, EntryID entryID, IndexKey indexKey)
- throws DatabaseException, InterruptedException
- {
- insertKeySet.clear();
- index.indexEntry(entry, insertKeySet);
- for (ByteString key : insertKeySet)
- {
- processKey(index, key.toByteArray(), entryID, indexKey, true);
- }
- }
-
- void flushIndexBuffers() throws InterruptedException, ExecutionException
- {
- final ArrayList<Future<Void>> futures = new ArrayList<>();
- Iterator<Map.Entry<IndexKey, IndexOutputBuffer>> it = indexBufferMap.entrySet().iterator();
- while (it.hasNext())
- {
- Map.Entry<IndexKey, IndexOutputBuffer> e = it.next();
- IndexKey indexKey = e.getKey();
- IndexOutputBuffer indexBuffer = e.getValue();
- it.remove();
- indexBuffer.setIndexKey(indexKey);
- indexBuffer.discard();
- futures.add(bufferSortService.submit(new SortTask(indexBuffer)));
- }
- getAll(futures);
- }
-
- int processKey(DatabaseContainer container, byte[] key, EntryID entryID,
- IndexKey indexKey, boolean insert) throws InterruptedException
- {
- int sizeNeeded = IndexOutputBuffer.getRequiredSize(key.length, entryID.longValue());
- IndexOutputBuffer indexBuffer = indexBufferMap.get(indexKey);
- if (indexBuffer == null)
- {
- indexBuffer = getNewIndexBuffer(sizeNeeded);
- indexBufferMap.put(indexKey, indexBuffer);
- }
- else if (!indexBuffer.isSpaceAvailable(key, entryID.longValue()))
- {
- // complete the current buffer...
- indexBuffer.setIndexKey(indexKey);
- bufferSortService.submit(new SortTask(indexBuffer));
- // ... and get a new one
- indexBuffer = getNewIndexBuffer(sizeNeeded);
- indexBufferMap.put(indexKey, indexBuffer);
- }
- int indexID = getIndexID(container);
- indexBuffer.add(key, entryID, indexID, insert);
- return indexID;
- }
-
- IndexOutputBuffer getNewIndexBuffer(int size) throws InterruptedException
- {
- IndexOutputBuffer indexBuffer;
- if (size > bufferSize)
- {
- indexBuffer = new IndexOutputBuffer(size);
- indexBuffer.discard();
- }
- else
- {
- indexBuffer = freeBufferQueue.take();
- if (indexBuffer == null)
- {
- throw new InterruptedException("Index buffer processing error.");
- }
- }
- if (indexBuffer.isPoison())
- {
- throw new InterruptedException("Cancel processing received.");
- }
- return indexBuffer;
- }
-
- void processDN2ID(Suffix suffix, DN dn, EntryID entryID) throws InterruptedException
- {
- DN2ID dn2id = suffix.getDN2ID();
- byte[] dnBytes = JebFormat.dnToDNKey(dn, suffix.getBaseDN().size());
- int id = processKey(dn2id, dnBytes, entryID, dnIndexKey, true);
- idSuffixMap.putIfAbsent(id, suffix);
- }
-
- void processDN2URI(Suffix suffix, Entry oldEntry, Entry newEntry) throws DatabaseException
- {
- DN2URI dn2uri = suffix.getDN2URI();
- if (oldEntry != null)
- {
- dn2uri.replaceEntry(null, oldEntry, newEntry);
- }
- else
- {
- dn2uri.addEntry(null, newEntry);
- }
- }
- }
-
- /**
- * This task reads sorted records from the temporary index scratch files,
- * processes the records and writes the results to the index database. The DN
- * index is treated differently then non-DN indexes.
- */
- private final class IndexDBWriteTask implements Callable<Void>
- {
- private final IndexManager indexMgr;
- private final DatabaseEntry dbKey, dbValue;
- private final int cacheSize;
- private final Map<Integer, DNState> dnStateMap = new HashMap<>();
- private final Semaphore permits;
- private final int maxPermits;
- private final AtomicLong bytesRead = new AtomicLong();
- private long lastBytesRead;
- private final AtomicInteger keyCount = new AtomicInteger();
- private RandomAccessFile bufferFile;
- private DataInputStream bufferIndexFile;
- private int remainingBuffers;
- private volatile int totalBatches;
- private AtomicInteger batchNumber = new AtomicInteger();
- private int nextBufferID;
- private int ownedPermits;
- private volatile boolean isRunning;
-
- /**
- * Creates a new index DB writer.
- *
- * @param indexMgr
- * The index manager.
- * @param permits
- * The semaphore used for restricting the number of buffer
- * allocations.
- * @param maxPermits
- * The maximum number of buffers which can be allocated.
- * @param cacheSize
- * The buffer cache size.
- */
- public IndexDBWriteTask(IndexManager indexMgr, Semaphore permits,
- int maxPermits, int cacheSize)
- {
- this.indexMgr = indexMgr;
- this.permits = permits;
- this.maxPermits = maxPermits;
- this.cacheSize = cacheSize;
-
- this.dbKey = new DatabaseEntry();
- this.dbValue = new DatabaseEntry();
- }
-
- /**
- * Initializes this task.
- *
- * @throws IOException
- * If an IO error occurred.
- */
- public void beginWriteTask() throws IOException
- {
- bufferFile = new RandomAccessFile(indexMgr.getBufferFile(), "r");
- bufferIndexFile =
- new DataInputStream(new BufferedInputStream(new FileInputStream(
- indexMgr.getBufferIndexFile())));
-
- remainingBuffers = indexMgr.getNumberOfBuffers();
- totalBatches = (remainingBuffers / maxPermits) + 1;
- batchNumber.set(0);
- nextBufferID = 0;
- ownedPermits = 0;
-
- logger.info(NOTE_IMPORT_LDIF_INDEX_STARTED, indexMgr.getBufferFileName(), remainingBuffers, totalBatches);
-
- indexMgr.setIndexDBWriteTask(this);
- isRunning = true;
- }
-
- /**
- * Returns the next batch of buffers to be processed, blocking until enough
- * buffer permits are available.
- *
- * @return The next batch of buffers, or {@code null} if there are no more
- * buffers to be processed.
- * @throws Exception
- * If an exception occurred.
- */
- public NavigableSet<IndexInputBuffer> getNextBufferBatch() throws Exception
- {
- // First release any previously acquired permits.
- if (ownedPermits > 0)
- {
- permits.release(ownedPermits);
- ownedPermits = 0;
- }
-
- // Block until we can either get enough permits for all buffers, or the
- // maximum number of permits.
- final int permitRequest = Math.min(remainingBuffers, maxPermits);
- if (permitRequest == 0)
- {
- // No more work to do.
- return null;
- }
- permits.acquire(permitRequest);
-
- // Update counters.
- ownedPermits = permitRequest;
- remainingBuffers -= permitRequest;
- batchNumber.incrementAndGet();
-
- // Create all the index buffers for the next batch.
- final NavigableSet<IndexInputBuffer> buffers = new TreeSet<>();
- for (int i = 0; i < permitRequest; i++)
- {
- final long bufferBegin = bufferIndexFile.readLong();
- final long bufferEnd = bufferIndexFile.readLong();
- final IndexInputBuffer b =
- new IndexInputBuffer(indexMgr, bufferFile.getChannel(),
- bufferBegin, bufferEnd, nextBufferID++, cacheSize);
- buffers.add(b);
- }
-
- return buffers;
- }
-
- /**
- * Finishes this task.
- */
- public void endWriteTask()
- {
- isRunning = false;
-
- // First release any previously acquired permits.
- if (ownedPermits > 0)
- {
- permits.release(ownedPermits);
- ownedPermits = 0;
- }
-
- try
- {
- if (indexMgr.isDN2ID())
- {
- for (DNState dnState : dnStateMap.values())
- {
- dnState.flush();
- }
- if (!isCanceled)
- {
- logger.info(NOTE_IMPORT_LDIF_DN_CLOSE, indexMgr.getDNCount());
- }
- }
- else
- {
- if (!isCanceled)
- {
- logger.info(NOTE_IMPORT_LDIF_INDEX_CLOSE, indexMgr.getBufferFileName());
- }
- }
- }
- finally
- {
- close(bufferFile, bufferIndexFile);
-
- indexMgr.getBufferFile().delete();
- indexMgr.getBufferIndexFile().delete();
- }
- }
-
- /**
- * Print out progress stats.
- *
- * @param deltaTime
- * The time since the last update.
- */
- public void printStats(long deltaTime)
- {
- if (isRunning)
- {
- final long bufferFileSize = indexMgr.getBufferFileSize();
- final long tmpBytesRead = bytesRead.get();
- final int currentBatch = batchNumber.get();
-
- final long bytesReadInterval = tmpBytesRead - lastBytesRead;
- final int bytesReadPercent =
- Math.round((100f * tmpBytesRead) / bufferFileSize);
-
- // Kilo and milli approximately cancel out.
- final long kiloBytesRate = bytesReadInterval / deltaTime;
- final long kiloBytesRemaining = (bufferFileSize - tmpBytesRead) / 1024;
-
- logger.info(NOTE_IMPORT_LDIF_PHASE_TWO_REPORT, indexMgr.getBufferFileName(),
- bytesReadPercent, kiloBytesRemaining, kiloBytesRate, currentBatch, totalBatches);
-
- lastBytesRead = tmpBytesRead;
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception, DirectoryException
- {
- ByteBuffer key = null;
- ImportIDSet insertIDSet = null;
- ImportIDSet deleteIDSet = null;
-
- if (isCanceled)
- {
- return null;
- }
-
- try
- {
- beginWriteTask();
-
- NavigableSet<IndexInputBuffer> bufferSet;
- while ((bufferSet = getNextBufferBatch()) != null)
- {
- if (isCanceled)
- {
- return null;
- }
-
- Integer indexID = null;
- while (!bufferSet.isEmpty())
- {
- IndexInputBuffer b = bufferSet.pollFirst();
- if (key == null)
- {
- indexID = b.getIndexID();
-
- if (indexMgr.isDN2ID())
- {
- insertIDSet = new ImportIDSet(1, 1, false);
- deleteIDSet = new ImportIDSet(1, 1, false);
- }
- else
- {
- Index index = idContainerMap.get(indexID);
- int limit = index.getIndexEntryLimit();
- boolean doCount = index.getMaintainCount();
- insertIDSet = new ImportIDSet(1, limit, doCount);
- deleteIDSet = new ImportIDSet(1, limit, doCount);
- }
-
- key = ByteBuffer.allocate(b.getKeyLen());
- key.flip();
- b.fetchKey(key);
-
- b.mergeIDSet(insertIDSet);
- b.mergeIDSet(deleteIDSet);
- insertIDSet.setKey(key);
- deleteIDSet.setKey(key);
- }
- else if (b.compare(key, indexID) != 0)
- {
- addToDB(indexID, insertIDSet, deleteIDSet);
- keyCount.incrementAndGet();
-
- indexID = b.getIndexID();
-
- if (indexMgr.isDN2ID())
- {
- insertIDSet = new ImportIDSet(1, 1, false);
- deleteIDSet = new ImportIDSet(1, 1, false);
- }
- else
- {
- Index index = idContainerMap.get(indexID);
- int limit = index.getIndexEntryLimit();
- boolean doCount = index.getMaintainCount();
- insertIDSet = new ImportIDSet(1, limit, doCount);
- deleteIDSet = new ImportIDSet(1, limit, doCount);
- }
-
- key.clear();
- if (b.getKeyLen() > key.capacity())
- {
- key = ByteBuffer.allocate(b.getKeyLen());
- }
- key.flip();
- b.fetchKey(key);
-
- b.mergeIDSet(insertIDSet);
- b.mergeIDSet(deleteIDSet);
- insertIDSet.setKey(key);
- deleteIDSet.setKey(key);
- }
- else
- {
- b.mergeIDSet(insertIDSet);
- b.mergeIDSet(deleteIDSet);
- }
-
- if (b.hasMoreData())
- {
- b.fetchNextRecord();
- bufferSet.add(b);
- }
- }
-
- if (key != null)
- {
- addToDB(indexID, insertIDSet, deleteIDSet);
- }
- }
- return null;
- }
- catch (Exception e)
- {
- logger.error(ERR_IMPORT_LDIF_INDEX_WRITE_DB_ERR, indexMgr.getBufferFileName(), e.getMessage());
- throw e;
- }
- finally
- {
- endWriteTask();
- }
- }
-
- private void addToDB(int indexID, ImportIDSet insertSet, ImportIDSet deleteSet) throws DirectoryException
- {
- if (indexMgr.isDN2ID())
- {
- addDN2ID(indexID, insertSet);
- }
- else
- {
- if (deleteSet.size() > 0 || !deleteSet.isDefined())
- {
- dbKey.setData(deleteSet.getKey().array(), 0, deleteSet.getKey().limit());
- final Index index = idContainerMap.get(indexID);
- index.delete(dbKey, deleteSet, dbValue);
- }
- if (insertSet.size() > 0 || !insertSet.isDefined())
- {
- dbKey.setData(insertSet.getKey().array(), 0, insertSet.getKey().limit());
- final Index index = idContainerMap.get(indexID);
- index.insert(dbKey, insertSet, dbValue);
- }
- }
- }
-
- private void addDN2ID(int indexID, ImportIDSet record) throws DirectoryException
- {
- DNState dnState;
- if (!dnStateMap.containsKey(indexID))
- {
- dnState = new DNState(idSuffixMap.get(indexID));
- dnStateMap.put(indexID, dnState);
- }
- else
- {
- dnState = dnStateMap.get(indexID);
- }
- if (dnState.checkParent(record))
- {
- dnState.writeToDB();
- }
- }
-
- private void addBytesRead(int bytesRead)
- {
- this.bytesRead.addAndGet(bytesRead);
- }
-
- /**
- * This class is used to by a index DB merge thread performing DN processing
- * to keep track of the state of individual DN2ID index processing.
- */
- class DNState
- {
- private static final int DN_STATE_CACHE_SIZE = 64 * KB;
-
- private ByteBuffer parentDN, lastDN;
- private EntryID parentID, lastID, entryID;
- private final DatabaseEntry dnKey, dnValue;
- private final TreeMap<ByteBuffer, EntryID> parentIDMap;
- private final EntryContainer entryContainer;
- private final boolean isSubordinatesEnabled;
- // Fields below are only needed if the isSubordinatesEnabled boolean is true.
- private Map<byte[], ImportIDSet> id2childTree;
- private Map<byte[], ImportIDSet> id2subtreeTree;
- private int childLimit, subTreeLimit;
- private boolean childDoCount, subTreeDoCount;
- private boolean updateID2Children, updateID2Subtree;
-
- DNState(Suffix suffix)
- {
- this.entryContainer = suffix.getEntryContainer();
- parentIDMap = new TreeMap<>();
-
- isSubordinatesEnabled = backendConfiguration.isSubordinateIndexesEnabled();
- if (suffix.isProcessID2Children())
- {
- childLimit = entryContainer.getID2Children().getIndexEntryLimit();
- childDoCount = isSubordinatesEnabled && entryContainer.getID2Children().getMaintainCount();
- id2childTree = new TreeMap<>(entryContainer.getID2Children().getComparator());
- updateID2Children = true;
- }
- if (suffix.isProcessID2Subtree())
- {
- subTreeLimit = entryContainer.getID2Subtree().getIndexEntryLimit();
- subTreeDoCount = isSubordinatesEnabled && entryContainer.getID2Subtree().getMaintainCount();
- id2subtreeTree = new TreeMap<>(entryContainer.getID2Subtree().getComparator());
- updateID2Subtree = true;
- }
- dnKey = new DatabaseEntry();
- dnValue = new DatabaseEntry();
- lastDN = ByteBuffer.allocate(BYTE_BUFFER_CAPACITY);
- }
-
- private ByteBuffer getParent(ByteBuffer buffer)
- {
- int parentIndex = JebFormat.findDNKeyParent(buffer.array(), 0, buffer.limit());
- if (parentIndex < 0)
- {
- // This is the root or base DN
- return null;
- }
- ByteBuffer parent = buffer.duplicate();
- parent.limit(parentIndex);
- return parent;
- }
-
- private ByteBuffer deepCopy(ByteBuffer srcBuffer, ByteBuffer destBuffer)
- {
- if (destBuffer == null
- || destBuffer.clear().remaining() < srcBuffer.limit())
- {
- byte[] bytes = new byte[srcBuffer.limit()];
- System.arraycopy(srcBuffer.array(), 0, bytes, 0, srcBuffer.limit());
- return ByteBuffer.wrap(bytes);
- }
- else
- {
- destBuffer.put(srcBuffer);
- destBuffer.flip();
- return destBuffer;
- }
- }
-
- /** Why do we still need this if we are checking parents in the first phase? */
- private boolean checkParent(ImportIDSet record) throws DatabaseException
- {
- dnKey.setData(record.getKey().array(), 0, record.getKey().limit());
- byte[] v = record.toDatabase();
- long v1 = JebFormat.entryIDFromDatabase(v);
- dnValue.setData(v);
-
- entryID = new EntryID(v1);
- parentDN = getParent(record.getKey());
-
- //Bypass the cache for append data, lookup the parent in DN2ID and return.
- if (importConfiguration != null
- && importConfiguration.appendToExistingData())
- {
- //If null is returned than this is a suffix DN.
- if (parentDN != null)
- {
- DatabaseEntry key = new DatabaseEntry(parentDN.array(), 0, parentDN.limit());
- DatabaseEntry value = new DatabaseEntry();
- OperationStatus status = entryContainer.getDN2ID().read(null, key, value, LockMode.DEFAULT);
- if (status == OperationStatus.SUCCESS)
- {
- parentID = new EntryID(value);
- }
- else
- {
- // We have a missing parent. Maybe parent checking was turned off?
- // Just ignore.
- parentID = null;
- return false;
- }
- }
- }
- else if (parentIDMap.isEmpty())
- {
- parentIDMap.put(deepCopy(record.getKey(), null), entryID);
- return true;
- }
- else if (lastDN != null && lastDN.equals(parentDN))
- {
- parentIDMap.put(deepCopy(lastDN, null), lastID);
- parentID = lastID;
- lastDN = deepCopy(record.getKey(), lastDN);
- lastID = entryID;
- return true;
- }
- else if (parentIDMap.lastKey().equals(parentDN))
- {
- parentID = parentIDMap.get(parentDN);
- lastDN = deepCopy(record.getKey(), lastDN);
- lastID = entryID;
- return true;
- }
- else if (parentIDMap.containsKey(parentDN))
- {
- EntryID newParentID = parentIDMap.get(parentDN);
- ByteBuffer key = parentIDMap.lastKey();
- while (!parentDN.equals(key))
- {
- parentIDMap.remove(key);
- key = parentIDMap.lastKey();
- }
- parentIDMap.put(deepCopy(record.getKey(), null), entryID);
- parentID = newParentID;
- lastDN = deepCopy(record.getKey(), lastDN);
- lastID = entryID;
- }
- else
- {
- // We have a missing parent. Maybe parent checking was turned off?
- // Just ignore.
- parentID = null;
- return false;
- }
- return true;
- }
-
- private void id2child(EntryID childID) throws DirectoryException
- {
- if (parentID != null)
- {
- ImportIDSet idSet;
- byte[] parentIDBytes = parentID.getDatabaseEntry().getData();
- if (!id2childTree.containsKey(parentIDBytes))
- {
- idSet = new ImportIDSet(1, childLimit, childDoCount);
- id2childTree.put(parentIDBytes, idSet);
- }
- else
- {
- idSet = id2childTree.get(parentIDBytes);
- }
- idSet.addEntryID(childID);
- if (id2childTree.size() > DN_STATE_CACHE_SIZE)
- {
- flushMapToDB(id2childTree, entryContainer.getID2Children(), true);
- }
- }
- else
- {
- throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION,
- ERR_PARENT_ENTRY_IS_MISSING.get());
- }
- }
-
- private EntryID getParentID(ByteBuffer dn) throws DatabaseException
- {
- // Bypass the cache for append data, lookup the parent DN in the DN2ID db
- if (importConfiguration == null || !importConfiguration.appendToExistingData())
- {
- return parentIDMap.get(dn);
- }
- DatabaseEntry key = new DatabaseEntry(dn.array(), 0, dn.limit());
- DatabaseEntry value = new DatabaseEntry();
- OperationStatus status = entryContainer.getDN2ID().read(null, key, value, LockMode.DEFAULT);
- if (status == OperationStatus.SUCCESS)
- {
- return new EntryID(value);
- }
- return null;
- }
-
- private void id2SubTree(EntryID childID) throws DirectoryException
- {
- if (parentID != null)
- {
- ImportIDSet idSet;
- byte[] parentIDBytes = parentID.getDatabaseEntry().getData();
- if (!id2subtreeTree.containsKey(parentIDBytes))
- {
- idSet = new ImportIDSet(1, subTreeLimit, subTreeDoCount);
- id2subtreeTree.put(parentIDBytes, idSet);
- }
- else
- {
- idSet = id2subtreeTree.get(parentIDBytes);
- }
- idSet.addEntryID(childID);
- // TODO:
- // Instead of doing this,
- // we can just walk to parent cache if available
- for (ByteBuffer dn = getParent(parentDN); dn != null; dn = getParent(dn))
- {
- EntryID nodeID = getParentID(dn);
- if (nodeID == null)
- {
- // We have a missing parent. Maybe parent checking was turned off?
- // Just ignore.
- break;
- }
-
- byte[] nodeIDBytes = nodeID.getDatabaseEntry().getData();
- if (!id2subtreeTree.containsKey(nodeIDBytes))
- {
- idSet = new ImportIDSet(1, subTreeLimit, subTreeDoCount);
- id2subtreeTree.put(nodeIDBytes, idSet);
- }
- else
- {
- idSet = id2subtreeTree.get(nodeIDBytes);
- }
- idSet.addEntryID(childID);
- }
- if (id2subtreeTree.size() > DN_STATE_CACHE_SIZE)
- {
- flushMapToDB(id2subtreeTree, entryContainer.getID2Subtree(), true);
- }
- }
- else
- {
- throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION,
- ERR_PARENT_ENTRY_IS_MISSING.get());
- }
- }
-
- public void writeToDB() throws DirectoryException
- {
- entryContainer.getDN2ID().put(null, dnKey, dnValue);
- indexMgr.addTotDNCount(1);
- if (isSubordinatesEnabled && parentDN != null)
- {
- if (updateID2Children)
- {
- id2child(entryID);
- }
- if (updateID2Subtree)
- {
- id2SubTree(entryID);
- }
- }
- }
-
- private void flushMapToDB(Map<byte[], ImportIDSet> map, Index index,
- boolean clearMap)
- {
- for (Map.Entry<byte[], ImportIDSet> e : map.entrySet())
- {
- byte[] key = e.getKey();
- ImportIDSet idSet = e.getValue();
- dnKey.setData(key);
- index.insert(dnKey, idSet, dnValue);
- }
- if (clearMap)
- {
- map.clear();
- }
- }
-
- public void flush()
- {
- if (isSubordinatesEnabled)
- {
- if (updateID2Children)
- {
- flushMapToDB(id2childTree, entryContainer.getID2Children(), false);
- }
- if (updateID2Subtree)
- {
- flushMapToDB(id2subtreeTree, entryContainer.getID2Subtree(), false);
- }
- }
- }
- }
- }
-
- /**
- * This task writes the temporary scratch index files using the sorted buffers
- * read from a blocking queue private to each index.
- */
- private final class ScratchFileWriterTask implements Callable<Void>
- {
- private static final int DRAIN_TO = 3;
-
- private final IndexManager indexMgr;
- private final BlockingQueue<IndexOutputBuffer> queue;
- private final ByteArrayOutputStream insertByteStream = new ByteArrayOutputStream(2 * bufferSize);
- private final ByteArrayOutputStream deleteByteStream = new ByteArrayOutputStream(2 * bufferSize);
- private final DataOutputStream bufferStream;
- private final DataOutputStream bufferIndexStream;
- private final byte[] tmpArray = new byte[8];
- private final TreeSet<IndexOutputBuffer> indexSortedSet = new TreeSet<>();
- private int insertKeyCount, deleteKeyCount;
- private int bufferCount;
- private boolean poisonSeen;
-
- public ScratchFileWriterTask(BlockingQueue<IndexOutputBuffer> queue,
- IndexManager indexMgr) throws FileNotFoundException
- {
- this.queue = queue;
- this.indexMgr = indexMgr;
- this.bufferStream = newDataOutputStream(indexMgr.getBufferFile());
- this.bufferIndexStream = newDataOutputStream(indexMgr.getBufferIndexFile());
- }
-
- private DataOutputStream newDataOutputStream(File file) throws FileNotFoundException
- {
- return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(file), READER_WRITER_BUFFER_SIZE));
- }
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws IOException, InterruptedException
- {
- long offset = 0;
- List<IndexOutputBuffer> l = new LinkedList<>();
- try
- {
- while (true)
- {
- final IndexOutputBuffer indexBuffer = queue.take();
- long beginOffset = offset;
- long bufferLen;
- if (!queue.isEmpty())
- {
- queue.drainTo(l, DRAIN_TO);
- l.add(indexBuffer);
- bufferLen = writeIndexBuffers(l);
- for (IndexOutputBuffer id : l)
- {
- if (!id.isDiscarded())
- {
- id.reset();
- freeBufferQueue.add(id);
- }
- }
- l.clear();
- }
- else
- {
- if (indexBuffer.isPoison())
- {
- break;
- }
- bufferLen = writeIndexBuffer(indexBuffer);
- if (!indexBuffer.isDiscarded())
- {
- indexBuffer.reset();
- freeBufferQueue.add(indexBuffer);
- }
- }
- offset += bufferLen;
-
- // Write buffer index information.
- bufferIndexStream.writeLong(beginOffset);
- bufferIndexStream.writeLong(offset);
-
- bufferCount++;
- Importer.this.bufferCount.incrementAndGet();
-
- if (poisonSeen)
- {
- break;
- }
- }
- return null;
- }
- catch (IOException e)
- {
- logger.error(ERR_IMPORT_LDIF_INDEX_FILEWRITER_ERR, indexMgr.getBufferFile().getAbsolutePath(), e.getMessage());
- isCanceled = true;
- throw e;
- }
- finally
- {
- close(bufferStream, bufferIndexStream);
- indexMgr.setBufferInfo(bufferCount, indexMgr.getBufferFile().length());
- }
- }
-
- private long writeIndexBuffer(IndexOutputBuffer indexBuffer) throws IOException
- {
- indexBuffer.setPosition(-1);
- resetStreams();
-
- long bufferLen = 0;
- final int numberKeys = indexBuffer.getNumberKeys();
- for (int i = 0; i < numberKeys; i++)
- {
- if (indexBuffer.getPosition() == -1)
- {
- indexBuffer.setPosition(i);
- insertOrDeleteKey(indexBuffer, i);
- continue;
- }
- if (!indexBuffer.compare(i))
- {
- bufferLen += writeRecord(indexBuffer);
- indexBuffer.setPosition(i);
- resetStreams();
- }
- insertOrDeleteKeyCheckEntryLimit(indexBuffer, i);
- }
- if (indexBuffer.getPosition() != -1)
- {
- bufferLen += writeRecord(indexBuffer);
- }
- return bufferLen;
- }
-
- private long writeIndexBuffers(List<IndexOutputBuffer> buffers) throws IOException
- {
- resetStreams();
-
- long bufferID = 0;
- long bufferLen = 0;
- for (IndexOutputBuffer b : buffers)
- {
- if (b.isPoison())
- {
- poisonSeen = true;
- }
- else
- {
- b.setPosition(0);
- b.setBufferID(bufferID++);
- indexSortedSet.add(b);
- }
- }
- byte[] saveKey = null;
- int saveIndexID = 0;
- while (!indexSortedSet.isEmpty())
- {
- final IndexOutputBuffer b = indexSortedSet.pollFirst();
- if (saveKey == null)
- {
- saveKey = b.getKey();
- saveIndexID = b.getIndexID();
- insertOrDeleteKey(b, b.getPosition());
- }
- else if (!b.compare(saveKey, saveIndexID))
- {
- bufferLen += writeRecord(saveKey, saveIndexID);
- resetStreams();
- saveKey = b.getKey();
- saveIndexID = b.getIndexID();
- insertOrDeleteKey(b, b.getPosition());
- }
- else
- {
- insertOrDeleteKeyCheckEntryLimit(b, b.getPosition());
- }
- if (b.hasMoreData())
- {
- b.nextRecord();
- indexSortedSet.add(b);
- }
- }
- if (saveKey != null)
- {
- bufferLen += writeRecord(saveKey, saveIndexID);
- }
- return bufferLen;
- }
-
- private void resetStreams()
- {
- insertByteStream.reset();
- insertKeyCount = 0;
- deleteByteStream.reset();
- deleteKeyCount = 0;
- }
-
- private void insertOrDeleteKey(IndexOutputBuffer indexBuffer, int position)
- {
- if (indexBuffer.isInsertRecord(position))
- {
- indexBuffer.writeEntryID(insertByteStream, position);
- insertKeyCount++;
- }
- else
- {
- indexBuffer.writeEntryID(deleteByteStream, position);
- deleteKeyCount++;
- }
- }
-
- private void insertOrDeleteKeyCheckEntryLimit(IndexOutputBuffer indexBuffer, int position)
- {
- if (indexBuffer.isInsertRecord(position))
- {
- if (insertKeyCount++ <= indexMgr.getLimit())
- {
- indexBuffer.writeEntryID(insertByteStream, position);
- }
- }
- else
- {
- indexBuffer.writeEntryID(deleteByteStream, position);
- deleteKeyCount++;
- }
- }
-
- private int writeByteStreams() throws IOException
- {
- if (insertKeyCount > indexMgr.getLimit())
- {
- // special handling when index entry limit has been exceeded
- insertKeyCount = 1;
- insertByteStream.reset();
- writePackedLong(insertByteStream, IndexInputBuffer.UNDEFINED_SIZE);
- }
-
- int insertSize = writePackedInt(bufferStream, insertKeyCount);
- if (insertByteStream.size() > 0)
- {
- insertByteStream.writeTo(bufferStream);
- }
-
- int deleteSize = writePackedInt(bufferStream, deleteKeyCount);
- if (deleteByteStream.size() > 0)
- {
- deleteByteStream.writeTo(bufferStream);
- }
- return insertSize + insertByteStream.size() + deleteSize + deleteByteStream.size();
- }
-
- private int writeHeader(int indexID, int keySize) throws IOException
- {
- bufferStream.writeInt(indexID);
- return INT_SIZE + writePackedInt(bufferStream, keySize);
- }
-
- private int writeRecord(IndexOutputBuffer b) throws IOException
- {
- int keySize = b.getKeySize();
- int headerSize = writeHeader(b.getIndexID(), keySize);
- b.writeKey(bufferStream);
- int bodySize = writeByteStreams();
- return headerSize + keySize + bodySize;
- }
-
- private int writeRecord(byte[] k, int indexID) throws IOException
- {
- int keySize = k.length;
- int headerSize = writeHeader(indexID, keySize);
- bufferStream.write(k);
- int bodySize = writeByteStreams();
- return headerSize + keySize + bodySize;
- }
-
- private int writePackedInt(OutputStream stream, int value) throws IOException
- {
- int writeSize = PackedInteger.getWriteIntLength(value);
- PackedInteger.writeInt(tmpArray, 0, value);
- stream.write(tmpArray, 0, writeSize);
- return writeSize;
- }
-
- private int writePackedLong(OutputStream stream, long value) throws IOException
- {
- int writeSize = PackedInteger.getWriteLongLength(value);
- PackedInteger.writeLong(tmpArray, 0, value);
- stream.write(tmpArray, 0, writeSize);
- return writeSize;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return getClass().getSimpleName() + "(" + indexMgr.getBufferFileName() + ": " + indexMgr.getBufferFile() + ")";
- }
- }
-
- /**
- * This task main function is to sort the index buffers given to it from the
- * import tasks reading the LDIF file. It will also create a index file writer
- * task and corresponding queue if needed. The sorted index buffers are put on
- * the index file writer queues for writing to a temporary file.
- */
- private final class SortTask implements Callable<Void>
- {
-
- private final IndexOutputBuffer indexBuffer;
-
- public SortTask(IndexOutputBuffer indexBuffer)
- {
- this.indexBuffer = indexBuffer;
- }
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception
- {
- if ((importConfiguration != null && importConfiguration.isCancelled())
- || isCanceled)
- {
- isCanceled = true;
- return null;
- }
- indexBuffer.sort();
- final IndexKey indexKey = indexBuffer.getIndexKey();
- if (!indexKeyQueueMap.containsKey(indexKey))
- {
- createIndexWriterTask(indexKey);
- }
- indexKeyQueueMap.get(indexKey).add(indexBuffer);
- return null;
- }
-
- private void createIndexWriterTask(IndexKey indexKey) throws FileNotFoundException
- {
- synchronized (synObj)
- {
- if (indexKeyQueueMap.containsKey(indexKey))
- {
- return;
- }
- boolean isDN2ID = ImportIndexType.DN.toString().equals(indexKey.getIndexName());
- IndexManager indexMgr = new IndexManager(indexKey.getName(), isDN2ID, indexKey.getEntryLimit());
- if (isDN2ID)
- {
- DNIndexMgrList.add(indexMgr);
- }
- else
- {
- indexMgrList.add(indexMgr);
- }
- BlockingQueue<IndexOutputBuffer> newQueue = new ArrayBlockingQueue<>(phaseOneBufferCount);
- ScratchFileWriterTask indexWriter = new ScratchFileWriterTask(newQueue, indexMgr);
- scratchFileWriterList.add(indexWriter);
- scratchFileWriterFutures.add(scratchFileWriterService.submit(indexWriter));
- indexKeyQueueMap.put(indexKey, newQueue);
- }
- }
- }
-
- /**
- * The index manager class has several functions:
- * <ol>
- * <li>It is used to carry information about index processing created in phase one to phase two</li>
- * <li>It collects statistics about phase two processing for each index</li>
- * <li>It manages opening and closing the scratch index files</li>
- * </ol>
- */
- final class IndexManager implements Comparable<IndexManager>
- {
- private final File bufferFile;
- private final String bufferFileName;
- private final File bufferIndexFile;
- private final boolean isDN2ID;
- private final int limit;
-
- private int numberOfBuffers;
- private long bufferFileSize;
- private long totalDNs;
- private volatile IndexDBWriteTask writer;
-
- private IndexManager(String fileName, boolean isDN2ID, int limit)
- {
- this.bufferFileName = fileName;
- this.bufferFile = new File(tempDir, bufferFileName);
- this.bufferIndexFile = new File(tempDir, bufferFileName + ".index");
-
- this.isDN2ID = isDN2ID;
- this.limit = limit > 0 ? limit : Integer.MAX_VALUE;
- }
-
- private void setIndexDBWriteTask(IndexDBWriteTask writer)
- {
- this.writer = writer;
- }
-
- private File getBufferFile()
- {
- return bufferFile;
- }
-
- private long getBufferFileSize()
- {
- return bufferFileSize;
- }
-
- private File getBufferIndexFile()
- {
- return bufferIndexFile;
- }
-
- private void setBufferInfo(int numberOfBuffers, long bufferFileSize)
- {
- this.numberOfBuffers = numberOfBuffers;
- this.bufferFileSize = bufferFileSize;
- }
-
- /**
- * Updates the bytes read counter.
- *
- * @param bytesRead
- * The number of bytes read.
- */
- void addBytesRead(int bytesRead)
- {
- if (writer != null)
- {
- writer.addBytesRead(bytesRead);
- }
- }
-
- private void addTotDNCount(int delta)
- {
- totalDNs += delta;
- }
-
- private long getDNCount()
- {
- return totalDNs;
- }
-
- private boolean isDN2ID()
- {
- return isDN2ID;
- }
-
- private void printStats(long deltaTime)
- {
- if (writer != null)
- {
- writer.printStats(deltaTime);
- }
- }
-
- /**
- * Returns the file name associated with this index manager.
- *
- * @return The file name associated with this index manager.
- */
- String getBufferFileName()
- {
- return bufferFileName;
- }
-
- private int getLimit()
- {
- return limit;
- }
-
- /** {@inheritDoc} */
- @Override
- public int compareTo(IndexManager mgr)
- {
- return numberOfBuffers - mgr.numberOfBuffers;
- }
-
- private int getNumberOfBuffers()
- {
- return numberOfBuffers;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return getClass().getSimpleName() + "(" + bufferFileName + ": " + bufferFile + ")";
- }
- }
-
- /**
- * The rebuild index manager handles all rebuild index related processing.
- */
- private class RebuildIndexManager extends ImportTask implements
- DiskSpaceMonitorHandler
- {
-
- /** Rebuild index configuration. */
- private final RebuildConfig rebuildConfig;
- /** Local DB backend configuration. */
- private final LocalDBBackendCfg cfg;
-
- /** Map of index keys to indexes. */
- private final Map<IndexKey, Index> indexMap = new LinkedHashMap<>();
- /** Map of index keys to extensible indexes. */
- private final Map<IndexKey, Collection<Index>> extensibleIndexMap = new LinkedHashMap<>();
- /** List of VLV indexes. */
- private final List<VLVIndex> vlvIndexes = new LinkedList<>();
-
- /** Total entries to be processed. */
- private long totalEntries;
-
- /** Total entries processed. */
- private final AtomicLong entriesProcessed = new AtomicLong(0);
-
- /** The suffix instance. */
- private Suffix suffix;
-
- /** The entry container. */
- private EntryContainer entryContainer;
-
- private boolean reBuildDN2ID;
- private boolean reBuildDN2URI;
-
-
- /**
- * Create an instance of the rebuild index manager using the specified
- * parameters.
- *
- * @param rebuildConfig
- * The rebuild configuration to use.
- * @param cfg
- * The local DB configuration to use.
- */
- public RebuildIndexManager(RebuildConfig rebuildConfig,
- LocalDBBackendCfg cfg)
- {
- this.rebuildConfig = rebuildConfig;
- this.cfg = cfg;
- }
-
- /**
- * Initialize a rebuild index manager.
- *
- * @throws ConfigException
- * If an configuration error occurred.
- * @throws InitializationException
- * If an initialization error occurred.
- */
- public void initialize() throws ConfigException, InitializationException
- {
- entryContainer = rootContainer.getEntryContainer(rebuildConfig.getBaseDN());
- suffix = new Suffix(entryContainer, null, null, null);
- }
-
- /**
- * Print start message.
- *
- * @throws DatabaseException
- * If an database error occurred.
- */
- public void printStartMessage() throws DatabaseException
- {
- totalEntries = suffix.getID2Entry().getRecordCount();
-
- switch (rebuildConfig.getRebuildMode())
- {
- case ALL:
- logger.info(NOTE_REBUILD_ALL_START, totalEntries);
- break;
- case DEGRADED:
- logger.info(NOTE_REBUILD_DEGRADED_START, totalEntries);
- break;
- default:
- if (!rebuildConfig.isClearDegradedState()
- && logger.isInfoEnabled())
- {
- String indexes = Utils.joinAsString(", ", rebuildConfig.getRebuildList());
- logger.info(NOTE_REBUILD_START, indexes, totalEntries);
- }
- break;
- }
- }
-
- /**
- * Print stop message.
- *
- * @param startTime
- * The time the rebuild started.
- */
- public void printStopMessage(long startTime)
- {
- long finishTime = System.currentTimeMillis();
- long totalTime = finishTime - startTime;
- float rate = 0;
- if (totalTime > 0)
- {
- rate = 1000f * entriesProcessed.get() / totalTime;
- }
-
- if (!rebuildConfig.isClearDegradedState())
- {
- logger.info(NOTE_REBUILD_FINAL_STATUS, entriesProcessed.get(), totalTime / 1000, rate);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public Void call() throws Exception
- {
- ID2Entry id2entry = entryContainer.getID2Entry();
- DiskOrderedCursor cursor =
- id2entry.openCursor(DiskOrderedCursorConfig.DEFAULT);
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- try
- {
- while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- if (isCanceled)
- {
- return null;
- }
- EntryID entryID = new EntryID(key);
- Entry entry =
- ID2Entry.entryFromDatabase(ByteString.wrap(data.getData()),
- entryContainer.getRootContainer().getCompressedSchema());
- processEntry(entry, entryID);
- entriesProcessed.getAndIncrement();
- }
- flushIndexBuffers();
- return null;
- }
- catch (Exception e)
- {
- logger.traceException(e);
- logger.error(ERR_IMPORT_LDIF_REBUILD_INDEX_TASK_ERR, stackTraceToSingleLineString(e));
- isCanceled = true;
- throw e;
- }
- finally
- {
- close(cursor);
- }
- }
-
- /**
- * Perform rebuild index processing.
- *
- * @throws DatabaseException
- * If an database error occurred.
- * @throws InterruptedException
- * If an interrupted error occurred.
- * @throws ExecutionException
- * If an Execution error occurred.
- * @throws JebException
- * If an JEB error occurred.
- */
- public void rebuildIndexes() throws DatabaseException,
- InterruptedException, ExecutionException, JebException
- {
- // Sets only the needed indexes.
- setIndexesListsToBeRebuilt();
-
- if (!rebuildConfig.isClearDegradedState())
- {
- // If not in a 'clear degraded state' operation,
- // need to rebuild the indexes.
- setRebuildListIndexesTrusted(false);
- clearIndexes();
- phaseOne();
- if (isCanceled)
- {
- throw new InterruptedException("Rebuild Index canceled.");
- }
- phaseTwo();
- }
- else
- {
- logger.info(NOTE_REBUILD_CLEARDEGRADEDSTATE_FINAL_STATUS, rebuildConfig.getRebuildList());
- }
-
- setRebuildListIndexesTrusted(true);
- }
-
- @SuppressWarnings("fallthrough")
- private void setIndexesListsToBeRebuilt() throws JebException
- {
- // Depends on rebuild mode, (re)building indexes' lists.
- final RebuildMode mode = rebuildConfig.getRebuildMode();
- switch (mode)
- {
- case ALL:
- rebuildIndexMap(false);
- // falls through
- case DEGRADED:
- if (mode == RebuildMode.ALL
- || !entryContainer.getID2Children().isTrusted()
- || !entryContainer.getID2Subtree().isTrusted())
- {
- reBuildDN2ID = true;
- }
- if (mode == RebuildMode.ALL || entryContainer.getDN2URI() == null)
- {
- reBuildDN2URI = true;
- }
- if (mode == RebuildMode.DEGRADED
- || entryContainer.getAttributeIndexes().isEmpty())
- {
- rebuildIndexMap(true); // only degraded.
- }
- if (mode == RebuildMode.ALL || vlvIndexes.isEmpty())
- {
- vlvIndexes.addAll(entryContainer.getVLVIndexes());
- }
- break;
-
- case USER_DEFINED:
- // false may be required if the user wants to rebuild specific index.
- rebuildIndexMap(false);
- break;
- default:
- break;
- }
- }
-
- private void rebuildIndexMap(final boolean onlyDegraded)
- {
- // rebuildList contains the user-selected index(in USER_DEFINED mode).
- final List<String> rebuildList = rebuildConfig.getRebuildList();
- for (AttributeIndex attributeIndex : entryContainer.getAttributeIndexes())
- {
- final AttributeType attributeType = attributeIndex.getAttributeType();
- if (rebuildConfig.getRebuildMode() == RebuildMode.ALL
- || rebuildConfig.getRebuildMode() == RebuildMode.DEGRADED)
- {
- // Get all existing indexes for all && degraded mode.
- rebuildAttributeIndexes(attributeIndex, attributeType, onlyDegraded);
- }
- else if (!rebuildList.isEmpty())
- {
- // Get indexes for user defined index.
- for (final String index : rebuildList)
- {
- if (attributeType.getNameOrOID().toLowerCase().equals(index.toLowerCase()))
- {
- rebuildAttributeIndexes(attributeIndex, attributeType, onlyDegraded);
- }
- }
- }
- }
- }
-
- private void rebuildAttributeIndexes(final AttributeIndex attrIndex,
- final AttributeType attrType, final boolean onlyDegraded)
- throws DatabaseException
- {
- for(Index index : attrIndex.getAllIndexes()) {
- fillIndexMap(attrType, index, onlyDegraded);
- }
- }
-
- private void fillIndexMap(final AttributeType attrType, final Index index, final boolean onlyDegraded)
- {
- if (index != null
- && (!onlyDegraded || !index.isTrusted())
- && (!rebuildConfig.isClearDegradedState() || index.getRecordCount() == 0))
- {
- putInIdContainerMap(index);
- final IndexKey key = new IndexKey(attrType, index.getName(), index.getIndexEntryLimit());
- indexMap.put(key, index);
- }
- }
-
- private void clearIndexes() throws DatabaseException
- {
- if (reBuildDN2URI)
- {
- entryContainer.clearDatabase(entryContainer.getDN2URI());
- }
- if (reBuildDN2ID)
- {
- entryContainer.clearDatabase(entryContainer.getDN2ID());
- entryContainer.clearDatabase(entryContainer.getID2Children());
- entryContainer.clearDatabase(entryContainer.getID2Subtree());
- }
-
- if (!indexMap.isEmpty())
- {
- for (final Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet())
- {
- if (!mapEntry.getValue().isTrusted())
- {
- entryContainer.clearDatabase(mapEntry.getValue());
- }
- }
- }
-
- if (!extensibleIndexMap.isEmpty())
- {
- for (final Collection<Index> subIndexes : extensibleIndexMap.values())
- {
- if (subIndexes != null)
- {
- for (final Index subIndex : subIndexes)
- {
- entryContainer.clearDatabase(subIndex);
- }
- }
- }
- }
-
- for (final VLVIndex vlvIndex : entryContainer.getVLVIndexes())
- {
- if (!vlvIndex.isTrusted())
- {
- entryContainer.clearDatabase(vlvIndex);
- }
- }
- }
-
- private void setRebuildListIndexesTrusted(boolean trusted)
- throws JebException
- {
- try
- {
- if (reBuildDN2ID)
- {
- suffix.forceTrustedDN2IDRelated(trusted);
- }
- setTrusted(indexMap.values(), trusted);
- if (!vlvIndexes.isEmpty())
- {
- for (VLVIndex vlvIndex : vlvIndexes)
- {
- vlvIndex.setTrusted(null, trusted);
- }
- }
- if (!extensibleIndexMap.isEmpty())
- {
- for (Collection<Index> subIndexes : extensibleIndexMap.values())
- {
- setTrusted(subIndexes, trusted);
- }
- }
- }
- catch (DatabaseException ex)
- {
- throw new JebException(NOTE_IMPORT_LDIF_TRUSTED_FAILED.get(ex.getMessage()));
- }
- }
-
- private void setTrusted(Collection<Index> indexes, boolean trusted)
- {
- if (indexes != null && !indexes.isEmpty())
- {
- for (Index index : indexes)
- {
- index.setTrusted(null, trusted);
- }
- }
- }
-
- private void phaseOne() throws DatabaseException, InterruptedException,
- ExecutionException
- {
- initializeIndexBuffers();
- Timer timer = scheduleAtFixedRate(new RebuildFirstPhaseProgressTask());
- scratchFileWriterService = Executors.newFixedThreadPool(2 * indexCount);
- bufferSortService = Executors.newFixedThreadPool(threadCount);
- ExecutorService rebuildIndexService = Executors.newFixedThreadPool(threadCount);
- List<Callable<Void>> tasks = new ArrayList<>(threadCount);
- for (int i = 0; i < threadCount; i++)
- {
- tasks.add(this);
- }
- List<Future<Void>> results = rebuildIndexService.invokeAll(tasks);
- getAll(results);
- stopScratchFileWriters();
- getAll(scratchFileWriterFutures);
-
- // Try to clear as much memory as possible.
- shutdownAll(rebuildIndexService, bufferSortService, scratchFileWriterService);
- timer.cancel();
-
- clearAll(tasks, results, scratchFileWriterList, scratchFileWriterFutures, freeBufferQueue);
- indexKeyQueueMap.clear();
- }
-
- private void phaseTwo() throws InterruptedException, ExecutionException
- {
- final Timer timer = scheduleAtFixedRate(new SecondPhaseProgressTask(entriesProcessed.get()));
- try
- {
- processIndexFiles();
- }
- finally
- {
- timer.cancel();
- }
- }
-
- private Timer scheduleAtFixedRate(TimerTask task)
- {
- final Timer timer = new Timer();
- timer.scheduleAtFixedRate(task, TIMER_INTERVAL, TIMER_INTERVAL);
- return timer;
- }
-
- private int getIndexCount() throws ConfigException, JebException,
- InitializationException
- {
- switch (rebuildConfig.getRebuildMode())
- {
- case ALL:
- return getTotalIndexCount(cfg);
- case DEGRADED:
- // FIXME: since the environment is not started we cannot determine which
- // indexes are degraded. As a workaround, be conservative and assume all
- // indexes need rebuilding.
- return getTotalIndexCount(cfg);
- default:
- return getRebuildListIndexCount(cfg);
- }
- }
-
- private int getRebuildListIndexCount(LocalDBBackendCfg cfg)
- throws JebException, ConfigException, InitializationException
- {
- final List<String> rebuildList = rebuildConfig.getRebuildList();
- if (rebuildList.isEmpty())
- {
- return 0;
- }
-
- int indexCount = 0;
- for (String index : rebuildList)
- {
- final String lowerName = index.toLowerCase();
- if (DN2ID_INDEX_NAME.equals(lowerName))
- {
- indexCount += 3;
- }
- else if (DN2URI_INDEX_NAME.equals(lowerName))
- {
- indexCount++;
- }
- else if (lowerName.startsWith("vlv."))
- {
- if (lowerName.length() < 5)
- {
- throw new JebException(ERR_VLV_INDEX_NOT_CONFIGURED.get(lowerName));
- }
- indexCount++;
- }
- else if (ID2SUBTREE_INDEX_NAME.equals(lowerName)
- || ID2CHILDREN_INDEX_NAME.equals(lowerName))
- {
- throw attributeIndexNotConfigured(index);
- }
- else
- {
- final String[] attrIndexParts = lowerName.split("\\.");
- if (attrIndexParts.length <= 0 || attrIndexParts.length > 3)
- {
- throw attributeIndexNotConfigured(index);
- }
- AttributeType attrType = DirectoryServer.getAttributeTypeOrNull(attrIndexParts[0]);
- if (attrType == null)
- {
- throw attributeIndexNotConfigured(index);
- }
- if (attrIndexParts.length != 1)
- {
- final String indexType = attrIndexParts[1];
- if (attrIndexParts.length == 2)
- {
- if ("presence".equals(indexType)
- || "equality".equals(indexType)
- || "ordering".equals(indexType)
- || "substring".equals(indexType)
- || "approximate".equals(indexType))
- {
- indexCount++;
- }
- else
- {
- throw attributeIndexNotConfigured(index);
- }
- }
- else // attrIndexParts.length == 3
- {
- if (!findExtensibleMatchingRule(cfg, indexType + "." + attrIndexParts[2]))
- {
- throw attributeIndexNotConfigured(index);
- }
- indexCount++;
- }
- }
- else
- {
- boolean found = false;
- for (final String idx : cfg.listLocalDBIndexes())
- {
- if (idx.equalsIgnoreCase(index))
- {
- found = true;
- final LocalDBIndexCfg indexCfg = cfg.getLocalDBIndex(idx);
- indexCount += getAttributeIndexCount(indexCfg.getIndexType(),
- PRESENCE, EQUALITY, ORDERING, SUBSTRING, APPROXIMATE);
- indexCount += getExtensibleIndexCount(indexCfg);
- }
- }
- if (!found)
- {
- throw attributeIndexNotConfigured(index);
- }
- }
- }
- }
- return indexCount;
- }
-
- private InitializationException attributeIndexNotConfigured(String index)
- {
- return new InitializationException(ERR_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index));
- }
-
- private boolean findExtensibleMatchingRule(LocalDBBackendCfg cfg, String indexExRuleName) throws ConfigException
- {
- for (String idx : cfg.listLocalDBIndexes())
- {
- LocalDBIndexCfg indexCfg = cfg.getLocalDBIndex(idx);
- if (indexCfg.getIndexType().contains(EXTENSIBLE))
- {
- for (String exRule : indexCfg.getIndexExtensibleMatchingRule())
- {
- if (exRule.equalsIgnoreCase(indexExRuleName))
- {
- return true;
- }
- }
- }
- }
- return false;
- }
-
- private int getAttributeIndexCount(SortedSet<IndexType> indexTypes, IndexType... toFinds)
- {
- int result = 0;
- for (IndexType toFind : toFinds)
- {
- if (indexTypes.contains(toFind))
- {
- result++;
- }
- }
- return result;
- }
-
- private int getExtensibleIndexCount(LocalDBIndexCfg indexCfg)
- {
- int result = 0;
- if (indexCfg.getIndexType().contains(EXTENSIBLE))
- {
- boolean shared = false;
- for (final String exRule : indexCfg.getIndexExtensibleMatchingRule())
- {
- if (exRule.endsWith(".sub"))
- {
- result++;
- }
- else if (!shared)
- {
- shared = true;
- result++;
- }
- }
- }
- return result;
- }
-
- private void processEntry(Entry entry, EntryID entryID)
- throws DatabaseException, DirectoryException, JebException,
- InterruptedException
- {
- if (reBuildDN2ID)
- {
- processDN2ID(suffix, entry.getName(), entryID);
- }
- if (reBuildDN2URI)
- {
- processDN2URI(suffix, null, entry);
- }
- processIndexes(entry, entryID);
- processExtensibleIndexes(entry, entryID);
- processVLVIndexes(entry, entryID);
- }
-
- private void processVLVIndexes(Entry entry, EntryID entryID)
- throws DatabaseException, JebException, DirectoryException
- {
- for (VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes())
- {
- Transaction transaction = null;
- vlvIdx.addEntry(transaction, entryID, entry);
- }
- }
-
- private void processExtensibleIndexes(Entry entry, EntryID entryID)
- throws InterruptedException
- {
- for (Map.Entry<IndexKey, Collection<Index>> mapEntry :
- this.extensibleIndexMap.entrySet())
- {
- IndexKey key = mapEntry.getKey();
- AttributeType attrType = key.getAttributeType();
- if (entry.hasAttribute(attrType))
- {
- for (Index index : mapEntry.getValue())
- {
- processAttribute(index, entry, entryID, key);
- }
- }
- }
- }
-
- private void processIndexes(Entry entry, EntryID entryID)
- throws DatabaseException, InterruptedException
- {
- for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet())
- {
- IndexKey key = mapEntry.getKey();
- AttributeType attrType = key.getAttributeType();
- if (entry.hasAttribute(attrType))
- {
- processAttribute(mapEntry.getValue(), entry, entryID, key);
- }
- }
- }
-
- /**
- * Return the number of entries processed by the rebuild manager.
- *
- * @return The number of entries processed.
- */
- public long getEntriesProcessed()
- {
- return this.entriesProcessed.get();
- }
-
- /**
- * Return the total number of entries to process by the rebuild manager.
- *
- * @return The total number for entries to process.
- */
- public long getTotalEntries()
- {
- return this.totalEntries;
- }
-
- @Override
- public void diskLowThresholdReached(File directory, long thresholdInBytes)
- {
- diskFullThresholdReached(directory, thresholdInBytes);
- }
-
- @Override
- public void diskFullThresholdReached(File directory, long thresholdInBytes)
- {
- isCanceled = true;
- logger.error(ERR_REBUILD_INDEX_LACK_DISK, directory.getAbsolutePath(), thresholdInBytes);
- }
-
- @Override
- public void diskSpaceRestored(File directory, long lowThresholdInBytes, long fullThresholdInBytes)
- {
- // Do nothing
- }
- }
-
- /**
- * This class reports progress of rebuild index processing at fixed intervals.
- */
- private class RebuildFirstPhaseProgressTask extends TimerTask
- {
- /**
- * The number of records that had been processed at the time of the previous
- * progress report.
- */
- private long previousProcessed;
- /** The time in milliseconds of the previous progress report. */
- private long previousTime;
- /** The environment statistics at the time of the previous report. */
- private EnvironmentStats prevEnvStats;
-
- /**
- * Create a new rebuild index progress task.
- *
- * @throws DatabaseException
- * If an error occurred while accessing the JE database.
- */
- public RebuildFirstPhaseProgressTask() throws DatabaseException
- {
- previousTime = System.currentTimeMillis();
- prevEnvStats = rootContainer.getEnvironmentStats(new StatsConfig());
- }
-
- /**
- * The action to be performed by this timer task.
- */
- @Override
- public void run()
- {
- long latestTime = System.currentTimeMillis();
- long deltaTime = latestTime - previousTime;
-
- if (deltaTime == 0)
- {
- return;
- }
- long entriesProcessed = rebuildManager.getEntriesProcessed();
- long deltaCount = entriesProcessed - previousProcessed;
- float rate = 1000f * deltaCount / deltaTime;
- float completed = 0;
- if (rebuildManager.getTotalEntries() > 0)
- {
- completed = 100f * entriesProcessed / rebuildManager.getTotalEntries();
- }
- logger.info(NOTE_REBUILD_PROGRESS_REPORT, completed, entriesProcessed, rebuildManager.getTotalEntries(), rate);
- try
- {
- Runtime runtime = Runtime.getRuntime();
- long freeMemory = runtime.freeMemory() / MB;
- EnvironmentStats envStats =
- rootContainer.getEnvironmentStats(new StatsConfig());
- long nCacheMiss = envStats.getNCacheMiss() - prevEnvStats.getNCacheMiss();
-
- float cacheMissRate = 0;
- if (deltaCount > 0)
- {
- cacheMissRate = nCacheMiss / (float) deltaCount;
- }
- logger.info(INFO_CACHE_AND_MEMORY_REPORT, freeMemory, cacheMissRate);
- prevEnvStats = envStats;
- }
- catch (DatabaseException e)
- {
- // Unlikely to happen and not critical.
- }
- previousProcessed = entriesProcessed;
- previousTime = latestTime;
- }
- }
-
- /**
- * This class reports progress of first phase of import processing at fixed
- * intervals.
- */
- private final class FirstPhaseProgressTask extends TimerTask
- {
- /**
- * The number of entries that had been read at the time of the previous
- * progress report.
- */
- private long previousCount;
- /** The time in milliseconds of the previous progress report. */
- private long previousTime;
- /** The environment statistics at the time of the previous report. */
- private EnvironmentStats previousStats;
- /** Determines if eviction has been detected. */
- private boolean evicting;
- /** Entry count when eviction was detected. */
- private long evictionEntryCount;
-
- /** Create a new import progress task. */
- public FirstPhaseProgressTask()
- {
- previousTime = System.currentTimeMillis();
- try
- {
- previousStats = rootContainer.getEnvironmentStats(new StatsConfig());
- }
- catch (DatabaseException e)
- {
- throw new RuntimeException(e);
- }
- }
-
- /** The action to be performed by this timer task. */
- @Override
- public void run()
- {
- long entriesRead = reader.getEntriesRead();
- long entriesIgnored = reader.getEntriesIgnored();
- long entriesRejected = reader.getEntriesRejected();
- long deltaCount = entriesRead - previousCount;
-
- long latestTime = System.currentTimeMillis();
- long deltaTime = latestTime - previousTime;
- if (deltaTime == 0)
- {
- return;
- }
- float rate = 1000f * deltaCount / deltaTime;
- logger.info(NOTE_IMPORT_PROGRESS_REPORT, entriesRead, entriesIgnored, entriesRejected, rate);
- try
- {
- Runtime runTime = Runtime.getRuntime();
- long freeMemory = runTime.freeMemory() / MB;
- EnvironmentStats environmentStats;
-
- //If first phase skip DN validation is specified use the root container
- //stats, else use the temporary environment stats.
- if (skipDNValidation)
- {
- environmentStats =
- rootContainer.getEnvironmentStats(new StatsConfig());
- }
- else
- {
- environmentStats = tmpEnv.getEnvironmentStats(new StatsConfig());
- }
- long nCacheMiss =
- environmentStats.getNCacheMiss() - previousStats.getNCacheMiss();
-
- float cacheMissRate = 0;
- if (deltaCount > 0)
- {
- cacheMissRate = nCacheMiss / (float) deltaCount;
- }
- logger.info(INFO_CACHE_AND_MEMORY_REPORT, freeMemory, cacheMissRate);
- long evictPasses = environmentStats.getNEvictPasses();
- long evictNodes = environmentStats.getNNodesExplicitlyEvicted();
- long evictBinsStrip = environmentStats.getNBINsStripped();
- long cleanerRuns = environmentStats.getNCleanerRuns();
- long cleanerDeletions = environmentStats.getNCleanerDeletions();
- long cleanerEntriesRead = environmentStats.getNCleanerEntriesRead();
- long cleanerINCleaned = environmentStats.getNINsCleaned();
- long checkPoints = environmentStats.getNCheckpoints();
- if (evictPasses != 0)
- {
- if (!evicting)
- {
- evicting = true;
- evictionEntryCount = reader.getEntriesRead();
- logger.info(NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED, evictionEntryCount);
- }
- logger.info(NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS, evictPasses,
- evictNodes, evictBinsStrip);
- }
- if (cleanerRuns != 0)
- {
- logger.info(NOTE_JEB_IMPORT_LDIF_CLEANER_STATS, cleanerRuns,
- cleanerDeletions, cleanerEntriesRead, cleanerINCleaned);
- }
- if (checkPoints > 1)
- {
- logger.info(NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS, checkPoints);
- }
- previousStats = environmentStats;
- }
- catch (DatabaseException e)
- {
- // Unlikely to happen and not critical.
- }
- previousCount = entriesRead;
- previousTime = latestTime;
- }
- }
-
- /**
- * This class reports progress of the second phase of import processing at
- * fixed intervals.
- */
- private class SecondPhaseProgressTask extends TimerTask
- {
- /**
- * The number of entries that had been read at the time of the previous
- * progress report.
- */
- private long previousCount;
- /** The time in milliseconds of the previous progress report. */
- private long previousTime;
- /** The environment statistics at the time of the previous report. */
- private EnvironmentStats previousStats;
- /** Determines if eviction has been detected. */
- private boolean evicting;
- private long latestCount;
-
- /**
- * Create a new import progress task.
- *
- * @param latestCount
- * The latest count of entries processed in phase one.
- */
- public SecondPhaseProgressTask(long latestCount)
- {
- previousTime = System.currentTimeMillis();
- this.latestCount = latestCount;
- try
- {
- previousStats = rootContainer.getEnvironmentStats(new StatsConfig());
- }
- catch (DatabaseException e)
- {
- throw new RuntimeException(e);
- }
- }
-
- /** The action to be performed by this timer task. */
- @Override
- public void run()
- {
- long deltaCount = latestCount - previousCount;
- long latestTime = System.currentTimeMillis();
- long deltaTime = latestTime - previousTime;
- if (deltaTime == 0)
- {
- return;
- }
- try
- {
- Runtime runTime = Runtime.getRuntime();
- long freeMemory = runTime.freeMemory() / MB;
- EnvironmentStats environmentStats =
- rootContainer.getEnvironmentStats(new StatsConfig());
- long nCacheMiss =
- environmentStats.getNCacheMiss() - previousStats.getNCacheMiss();
-
- float cacheMissRate = 0;
- if (deltaCount > 0)
- {
- cacheMissRate = nCacheMiss / (float) deltaCount;
- }
- logger.info(INFO_CACHE_AND_MEMORY_REPORT, freeMemory, cacheMissRate);
- long evictPasses = environmentStats.getNEvictPasses();
- long evictNodes = environmentStats.getNNodesExplicitlyEvicted();
- long evictBinsStrip = environmentStats.getNBINsStripped();
- long cleanerRuns = environmentStats.getNCleanerRuns();
- long cleanerDeletions = environmentStats.getNCleanerDeletions();
- long cleanerEntriesRead = environmentStats.getNCleanerEntriesRead();
- long cleanerINCleaned = environmentStats.getNINsCleaned();
- long checkPoints = environmentStats.getNCheckpoints();
- if (evictPasses != 0)
- {
- if (!evicting)
- {
- evicting = true;
- }
- logger.info(NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS, evictPasses,
- evictNodes, evictBinsStrip);
- }
- if (cleanerRuns != 0)
- {
- logger.info(NOTE_JEB_IMPORT_LDIF_CLEANER_STATS, cleanerRuns,
- cleanerDeletions, cleanerEntriesRead, cleanerINCleaned);
- }
- if (checkPoints > 1)
- {
- logger.info(NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS, checkPoints);
- }
- previousStats = environmentStats;
- }
- catch (DatabaseException e)
- {
- // Unlikely to happen and not critical.
- }
- previousCount = latestCount;
- previousTime = latestTime;
-
- //Do DN index managers first.
- for (IndexManager indexMgrDN : DNIndexMgrList)
- {
- indexMgrDN.printStats(deltaTime);
- }
- //Do non-DN index managers.
- for (IndexManager indexMgr : indexMgrList)
- {
- indexMgr.printStats(deltaTime);
- }
- }
- }
-
- /**
- * A class to hold information about the entry determined by the LDIF reader.
- * Mainly the suffix the entry belongs under and the ID assigned to it by the
- * reader.
- */
- public class EntryInformation
- {
- private EntryID entryID;
- private Suffix suffix;
-
- /**
- * Return the suffix associated with the entry.
- *
- * @return Entry's suffix instance;
- */
- public Suffix getSuffix()
- {
- return suffix;
- }
-
- /**
- * Set the suffix instance associated with the entry.
- *
- * @param suffix
- * The suffix associated with the entry.
- */
- public void setSuffix(Suffix suffix)
- {
- this.suffix = suffix;
- }
-
- /**
- * Set the entry's ID.
- *
- * @param entryID
- * The entry ID to set the entry ID to.
- */
- public void setEntryID(EntryID entryID)
- {
- this.entryID = entryID;
- }
-
- /**
- * Return the entry ID associated with the entry.
- *
- * @return The entry ID associated with the entry.
- */
- public EntryID getEntryID()
- {
- return entryID;
- }
- }
-
- /**
- * This class defines the individual index type available.
- */
- private enum ImportIndexType
- {
- /** The DN index type. */
- DN,
- /** The equality index type. */
- EQUALITY,
- /** The presence index type. */
- PRESENCE,
- /** The sub-string index type. */
- SUBSTRING,
- /** The ordering index type. */
- ORDERING,
- /** The approximate index type. */
- APPROXIMATE,
- /** The extensible sub-string index type. */
- EX_SUBSTRING,
- /** The extensible shared index type. */
- EX_SHARED,
- /** The vlv index type. */
- VLV
- }
-
- /**
- * This class is used as an index key for hash maps that need to process
- * multiple suffix index elements into a single queue and/or maps based on
- * both attribute type and index type (ie., cn.equality, sn.equality,...).
- */
- public static class IndexKey
- {
-
- private final AttributeType attributeType;
- private final String indexName;
- private final int entryLimit;
-
- /**
- * Create index key instance using the specified attribute type, index type
- * and index entry limit.
- *
- * @param attributeType
- * The attribute type.
- * @param indexType
- * The index type.
- * @param entryLimit
- * The entry limit for the index.
- */
- private IndexKey(AttributeType attributeType, String indexName, int entryLimit)
- {
- this.attributeType = attributeType;
- this.indexName = indexName;
- this.entryLimit = entryLimit;
- }
-
- /**
- * An equals method that uses both the attribute type and the index type.
- * Only returns {@code true} if the attribute type and index type are equal.
- *
- * @param obj
- * the object to compare.
- * @return {@code true} if the objects are equal, or {@code false} if they
- * are not.
- */
- @Override
- public boolean equals(Object obj)
- {
- if (obj instanceof IndexKey)
- {
- IndexKey oKey = (IndexKey) obj;
- if (attributeType.equals(oKey.getAttributeType())
- && indexName.equals(oKey.indexName))
- {
- return true;
- }
- }
- return false;
- }
-
- /**
- * A hash code method that adds the hash codes of the attribute type and
- * index type and returns that value.
- *
- * @return The combined hash values of attribute type hash code and the
- * index type hash code.
- */
- @Override
- public int hashCode()
- {
- return attributeType.hashCode() + indexName.hashCode();
- }
-
- /**
- * Return the attribute type.
- *
- * @return The attribute type.
- */
- public AttributeType getAttributeType()
- {
- return attributeType;
- }
-
- /**
- * Return the index type.
- *
- * @return The index type.
- */
- public String getIndexName()
- {
- return indexName;
- }
-
- /**
- * Return the index key name, which is the attribute type primary name, a
- * period, and the index type name. Used for building file names and
- * progress output.
- *
- * @return The index key name.
- */
- public String getName()
- {
- return attributeType.getPrimaryName() + "."
- + StaticUtils.toLowerCase(indexName);
- }
-
- /**
- * Return the entry limit associated with the index.
- *
- * @return The entry limit.
- */
- public int getEntryLimit()
- {
- return entryLimit;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return getClass().getSimpleName()
- + "(index=" + attributeType.getNameOrOID() + "." + indexName
- + ", entryLimit=" + entryLimit
- + ")";
- }
- }
-
- /**
- * The temporary environment will be shared when multiple suffixes are being
- * processed. This interface is used by those suffix instance to do parental
- * checking of the DN cache.
- */
- public static interface DNCache
- {
-
- /**
- * Returns {@code true} if the specified DN is contained in the DN cache, or
- * {@code false} otherwise.
- *
- * @param dn
- * The DN to check the presence of.
- * @return {@code true} if the cache contains the DN, or {@code false} if it
- * is not.
- * @throws DatabaseException
- * If an error occurs reading the database.
- */
- boolean contains(DN dn) throws DatabaseException;
- }
-
- /**
- * Temporary environment used to check DN's when DN validation is performed
- * during phase one processing. It is deleted after phase one processing.
- */
- private final class TmpEnv implements DNCache
- {
- private final String envPath;
- private final Environment environment;
- private static final String DB_NAME = "dn_cache";
- private final Database dnCache;
-
- /**
- * Create a temporary DB environment and database to be used as a cache of
- * DNs when DN validation is performed in phase one processing.
- *
- * @param envPath
- * The file path to create the environment under.
- * @throws DatabaseException
- * If an error occurs either creating the environment or the DN
- * database.
- */
- private TmpEnv(File envPath) throws DatabaseException
- {
- EnvironmentConfig envConfig = new EnvironmentConfig();
- envConfig.setConfigParam(ENV_RUN_CLEANER, "true");
- envConfig.setReadOnly(false);
- envConfig.setAllowCreate(true);
- envConfig.setTransactional(false);
- envConfig.setConfigParam(ENV_IS_LOCKING, "true");
- envConfig.setConfigParam(ENV_RUN_CHECKPOINTER, "false");
- envConfig.setConfigParam(EVICTOR_LRU_ONLY, "false");
- envConfig.setConfigParam(EVICTOR_NODES_PER_SCAN, "128");
- envConfig.setConfigParam(MAX_MEMORY, Long.toString(tmpEnvCacheSize));
- DatabaseConfig dbConfig = new DatabaseConfig();
- dbConfig.setAllowCreate(true);
- dbConfig.setTransactional(false);
- dbConfig.setTemporary(true);
- environment = new Environment(envPath, envConfig);
- dnCache = environment.openDatabase(null, DB_NAME, dbConfig);
- this.envPath = envPath.getPath();
- }
-
- private static final long FNV_INIT = 0xcbf29ce484222325L;
- private static final long FNV_PRIME = 0x100000001b3L;
-
- /** Hash the DN bytes. Uses the FNV-1a hash. */
- private byte[] hashCode(byte[] b)
- {
- long hash = FNV_INIT;
- for (byte aB : b)
- {
- hash ^= aB;
- hash *= FNV_PRIME;
- }
- return JebFormat.entryIDToDatabase(hash);
- }
-
- /**
- * Shutdown the temporary environment.
- *
- * @throws JebException
- * If error occurs.
- */
- private void shutdown() throws JebException
- {
- dnCache.close();
- environment.close();
- EnvManager.removeFiles(envPath);
- }
-
- /**
- * Insert the specified DN into the DN cache. It will return {@code true} if
- * the DN does not already exist in the cache and was inserted, or
- * {@code false} if the DN exists already in the cache.
- *
- * @param dn
- * The DN to insert in the cache.
- * @param val
- * A database entry to use in the insert.
- * @param key
- * A database entry to use in the insert.
- * @return {@code true} if the DN was inserted in the cache, or
- * {@code false} if the DN exists in the cache already and could not
- * be inserted.
- * @throws JebException
- * If an error occurs accessing the database.
- */
- private boolean insert(DN dn, DatabaseEntry val, DatabaseEntry key)
- throws JebException
- {
- byte[] dnBytes = dn.toNormalizedByteString().toByteArray();
- key.setData(hashCode(dnBytes));
-
- byte[] dnBytesForValue = dnBytes;
- int len = PackedInteger.getWriteIntLength(dnBytesForValue.length);
- byte[] dataBytes = new byte[dnBytesForValue.length + len];
- int pos = PackedInteger.writeInt(dataBytes, 0, dnBytesForValue.length);
- System.arraycopy(dnBytesForValue, 0, dataBytes, pos, dnBytesForValue.length);
- val.setData(dataBytes);
-
- return insert(key, val, dnBytesForValue);
- }
-
- private boolean insert(DatabaseEntry key, DatabaseEntry val, byte[] dnBytesForValue)
- throws JebException
- {
- Cursor cursor = null;
- try
- {
- cursor = dnCache.openCursor(null, CursorConfig.DEFAULT);
- OperationStatus status = cursor.putNoOverwrite(key, val);
- if (status == OperationStatus.KEYEXIST)
- {
- DatabaseEntry dns = new DatabaseEntry();
- status = cursor.getSearchKey(key, dns, LockMode.RMW);
- if (status == OperationStatus.NOTFOUND)
- {
- throw new JebException(LocalizableMessage.raw("Search DN cache failed."));
- }
- if (!isDNMatched(dns.getData(), dnBytesForValue))
- {
- addDN(dns.getData(), cursor, dnBytesForValue);
- return true;
- }
- return false;
- }
- return true;
- }
- finally
- {
- close(cursor);
- }
- }
-
- /** Add the DN to the DNs as because of a hash collision. */
- private void addDN(byte[] readDnBytes, Cursor cursor, byte[] dnBytesForValue) throws JebException
- {
- int pLen = PackedInteger.getWriteIntLength(dnBytesForValue.length);
- int totLen = readDnBytes.length + pLen + dnBytesForValue.length;
- byte[] newRec = new byte[totLen];
- System.arraycopy(readDnBytes, 0, newRec, 0, readDnBytes.length);
- int pos = PackedInteger.writeInt(newRec, readDnBytes.length, dnBytesForValue.length);
- System.arraycopy(dnBytesForValue, 0, newRec, pos, dnBytesForValue.length);
- DatabaseEntry newVal = new DatabaseEntry(newRec);
- OperationStatus status = cursor.putCurrent(newVal);
- if (status != OperationStatus.SUCCESS)
- {
- throw new JebException(LocalizableMessage.raw("Add of DN to DN cache failed."));
- }
- }
-
- /** Return true if the specified DN is in the DNs saved as a result of hash collisions. */
- private boolean isDNMatched(byte[] readDnBytes, byte[] dnBytes)
- {
- int pos = 0;
- while (pos < readDnBytes.length)
- {
- int pLen = PackedInteger.getReadIntLength(readDnBytes, pos);
- int len = PackedInteger.readInt(readDnBytes, pos);
- if (indexComparator.compare(readDnBytes, pos + pLen, len, dnBytes, dnBytes.length) == 0)
- {
- return true;
- }
- pos += pLen + len;
- }
- return false;
- }
-
- /**
- * Check if the specified DN is contained in the temporary DN cache.
- *
- * @param dn
- * A DN check for.
- * @return {@code true} if the specified DN is in the temporary DN cache, or
- * {@code false} if it is not.
- */
- @Override
- public boolean contains(DN dn)
- {
- Cursor cursor = null;
- DatabaseEntry key = new DatabaseEntry();
- byte[] dnBytes = dn.toNormalizedByteString().toByteArray();
- key.setData(hashCode(dnBytes));
- try
- {
- cursor = dnCache.openCursor(null, CursorConfig.DEFAULT);
- DatabaseEntry dns = new DatabaseEntry();
- OperationStatus status = cursor.getSearchKey(key, dns, LockMode.DEFAULT);
- return status == OperationStatus.SUCCESS && isDNMatched(dns.getData(), dnBytes);
- }
- finally
- {
- close(cursor);
- }
- }
-
- /**
- * Return temporary environment stats.
- *
- * @param statsConfig
- * A stats configuration instance.
- * @return Environment stats.
- * @throws DatabaseException
- * If an error occurs retrieving the stats.
- */
- private EnvironmentStats getEnvironmentStats(StatsConfig statsConfig)
- throws DatabaseException
- {
- return environment.getStats(statsConfig);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void diskLowThresholdReached(File directory, long thresholdInBytes)
- {
- diskFullThresholdReached(directory, thresholdInBytes);
- }
-
- /** {@inheritDoc} */
- @Override
- public void diskFullThresholdReached(File directory, long thresholdInBytes)
- {
- isCanceled = true;
- Arg2<Object, Number> argMsg = !isPhaseOneDone
- ? ERR_IMPORT_LDIF_LACK_DISK_PHASE_ONE
- : ERR_IMPORT_LDIF_LACK_DISK_PHASE_TWO;
- logger.error(argMsg.get(directory.getAbsolutePath(), thresholdInBytes));
- }
-
- /** {@inheritDoc} */
- @Override
- public void diskSpaceRestored(File directory, long lowThresholdInBytes, long fullThresholdInBytes)
- {
- // Do nothing.
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Index.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Index.java
deleted file mode 100644
index 516622e..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Index.java
+++ /dev/null
@@ -1,840 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2012-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.OperationStatus.*;
-
-import static org.opends.messages.BackendMessages.*;
-
-import java.util.*;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.opends.server.backends.jeb.IndexBuffer.BufferedIndexValues;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-/**
- * Represents an index implemented by a JE database in which each key maps to
- * a set of entry IDs. The key is a byte array, and is constructed from some
- * normalized form of an attribute value (or fragment of a value) appearing
- * in the entry.
- */
-public class Index extends DatabaseContainer
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The indexer object to construct index keys from LDAP attribute values. */
- private Indexer indexer;
-
- /** The limit on the number of entry IDs that may be indexed by one key. */
- private int indexEntryLimit;
- /**
- * Limit on the number of entry IDs that may be retrieved by cursoring
- * through an index.
- */
- private final int cursorEntryLimit;
- /**
- * Number of keys that have exceeded the entry limit since this
- * object was created.
- */
- private int entryLimitExceededCount;
-
- /** The max number of tries to rewrite phantom records. */
- private final int phantomWriteRetries = 3;
-
- /**
- * Whether to maintain a count of IDs for a key once the entry limit
- * has exceeded.
- */
- private final boolean maintainCount;
-
- private final State state;
-
- /**
- * A flag to indicate if this index should be trusted to be consistent
- * with the entries database. If not trusted, we assume that existing
- * entryIDSets for a key is still accurate. However, keys that do not
- * exist are undefined instead of an empty entryIDSet. The following
- * rules will be observed when the index is not trusted:
- *
- * - no entryIDs will be added to a non-existing key.
- * - undefined entryIdSet will be returned whenever a key is not found.
- */
- private boolean trusted;
-
- private final ImportIDSet newImportIDSet;
-
- /**
- * Create a new index object.
- * @param name The name of the index database within the entryContainer.
- * @param indexer The indexer object to construct index keys from LDAP
- * attribute values.
- * @param state The state database to persist index state info.
- * @param indexEntryLimit The configured limit on the number of entry IDs
- * that may be indexed by one key.
- * @param cursorEntryLimit The configured limit on the number of entry IDs
- * @param maintainCount Whether to maintain a count of IDs for a key once
- * the entry limit has exceeded.
- * @param env The JE Environment
- * @param entryContainer The database entryContainer holding this index.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- @SuppressWarnings("unchecked")
- Index(String name, Indexer indexer, State state,
- int indexEntryLimit, int cursorEntryLimit, boolean maintainCount,
- Environment env, EntryContainer entryContainer)
- throws DatabaseException
- {
- super(name, env, entryContainer);
- this.indexer = indexer;
- this.indexEntryLimit = indexEntryLimit;
- this.cursorEntryLimit = cursorEntryLimit;
- this.maintainCount = maintainCount;
- this.newImportIDSet = new ImportIDSet(indexEntryLimit,
- indexEntryLimit, maintainCount);
-
- this.dbConfig = JEBUtils.toDatabaseConfigNoDuplicates(env);
- this.dbConfig.setOverrideBtreeComparator(true);
- this.dbConfig.setBtreeComparator((Class<? extends Comparator<byte[]>>)
- indexer.getComparator().getClass());
-
- this.state = state;
-
- this.trusted = state.getIndexTrustState(null, this);
- if (!trusted && entryContainer.getHighestEntryID().longValue() == 0)
- {
- // If there are no entries in the entry container then there
- // is no reason why this index can't be upgraded to trusted.
- setTrusted(null, true);
- }
- }
-
- void indexEntry(Entry entry, Set<ByteString> keys)
- {
- indexer.indexEntry(entry, keys);
- }
-
- /**
- * Add an add entry ID operation into a index buffer.
- *
- * @param buffer The index buffer to insert the ID into.
- * @param keyBytes The index key bytes.
- * @param entryID The entry ID.
- */
- void insertID(IndexBuffer buffer, ByteString keyBytes, EntryID entryID)
- {
- getBufferedIndexValues(buffer, keyBytes).addEntryID(keyBytes, entryID);
- }
-
- /**
- * Delete the specified import ID set from the import ID set associated with the key.
- *
- * @param key The key to delete the set from.
- * @param importIdSet The import ID set to delete.
- * @param data A database entry to use for data.
- * @throws DatabaseException If a database error occurs.
- */
- public void delete(DatabaseEntry key, ImportIDSet importIdSet, DatabaseEntry data) throws DatabaseException {
- if (read(null, key, data, LockMode.DEFAULT) == SUCCESS) {
- newImportIDSet.clear();
- newImportIDSet.remove(data.getData(), importIdSet);
- if (newImportIDSet.isDefined() && newImportIDSet.size() == 0)
- {
- delete(null, key);
- }
- else
- {
- data.setData(newImportIDSet.toDatabase());
- put(null, key, data);
- }
- } else {
- // Should never happen -- the keys should always be there.
- throw new RuntimeException();
- }
- }
-
- /**
- * Insert the specified import ID set into this index. Creates a DB cursor if needed.
- *
- * @param key The key to add the set to.
- * @param importIdSet The set of import IDs.
- * @param data Database entry to reuse for read.
- * @throws DatabaseException If a database error occurs.
- */
- public void insert(DatabaseEntry key, ImportIDSet importIdSet, DatabaseEntry data) throws DatabaseException {
- final OperationStatus status = read(null, key, data, LockMode.DEFAULT);
- if(status == OperationStatus.SUCCESS) {
- newImportIDSet.clear();
- if (newImportIDSet.merge(data.getData(), importIdSet))
- {
- entryLimitExceededCount++;
- }
- data.setData(newImportIDSet.toDatabase());
- put(null, key, data);
- } else if(status == OperationStatus.NOTFOUND) {
- if(!importIdSet.isDefined()) {
- entryLimitExceededCount++;
- }
- data.setData(importIdSet.toDatabase());
- put(null, key, data);
- } else {
- // Should never happen during import.
- throw new RuntimeException();
- }
- }
-
- /**
- * Update the set of entry IDs for a given key.
- *
- * @param txn A database transaction, or null if none is required.
- * @param key The database key.
- * @param deletedIDs The IDs to remove for the key.
- * @param addedIDs the IDs to add for the key.
- * @throws DatabaseException If a database error occurs.
- */
- void updateKey(Transaction txn, DatabaseEntry key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
- throws DatabaseException
- {
- DatabaseEntry data = new DatabaseEntry();
-
- if(deletedIDs == null && addedIDs == null)
- {
- final OperationStatus status = delete(txn, key);
- if (status != SUCCESS && logger.isTraceEnabled())
- {
- StringBuilder builder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(builder, key.getData(), 4);
- logger.trace("The expected key does not exist in the index %s.\nKey:%s ", name, builder);
- }
- return;
- }
-
- // Handle cases where nothing is changed early to avoid DB access.
- if (isNullOrEmpty(deletedIDs) && isNullOrEmpty(addedIDs))
- {
- return;
- }
-
- if(maintainCount)
- {
- for (int i = 0; i < phantomWriteRetries; i++)
- {
- if (updateKeyWithRMW(txn, key, data, deletedIDs, addedIDs) == SUCCESS)
- {
- return;
- }
- }
- }
- else
- {
- OperationStatus status = read(txn, key, data, LockMode.READ_COMMITTED);
- if(status == OperationStatus.SUCCESS)
- {
- EntryIDSet entryIDList = new EntryIDSet(key.getData(), data.getData());
- if (entryIDList.isDefined())
- {
- for (int i = 0; i < phantomWriteRetries; i++)
- {
- if (updateKeyWithRMW(txn, key, data, deletedIDs, addedIDs) == SUCCESS)
- {
- return;
- }
- }
- }
- }
- else if (trusted)
- {
- if (deletedIDs != null)
- {
- logIndexCorruptError(txn, key);
- }
-
- if (isNotNullOrEmpty(addedIDs))
- {
- data.setData(addedIDs.toDatabase());
-
- status = insert(txn, key, data);
- if(status == OperationStatus.KEYEXIST)
- {
- for (int i = 1; i < phantomWriteRetries; i++)
- {
- if (updateKeyWithRMW(txn, key, data, deletedIDs, addedIDs) == SUCCESS)
- {
- return;
- }
- }
- }
- }
- }
- }
- }
-
- private boolean isNullOrEmpty(EntryIDSet entryIDSet)
- {
- return entryIDSet == null || entryIDSet.size() == 0;
- }
-
- private boolean isNotNullOrEmpty(EntryIDSet entryIDSet)
- {
- return entryIDSet != null && entryIDSet.size() > 0;
- }
-
- private OperationStatus updateKeyWithRMW(Transaction txn,
- DatabaseEntry key,
- DatabaseEntry data,
- EntryIDSet deletedIDs,
- EntryIDSet addedIDs)
- throws DatabaseException
- {
- final OperationStatus status = read(txn, key, data, LockMode.RMW);
- if(status == SUCCESS)
- {
- EntryIDSet entryIDList = computeEntryIDList(key, data, deletedIDs, addedIDs);
- byte[] after = entryIDList.toDatabase();
- if (after != null)
- {
- data.setData(after);
- return put(txn, key, data);
- }
- else
- {
- // No more IDs, so remove the key. If index is not
- // trusted then this will cause all subsequent reads
- // for this key to return undefined set.
- return delete(txn, key);
- }
- }
- else if (trusted)
- {
- if (deletedIDs != null)
- {
- logIndexCorruptError(txn, key);
- }
-
- if (isNotNullOrEmpty(addedIDs))
- {
- data.setData(addedIDs.toDatabase());
- return insert(txn, key, data);
- }
- }
- return OperationStatus.SUCCESS;
- }
-
- private EntryIDSet computeEntryIDList(DatabaseEntry key, DatabaseEntry data, EntryIDSet deletedIDs,
- EntryIDSet addedIDs)
- {
- EntryIDSet entryIDList = new EntryIDSet(key.getData(), data.getData());
- if(addedIDs != null)
- {
- if(entryIDList.isDefined() && indexEntryLimit > 0)
- {
- long idCountDelta = addedIDs.size();
- if(deletedIDs != null)
- {
- idCountDelta -= deletedIDs.size();
- }
- if(idCountDelta + entryIDList.size() >= indexEntryLimit)
- {
- if(maintainCount)
- {
- entryIDList = new EntryIDSet(entryIDList.size() + idCountDelta);
- }
- else
- {
- entryIDList = new EntryIDSet();
- }
- entryLimitExceededCount++;
-
- if(logger.isTraceEnabled())
- {
- StringBuilder builder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(builder, key.getData(), 4);
- logger.trace("Index entry exceeded in index %s. " +
- "Limit: %d. ID list size: %d.\nKey:%s",
- name, indexEntryLimit, idCountDelta + addedIDs.size(), builder);
-
- }
- }
- else
- {
- entryIDList.addAll(addedIDs);
- if(deletedIDs != null)
- {
- entryIDList.deleteAll(deletedIDs);
- }
- }
- }
- else
- {
- entryIDList.addAll(addedIDs);
- if(deletedIDs != null)
- {
- entryIDList.deleteAll(deletedIDs);
- }
- }
- }
- else if(deletedIDs != null)
- {
- entryIDList.deleteAll(deletedIDs);
- }
- return entryIDList;
- }
-
- /**
- * Add an remove entry ID operation into a index buffer.
- *
- * @param buffer The index buffer to insert the ID into.
- * @param keyBytes The index key bytes.
- * @param entryID The entry ID.
- */
- void removeID(IndexBuffer buffer, ByteString keyBytes, EntryID entryID)
- {
- getBufferedIndexValues(buffer, keyBytes).deleteEntryID(keyBytes, entryID);
- }
-
- private void logIndexCorruptError(Transaction txn, DatabaseEntry key)
- {
- if (logger.isTraceEnabled())
- {
- StringBuilder builder = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(builder, key.getData(), 4);
- logger.trace("The expected key does not exist in the index %s.\nKey:%s", name, builder);
- }
-
- setTrusted(txn, false);
- logger.error(ERR_INDEX_CORRUPT_REQUIRES_REBUILD, name);
- }
-
- /**
- * Buffered delete of a key from the JE database.
- * @param buffer The index buffer to use to store the deleted keys
- * @param keyBytes The index key bytes.
- */
- public void delete(IndexBuffer buffer, ByteString keyBytes)
- {
- getBufferedIndexValues(buffer, keyBytes);
- }
-
- private BufferedIndexValues getBufferedIndexValues(IndexBuffer buffer, ByteString keyBytes)
- {
- return buffer.getBufferedIndexValues(this, keyBytes, indexer.getBSComparator());
- }
-
- /**
- * Check if an entry ID is in the set of IDs indexed by a given key.
- *
- * @param txn A database transaction, or null if none is required.
- * @param key The index key.
- * @param entryID The entry ID.
- * @return true if the entry ID is indexed by the given key,
- * false if it is not indexed by the given key,
- * undefined if the key has exceeded the entry limit.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public ConditionResult containsID(Transaction txn, DatabaseEntry key, EntryID entryID)
- throws DatabaseException
- {
- DatabaseEntry data = new DatabaseEntry();
-
- OperationStatus status = read(txn, key, data, LockMode.DEFAULT);
- if (status == SUCCESS)
- {
- EntryIDSet entryIDList = new EntryIDSet(key.getData(), data.getData());
- if (!entryIDList.isDefined())
- {
- return ConditionResult.UNDEFINED;
- }
- return ConditionResult.valueOf(entryIDList.contains(entryID));
- }
- else if (trusted)
- {
- return ConditionResult.FALSE;
- }
- else
- {
- return ConditionResult.UNDEFINED;
- }
- }
-
- /**
- * Reads the set of entry IDs for a given key.
- *
- * @param key The database key.
- * @param txn A database transaction, or null if none is required.
- * @param lockMode The JE locking mode to be used for the database read.
- * @return The entry IDs indexed by this key.
- */
- public EntryIDSet readKey(DatabaseEntry key, Transaction txn, LockMode lockMode)
- {
- try
- {
- DatabaseEntry data = new DatabaseEntry();
- OperationStatus status = read( txn, key, data, lockMode);
- if (status != SUCCESS)
- {
- if(trusted)
- {
- return new EntryIDSet(key.getData(), null);
- }
- else
- {
- return new EntryIDSet();
- }
- }
- return new EntryIDSet(key.getData(), data.getData());
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- return new EntryIDSet();
- }
- }
-
- /**
- * Writes the set of entry IDs for a given key.
- *
- * @param key The database key.
- * @param entryIDList The entry IDs indexed by this key.
- * @param txn A database transaction, or null if none is required.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public void writeKey(Transaction txn, DatabaseEntry key, EntryIDSet entryIDList)
- throws DatabaseException
- {
- DatabaseEntry data = new DatabaseEntry();
- byte[] after = entryIDList.toDatabase();
- if (after != null)
- {
- if (!entryIDList.isDefined())
- {
- entryLimitExceededCount++;
- }
- data.setData(after);
- put(txn, key, data);
- }
- else
- {
- // No more IDs, so remove the key.
- delete(txn, key);
- }
- }
-
- /**
- * Reads a range of keys and collects all their entry IDs into a
- * single set.
- *
- * @param lower The lower bound of the range. A 0 length byte array indicates
- * no lower bound and the range will start from the
- * smallest key.
- * @param upper The upper bound of the range. A 0 length byte array indicates
- * no upper bound and the range will end at the largest
- * key.
- * @param lowerIncluded true if a key exactly matching the lower bound
- * is included in the range, false if only keys
- * strictly greater than the lower bound are included.
- * This value is ignored if the lower bound is not
- * specified.
- * @param upperIncluded true if a key exactly matching the upper bound
- * is included in the range, false if only keys
- * strictly less than the upper bound are included.
- * This value is ignored if the upper bound is not
- * specified.
- * @return The set of entry IDs.
- */
- public EntryIDSet readRange(byte[] lower, byte[] upper,
- boolean lowerIncluded, boolean upperIncluded)
- {
- // If this index is not trusted, then just return an undefined id set.
- if (!trusted)
- {
- return new EntryIDSet();
- }
-
- try
- {
- // Total number of IDs found so far.
- int totalIDCount = 0;
- LockMode lockMode = LockMode.DEFAULT;
-
- DatabaseEntry data = new DatabaseEntry();
- DatabaseEntry key;
-
- ArrayList<EntryIDSet> lists = new ArrayList<>();
-
- Cursor cursor = openCursor(null, CursorConfig.READ_COMMITTED);
- try
- {
- final Comparator<byte[]> comparator = indexer.getComparator();
- OperationStatus status;
- // Set the lower bound if necessary.
- if(lower.length > 0)
- {
- key = new DatabaseEntry(lower);
-
- // Initialize the cursor to the lower bound.
- status = cursor.getSearchKeyRange(key, data, lockMode);
-
- // Advance past the lower bound if necessary.
- if (status == SUCCESS && !lowerIncluded &&
- comparator.compare(key.getData(), lower) == 0)
- {
- // Do not include the lower value.
- status = cursor.getNext(key, data, lockMode);
- }
- }
- else
- {
- key = new DatabaseEntry();
- status = cursor.getNext(key, data, lockMode);
- }
-
- if (status != OperationStatus.SUCCESS)
- {
- // There are no values.
- return new EntryIDSet(key.getData(), null);
- }
-
- // Step through the keys until we hit the upper bound or the last key.
- while (status == OperationStatus.SUCCESS)
- {
- // Check against the upper bound if necessary
- if(upper.length > 0)
- {
- int cmp = comparator.compare(key.getData(), upper);
- if (cmp > 0 || (cmp == 0 && !upperIncluded))
- {
- break;
- }
- }
- EntryIDSet list = new EntryIDSet(key.getData(), data.getData());
- if (!list.isDefined())
- {
- // There is no point continuing.
- return list;
- }
- totalIDCount += list.size();
- if (cursorEntryLimit > 0 && totalIDCount > cursorEntryLimit)
- {
- // There are too many. Give up and return an undefined list.
- return new EntryIDSet();
- }
- lists.add(list);
- status = cursor.getNext(key, data, LockMode.DEFAULT);
- }
-
- return EntryIDSet.unionOfSets(lists, false);
- }
- finally
- {
- cursor.close();
- }
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- return new EntryIDSet();
- }
- }
-
- /**
- * Get the number of keys that have exceeded the entry limit since this
- * object was created.
- * @return The number of keys that have exceeded the entry limit since this
- * object was created.
- */
- public int getEntryLimitExceededCount()
- {
- return entryLimitExceededCount;
- }
-
- /**
- * Update the index buffer for a deleted entry.
- *
- * @param buffer The index buffer to use to store the deleted keys
- * @param entryID The entry ID.
- * @param entry The entry to be indexed.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- public void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry) throws DatabaseException, DirectoryException
- {
- final Set<ByteString> addKeys = new HashSet<>();
- indexer.indexEntry(entry, addKeys);
-
- for (ByteString keyBytes : addKeys)
- {
- insertID(buffer, keyBytes, entryID);
- }
- }
-
- /**
- * Update the index buffer for a deleted entry.
- *
- * @param buffer The index buffer to use to store the deleted keys
- * @param entryID The entry ID
- * @param entry The contents of the deleted entry.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- public void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry)
- throws DatabaseException, DirectoryException
- {
- final Set<ByteString> delKeys = new HashSet<>();
- indexer.indexEntry(entry, delKeys);
-
- for (ByteString keyBytes : delKeys)
- {
- removeID(buffer, keyBytes, entryID);
- }
- }
-
- /**
- * Update the index to reflect a sequence of modifications in a Modify
- * operation.
- *
- * @param buffer The index buffer to use to store the deleted keys
- * @param entryID The ID of the entry that was modified.
- * @param oldEntry The entry before the modifications were applied.
- * @param newEntry The entry after the modifications were applied.
- * @param mods The sequence of modifications in the Modify operation.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public void modifyEntry(IndexBuffer buffer,
- EntryID entryID,
- Entry oldEntry,
- Entry newEntry,
- List<Modification> mods)
- throws DatabaseException
- {
- final Map<ByteString, Boolean> modifiedKeys = new TreeMap<>(indexer.getBSComparator());
- indexer.modifyEntry(oldEntry, newEntry, mods, modifiedKeys);
-
- for (Map.Entry<ByteString, Boolean> modifiedKey : modifiedKeys.entrySet())
- {
- if(modifiedKey.getValue())
- {
- insertID(buffer, modifiedKey.getKey(), entryID);
- }
- else
- {
- removeID(buffer, modifiedKey.getKey(), entryID);
- }
- }
- }
-
- /**
- * Set the index entry limit.
- *
- * @param indexEntryLimit The index entry limit to set.
- * @return True if a rebuild is required or false otherwise.
- */
- public boolean setIndexEntryLimit(int indexEntryLimit)
- {
- final boolean rebuildRequired =
- this.indexEntryLimit < indexEntryLimit && entryLimitExceededCount > 0;
- this.indexEntryLimit = indexEntryLimit;
- return rebuildRequired;
- }
-
- /**
- * Set the indexer.
- *
- * @param indexer The indexer to set
- */
- public void setIndexer(Indexer indexer)
- {
- this.indexer = indexer;
- }
-
- /**
- * Return entry limit.
- *
- * @return The entry limit.
- */
- public int getIndexEntryLimit() {
- return this.indexEntryLimit;
- }
-
- /**
- * Set the index trust state.
- * @param txn A database transaction, or null if none is required.
- * @param trusted True if this index should be trusted or false
- * otherwise.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public synchronized void setTrusted(Transaction txn, boolean trusted)
- throws DatabaseException
- {
- this.trusted = trusted;
- state.putIndexTrustState(txn, this, trusted);
- }
-
- /**
- * Return true iff this index is trusted.
- * @return the trusted state of this index
- */
- public synchronized boolean isTrusted()
- {
- return trusted;
- }
-
- /**
- * Return <code>true</code> iff this index is being rebuilt.
- * @return The rebuild state of this index
- */
- public synchronized boolean isRebuildRunning()
- {
- return false; // FIXME inline?
- }
-
- /**
- * Whether this index maintains a count of IDs for keys once the
- * entry limit has exceeded.
- * @return <code>true</code> if this index maintains court of IDs
- * or <code>false</code> otherwise
- */
- public boolean getMaintainCount()
- {
- return maintainCount;
- }
-
- /**
- * Return an indexes comparator.
- *
- * @return The comparator related to an index.
- */
- public Comparator<byte[]> getComparator()
- {
- return indexer.getComparator();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexBuffer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexBuffer.java
deleted file mode 100644
index c718f32..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexBuffer.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.*;
-
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.ByteString;
-import org.opends.server.types.DirectoryException;
-
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.Transaction;
-
-/**
- * A buffered index is used to buffer multiple reads or writes to the
- * same index key into a single read or write.
- * It can only be used to buffer multiple reads and writes under
- * the same transaction. The transaction may be null if it is known
- * that there are no other concurrent updates to the index.
- */
-public class IndexBuffer
-{
- private final EntryContainer entryContainer;
-
- /**
- * The buffered records stored as a map from the record key to the
- * buffered value for that key for each index.
- */
- private final LinkedHashMap<Index, TreeMap<ByteString, BufferedIndexValues>> bufferedIndexes = new LinkedHashMap<>();
- /** The buffered records stored as a set of buffered VLV values for each index. */
- private final LinkedHashMap<VLVIndex, BufferedVLVValues> bufferedVLVIndexes = new LinkedHashMap<>();
-
- /** A simple class representing a pair of added and deleted indexed IDs. */
- static class BufferedIndexValues
- {
- private EntryIDSet addedIDs;
- private EntryIDSet deletedIDs;
-
- /**
- * Adds the provided entryID to this object associating it with the provided keyBytes.
- *
- * @param keyBytes the keyBytes mapping for this entryID
- * @param entryID the entryID to add
- */
- void addEntryID(ByteString keyBytes, EntryID entryID)
- {
- if (!remove(deletedIDs, entryID))
- {
- if (this.addedIDs == null)
- {
- this.addedIDs = new EntryIDSet(keyBytes, null);
- }
- this.addedIDs.add(entryID);
- }
- }
-
- /**
- * Deletes the provided entryID from this object.
- *
- * @param keyBytes the keyBytes mapping for this entryID
- * @param entryID the entryID to delete
- */
- void deleteEntryID(ByteString keyBytes, EntryID entryID)
- {
- if (!remove(addedIDs, entryID))
- {
- if (this.deletedIDs == null)
- {
- this.deletedIDs = new EntryIDSet(keyBytes, null);
- }
- this.deletedIDs.add(entryID);
- }
- }
-
- private boolean remove(EntryIDSet ids, EntryID entryID)
- {
- if (ids != null && ids.contains(entryID))
- {
- ids.remove(entryID);
- return true;
- }
- return false;
- }
- }
-
- /** A simple class representing a pair of added and deleted VLV values. */
- static class BufferedVLVValues
- {
- private TreeSet<SortValues> addedValues;
- private TreeSet<SortValues> deletedValues;
-
- /**
- * Adds the provided values to this object.
- *
- * @param sortValues the values to add
- */
- void addValues(SortValues sortValues)
- {
- if (!remove(deletedValues, sortValues))
- {
- if (this.addedValues == null)
- {
- this.addedValues = new TreeSet<>();
- }
- this.addedValues.add(sortValues);
- }
- }
-
- /**
- * Deletes the provided values from this object.
- *
- * @param sortValues the values to delete
- */
- void deleteValues(SortValues sortValues)
- {
- if (!remove(addedValues, sortValues))
- {
- if (this.deletedValues == null)
- {
- this.deletedValues = new TreeSet<>();
- }
- this.deletedValues.add(sortValues);
- }
- }
-
- private boolean remove(TreeSet<SortValues> values, SortValues sortValues)
- {
- if (values != null && values.contains(sortValues))
- {
- values.remove(sortValues);
- return true;
- }
- return false;
- }
- }
-
- /**
- * Construct a new empty index buffer object.
- *
- * @param entryContainer The database entryContainer using this
- * index buffer.
- */
- public IndexBuffer(EntryContainer entryContainer)
- {
- this.entryContainer = entryContainer;
- }
-
- /**
- * Get the buffered VLV values for the given VLV index.
- *
- * @param vlvIndex The VLV index with the buffered values to retrieve.
- * @return The buffered VLV values or <code>null</code> if there are
- * no buffered VLV values for the specified VLV index.
- */
- public BufferedVLVValues getVLVIndex(VLVIndex vlvIndex)
- {
- BufferedVLVValues bufferedValues = bufferedVLVIndexes.get(vlvIndex);
- if (bufferedValues == null)
- {
- bufferedValues = new BufferedVLVValues();
- bufferedVLVIndexes.put(vlvIndex, bufferedValues);
- }
- return bufferedValues;
- }
-
- /**
- * Get the buffered index values for the given index and keyBytes.
- *
- * @param index
- * The index for which to retrieve the buffered index values
- * @param keyBytes
- * The keyBytes for which to retrieve the buffered index values
- * @param bsComparator
- * The byte sequence comparator to use when retrieving the
- * BufferedIndexValues
- * @return The buffered index values, it can never be null
- */
- BufferedIndexValues getBufferedIndexValues(Index index, ByteString keyBytes, Comparator<ByteSequence> bsComparator)
- {
- BufferedIndexValues values = null;
-
- TreeMap<ByteString, BufferedIndexValues> bufferedOperations = bufferedIndexes.get(index);
- if (bufferedOperations == null)
- {
- bufferedOperations = new TreeMap<>(bsComparator);
- bufferedIndexes.put(index, bufferedOperations);
- }
- else
- {
- values = bufferedOperations.get(keyBytes);
- }
-
- if (values == null)
- {
- values = new BufferedIndexValues();
- bufferedOperations.put(keyBytes, values);
- }
- return values;
- }
-
- /**
- * Flush the buffered index changes until the given transaction to
- * the database.
- *
- * @param txn The database transaction to be used for the updates.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- public void flush(Transaction txn) throws DatabaseException, DirectoryException
- {
- DatabaseEntry key = new DatabaseEntry();
-
- for (AttributeIndex attributeIndex : entryContainer.getAttributeIndexes())
- {
- for (Index index : attributeIndex.getAllIndexes())
- {
- updateKeys(index, txn, key, bufferedIndexes.remove(index));
- }
- }
-
- for (VLVIndex vlvIndex : entryContainer.getVLVIndexes())
- {
- BufferedVLVValues bufferedVLVValues = bufferedVLVIndexes.remove(vlvIndex);
- if (bufferedVLVValues != null)
- {
- vlvIndex.updateIndex(txn, bufferedVLVValues.addedValues, bufferedVLVValues.deletedValues);
- }
- }
-
- final Index id2children = entryContainer.getID2Children();
- updateKeys(id2children, txn, key, bufferedIndexes.remove(id2children));
-
- final Index id2subtree = entryContainer.getID2Subtree();
- final TreeMap<ByteString, BufferedIndexValues> bufferedValues = bufferedIndexes.remove(id2subtree);
- if (bufferedValues != null)
- {
- /*
- * OPENDJ-1375: add keys in reverse order to be consistent with single
- * entry processing in add/delete processing. This is necessary in order
- * to avoid deadlocks.
- */
- updateKeys(id2subtree, txn, key, bufferedValues.descendingMap());
- }
- }
-
- private void updateKeys(Index index, Transaction txn, DatabaseEntry key,
- Map<ByteString, BufferedIndexValues> bufferedValues)
- {
- if (bufferedValues != null)
- {
- final Iterator<Map.Entry<ByteString, BufferedIndexValues>> it = bufferedValues.entrySet().iterator();
- while (it.hasNext())
- {
- final Map.Entry<ByteString, BufferedIndexValues> entry = it.next();
- final ByteString bufferedKey = entry.getKey();
- final BufferedIndexValues values = entry.getValue();
-
- key.setData(bufferedKey.toByteArray());
- index.updateKey(txn, key, values.deletedIDs, values.addedIDs);
-
- it.remove();
- }
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexFilter.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexFilter.java
deleted file mode 100644
index ed5c111..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexFilter.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions copyright 2011-2015 ForgeRock AS
- *
- */
-package org.opends.server.backends.jeb;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.opends.server.backends.jeb.AttributeIndex.IndexFilterType;
-import org.opends.server.core.SearchOperation;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.FilterType;
-import org.opends.server.types.SearchFilter;
-
-import static org.opends.messages.BackendMessages.*;
-
-/**
- * An index filter is used to apply a search operation to a set of indexes
- * to generate a set of candidate entries.
- */
-public class IndexFilter
-{
- /**
- * Stop processing the filter against the indexes when the
- * number of candidates is smaller than this value.
- */
- public static final int FILTER_CANDIDATE_THRESHOLD = 10;
-
- /**
- * The entry entryContainer holding the attribute indexes.
- */
- private final EntryContainer entryContainer;
-
- /**
- * The search operation provides the search base, scope and filter.
- * It can also be checked periodically for cancellation.
- */
- private final SearchOperation searchOp;
-
- /**
- * A string builder to hold a diagnostic string which helps determine
- * how the indexed contributed to the search operation.
- */
- private final StringBuilder buffer;
-
- private final DatabaseEnvironmentMonitor monitor;
-
- /**
- * Construct an index filter for a search operation.
- *
- * @param entryContainer The entry entryContainer.
- * @param searchOp The search operation to be evaluated.
- * @param monitor The monitor to gather filter usage stats.
- *
- * @param debugBuilder If not null, a diagnostic string will be written
- * which will help determine how the indexes contributed
- * to this search.
- */
- public IndexFilter(EntryContainer entryContainer,
- SearchOperation searchOp,
- StringBuilder debugBuilder,
- DatabaseEnvironmentMonitor monitor)
- {
- this.entryContainer = entryContainer;
- this.searchOp = searchOp;
- this.buffer = debugBuilder;
- this.monitor = monitor;
- }
-
- /**
- * Evaluate the search operation against the indexes.
- *
- * @return A set of entry IDs representing candidate entries.
- */
- public EntryIDSet evaluate()
- {
- if (buffer != null)
- {
- buffer.append("filter=");
- }
- return evaluateFilter(searchOp.getFilter());
- }
-
- /**
- * Evaluate a search filter against the indexes.
- *
- * @param filter The search filter to be evaluated.
- * @return A set of entry IDs representing candidate entries.
- */
- private EntryIDSet evaluateFilter(SearchFilter filter)
- {
- EntryIDSet candidates = evaluate(filter);
- if (buffer != null)
- {
- candidates.toString(buffer);
- }
- return candidates;
- }
-
- private EntryIDSet evaluate(SearchFilter filter)
- {
- switch (filter.getFilterType())
- {
- case AND:
- if (buffer != null)
- {
- buffer.append("(&");
- }
- final EntryIDSet res1 = evaluateLogicalAndFilter(filter);
- if (buffer != null)
- {
- buffer.append(")");
- }
- return res1;
-
- case OR:
- if (buffer != null)
- {
- buffer.append("(|");
- }
- final EntryIDSet res2 = evaluateLogicalOrFilter(filter);
- if (buffer != null)
- {
- buffer.append(")");
- }
- return res2;
-
- case EQUALITY:
- return evaluateFilterWithDiagnostic(IndexFilterType.EQUALITY, filter);
-
- case GREATER_OR_EQUAL:
- return evaluateFilterWithDiagnostic(IndexFilterType.GREATER_OR_EQUAL, filter);
-
- case SUBSTRING:
- return evaluateFilterWithDiagnostic(IndexFilterType.SUBSTRING, filter);
-
- case LESS_OR_EQUAL:
- return evaluateFilterWithDiagnostic(IndexFilterType.LESS_OR_EQUAL, filter);
-
- case PRESENT:
- return evaluateFilterWithDiagnostic(IndexFilterType.PRESENCE, filter);
-
- case APPROXIMATE_MATCH:
- return evaluateFilterWithDiagnostic(IndexFilterType.APPROXIMATE, filter);
-
- case EXTENSIBLE_MATCH:
- if (buffer!= null)
- {
- filter.toString(buffer);
- }
- return evaluateExtensibleFilter(filter);
-
- case NOT:
- default:
- if (buffer != null)
- {
- filter.toString(buffer);
- }
- //NYI
- return new EntryIDSet();
- }
- }
-
- /**
- * Evaluate a logical AND search filter against the indexes.
- *
- * @param andFilter The AND search filter to be evaluated.
- * @return A set of entry IDs representing candidate entries.
- */
- private EntryIDSet evaluateLogicalAndFilter(SearchFilter andFilter)
- {
- // Start off with an undefined set.
- EntryIDSet results = new EntryIDSet();
-
- // Put the slow range filters (greater-or-equal, less-or-equal)
- // into a hash map, the faster components (equality, presence, approx)
- // into one list and the remainder into another list.
-
- ArrayList<SearchFilter> fastComps = new ArrayList<>();
- ArrayList<SearchFilter> otherComps = new ArrayList<>();
- HashMap<AttributeType, ArrayList<SearchFilter>> rangeComps = new HashMap<>();
-
- for (SearchFilter filter : andFilter.getFilterComponents())
- {
- FilterType filterType = filter.getFilterType();
- if (filterType == FilterType.GREATER_OR_EQUAL ||
- filterType == FilterType.LESS_OR_EQUAL)
- {
- ArrayList<SearchFilter> rangeList;
- rangeList = rangeComps.get(filter.getAttributeType());
- if (rangeList == null)
- {
- rangeList = new ArrayList<>();
- rangeComps.put(filter.getAttributeType(), rangeList);
- }
- rangeList.add(filter);
- }
- else if (filterType == FilterType.EQUALITY ||
- filterType == FilterType.PRESENT ||
- filterType == FilterType.APPROXIMATE_MATCH)
- {
- fastComps.add(filter);
- }
- else
- {
- otherComps.add(filter);
- }
- }
-
- // First, process the fast components.
- if (evaluateFilters(results, fastComps)
- // Next, process the other (non-range) components.
- || evaluateFilters(results, otherComps)
- // Are there any range component pairs like (cn>=A)(cn<=B) ?
- || rangeComps.isEmpty())
- {
- return results;
- }
-
- // Next, process range component pairs like (cn>=A)(cn<=B).
- ArrayList<SearchFilter> remainComps = new ArrayList<>();
- for (Map.Entry<AttributeType, ArrayList<SearchFilter>> rangeEntry : rangeComps.entrySet())
- {
- ArrayList<SearchFilter> rangeList = rangeEntry.getValue();
- if (rangeList.size() == 2)
- {
- SearchFilter filter1 = rangeList.get(0);
- SearchFilter filter2 = rangeList.get(1);
-
- AttributeIndex attributeIndex = entryContainer.getAttributeIndex(rangeEntry.getKey());
- if (attributeIndex == null)
- {
- if(monitor.isFilterUseEnabled())
- {
- monitor.updateStats(SearchFilter.createANDFilter(rangeList),
- INFO_INDEX_FILTER_INDEX_TYPE_DISABLED.get("ordering", rangeEntry.getKey().getNameOrOID()));
- }
- continue;
- }
-
- EntryIDSet set = attributeIndex.evaluateBoundedRange(filter1, filter2, buffer, monitor);
- if(monitor.isFilterUseEnabled() && set.isDefined())
- {
- monitor.updateStats(SearchFilter.createANDFilter(rangeList), set.size());
- }
- if (retainAll(results, set))
- {
- return results;
- }
- }
- else
- {
- // Add to the remaining range components to be processed.
- remainComps.addAll(rangeList);
- }
- }
-
- // Finally, process the remaining slow range components.
- evaluateFilters(results, remainComps);
-
- return results;
- }
-
- private boolean evaluateFilters(EntryIDSet results, ArrayList<SearchFilter> filters)
- {
- for (SearchFilter filter : filters)
- {
- final EntryIDSet filteredSet = evaluateFilter(filter);
- if (retainAll(results, filteredSet))
- {
- return true;
- }
- }
- return false;
- }
-
- /**
- * Retain all IDs in a given set that appear in a second set.
- *
- * @param a The set of entry IDs to be updated.
- * @param b Only those IDs that are in this set are retained.
- * @return true if the number of IDs in the updated set is now below
- * the filter candidate threshold.
- */
- private boolean retainAll(EntryIDSet a, EntryIDSet b)
- {
- a.retainAll(b);
-
- // We may have reached the point of diminishing returns where
- // it is quicker to stop now and process the current small number of candidates.
- return a.isDefined() && a.size() <= FILTER_CANDIDATE_THRESHOLD;
- }
-
- /**
- * Evaluate a logical OR search filter against the indexes.
- *
- * @param orFilter The OR search filter to be evaluated.
- * @return A set of entry IDs representing candidate entries.
- */
- private EntryIDSet evaluateLogicalOrFilter(SearchFilter orFilter)
- {
- ArrayList<EntryIDSet> candidateSets = new ArrayList<>(orFilter.getFilterComponents().size());
-
- for (SearchFilter filter : orFilter.getFilterComponents())
- {
- EntryIDSet set = evaluateFilter(filter);
- if (!set.isDefined())
- {
- // There is no point continuing.
- return set;
- }
- candidateSets.add(set);
- }
- return EntryIDSet.unionOfSets(candidateSets, false);
- }
-
- private EntryIDSet evaluateFilterWithDiagnostic(IndexFilterType indexFilterType, SearchFilter filter)
- {
- if (buffer != null)
- {
- filter.toString(buffer);
- }
- return evaluateFilter(indexFilterType, filter);
- }
-
- private EntryIDSet evaluateFilter(IndexFilterType indexFilterType, SearchFilter filter)
- {
- AttributeIndex attributeIndex = entryContainer.getAttributeIndex(filter.getAttributeType());
- if (attributeIndex != null)
- {
- return attributeIndex.evaluateFilter(indexFilterType, filter, buffer, monitor);
- }
-
- if (monitor.isFilterUseEnabled())
- {
- monitor.updateStats(filter, INFO_INDEX_FILTER_INDEX_TYPE_DISABLED.get(
- indexFilterType.toString(), filter.getAttributeType().getNameOrOID()));
- }
- return new EntryIDSet();
- }
-
- /**
- * Evaluate an extensible filter against the indexes.
- *
- * @param extensibleFilter The extensible filter to be evaluated.
- * @return A set of entry IDs representing candidate entries.
- */
- private EntryIDSet evaluateExtensibleFilter(SearchFilter extensibleFilter)
- {
- if (extensibleFilter.getDNAttributes())
- {
- // This will always be unindexed since the filter potentially matches
- // entries containing the specified attribute type as well as any entry
- // containing the attribute in its DN as part of a superior RDN.
- return IndexQuery.createNullIndexQuery().evaluate(null);
- }
-
- AttributeIndex attributeIndex = entryContainer.getAttributeIndex(extensibleFilter.getAttributeType());
- if (attributeIndex != null)
- {
- return attributeIndex.evaluateExtensibleFilter(extensibleFilter, buffer, monitor);
- }
- return IndexQuery.createNullIndexQuery().evaluate(null);
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexInputBuffer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexInputBuffer.java
deleted file mode 100644
index 70bc959..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexInputBuffer.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2010 Sun Microsystems, Inc.
- * Portions Copyright 2012-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.BackendMessages.*;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.opends.server.backends.jeb.Importer.IndexManager;
-
-import com.sleepycat.util.PackedInteger;
-
-/**
- * The buffer class is used to process a buffer from the temporary index files
- * during phase 2 processing.
- */
-final class IndexInputBuffer implements Comparable<IndexInputBuffer>
-{
-
- /** Possible states while reading a record. */
- private static enum RecordState
- {
- START, NEED_INSERT_ID_SET, NEED_DELETE_ID_SET
- }
-
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
- static final long UNDEFINED_SIZE = -1;
-
- private final IndexManager indexMgr;
- private final FileChannel channel;
- private final long begin;
- private final long end;
- private final int bufferID;
-
- private long offset;
- private final ByteBuffer cache;
-
- /** Next fields are the fetched record data. */
- private Integer indexID;
- private ByteBuffer keyBuf = ByteBuffer.allocate(128);
- private RecordState recordState = RecordState.START;
-
- /**
- * Creates a new index input buffer.
- *
- * @param indexMgr
- * The index manager.
- * @param channel
- * The index file channel.
- * @param begin
- * The position of the start of the buffer in the scratch file.
- * @param end
- * The position of the end of the buffer in the scratch file.
- * @param bufferID
- * The buffer ID.
- * @param cacheSize
- * The cache size.
- * @throws IOException
- * If an IO error occurred when priming the cache.
- */
- public IndexInputBuffer(IndexManager indexMgr, FileChannel channel,
- long begin, long end, int bufferID, int cacheSize) throws IOException
- {
- this.indexMgr = indexMgr;
- this.channel = channel;
- this.begin = begin;
- this.end = end;
- this.offset = 0;
- this.bufferID = bufferID;
- this.cache = ByteBuffer.allocate(Math.max(cacheSize - 384, 256));
-
- loadCache();
- cache.flip();
- keyBuf.flip();
- }
-
- private void loadCache() throws IOException
- {
- channel.position(begin + offset);
- long leftToRead = end - (begin + offset);
- long bytesToRead;
- if (leftToRead < cache.remaining())
- {
- cache.limit((int) (cache.position() + leftToRead));
- bytesToRead = leftToRead;
- }
- else
- {
- bytesToRead = Math.min(end - offset, cache.remaining());
- }
- int bytesRead = 0;
- while (bytesRead < bytesToRead)
- {
- bytesRead += channel.read(cache);
- }
- offset += bytesRead;
- indexMgr.addBytesRead(bytesRead);
- }
-
- /**
- * Returns {@code true} if this buffer has more data.
- *
- * @return {@code true} if this buffer has more data.
- * @throws IOException
- * If an IO error occurred.
- */
- public boolean hasMoreData() throws IOException
- {
- boolean hasMore = begin + offset < end;
- return cache.remaining() != 0 || hasMore;
- }
-
- /**
- * Returns the length of the next key.
- *
- * @return The length of the next key.
- */
- public int getKeyLen()
- {
- return keyBuf.limit();
- }
-
- /**
- * Fetches the next key into the provided byte buffer.
- *
- * @param b
- * A buffer where to fetch the key
- */
- public void fetchKey(ByteBuffer b)
- {
- keyBuf.get(b.array(), 0, keyBuf.limit());
- b.limit(keyBuf.limit());
- }
-
- /**
- * Returns the index ID of the next record.
- *
- * @return The index ID of the next record.
- */
- public Integer getIndexID()
- {
- if (indexID == null)
- {
- try
- {
- fetchNextRecord();
- }
- catch (IOException ex)
- {
- logger.error(ERR_IMPORT_BUFFER_IO_ERROR, indexMgr.getBufferFileName());
- throw new RuntimeException(ex);
- }
- }
- return indexID;
- }
-
- /**
- * Reads the next record from the buffer, skipping any remaining data in the
- * current record.
- *
- * @throws IOException
- * If an IO error occurred.
- */
- public void fetchNextRecord() throws IOException
- {
- switch (recordState)
- {
- case START:
- // Nothing to skip.
- break;
- case NEED_INSERT_ID_SET:
- // The previous record's ID sets were not read, so skip them both.
- mergeIDSet(null);
- mergeIDSet(null);
- break;
- case NEED_DELETE_ID_SET:
- // The previous record's delete ID set was not read, so skip it.
- mergeIDSet(null);
- break;
- }
-
- indexID = getInt();
- readKey();
-
- recordState = RecordState.NEED_INSERT_ID_SET;
- }
-
- private void readKey() throws IOException
- {
- ensureData(20);
- byte[] ba = cache.array();
- int p = cache.position();
- int len = PackedInteger.getReadIntLength(ba, p);
- int keyLen = PackedInteger.readInt(ba, p);
- cache.position(p + len);
- if (keyLen > keyBuf.capacity())
- {
- keyBuf = ByteBuffer.allocate(keyLen);
- }
- ensureData(keyLen);
- keyBuf.clear();
- cache.get(keyBuf.array(), 0, keyLen);
- keyBuf.limit(keyLen);
- }
-
- private int getInt() throws IOException
- {
- ensureData(4);
- return cache.getInt();
- }
-
- /**
- * Reads the next ID set from the record and merges it with the provided ID
- * set.
- *
- * @param idSet
- * The ID set to be merged.
- * @throws IOException
- * If an IO error occurred.
- */
- public void mergeIDSet(ImportIDSet idSet) throws IOException
- {
- if (recordState == RecordState.START)
- {
- throw new IllegalStateException();
- }
-
- ensureData(20);
- int p = cache.position();
- byte[] ba = cache.array();
- int len = PackedInteger.getReadIntLength(ba, p);
- int keyCount = PackedInteger.readInt(ba, p);
- p += len;
- cache.position(p);
- for (int k = 0; k < keyCount; k++)
- {
- if (ensureData(9))
- {
- p = cache.position();
- }
- len = PackedInteger.getReadLongLength(ba, p);
- long entryID = PackedInteger.readLong(ba, p);
- p += len;
- cache.position(p);
-
- // idSet will be null if skipping.
- if (idSet != null)
- {
- if (entryID == UNDEFINED_SIZE)
- {
- idSet.setUndefined();
- }
- else
- {
- idSet.addEntryID(entryID);
- }
- }
- }
-
- switch (recordState)
- {
- case START:
- throw new IllegalStateException();
- case NEED_INSERT_ID_SET:
- recordState = RecordState.NEED_DELETE_ID_SET;
- break;
- case NEED_DELETE_ID_SET:
- recordState = RecordState.START;
- break;
- }
- }
-
- private boolean ensureData(int len) throws IOException
- {
- if (cache.remaining() == 0)
- {
- cache.clear();
- loadCache();
- cache.flip();
- return true;
- }
- else if (cache.remaining() < len)
- {
- cache.compact();
- loadCache();
- cache.flip();
- return true;
- }
- return false;
- }
-
- /**
- * Compares this buffer with the provided key and index ID.
- *
- * @param cKey
- * The key.
- * @param cIndexID
- * The index ID.
- * @return A negative number if this buffer is less than the provided key and
- * index ID, a positive number if this buffer is greater, or zero if
- * it is the same.
- */
- int compare(ByteBuffer cKey, Integer cIndexID)
- {
- ensureRecordFetched();
- int cmp = Importer.indexComparator.compare(keyBuf.array(), 0, keyBuf.limit(), cKey.array(), cKey.limit());
- if (cmp == 0)
- {
- return (indexID.intValue() == cIndexID.intValue()) ? 0 : 1;
- }
- return 1;
- }
-
- /** {@inheritDoc} */
- @Override
- public int compareTo(IndexInputBuffer o)
- {
- // used in remove.
- if (this == o)
- {
- return 0;
- }
-
- ensureRecordFetched();
- o.ensureRecordFetched();
-
- byte[] oKey = o.keyBuf.array();
- int oLen = o.keyBuf.limit();
- int cmp = Importer.indexComparator.compare(keyBuf.array(), 0, keyBuf.limit(), oKey, oLen);
- if (cmp == 0)
- {
- cmp = indexID.intValue() - o.getIndexID().intValue();
- if (cmp == 0)
- {
- return bufferID - o.bufferID;
- }
- }
- return cmp;
- }
-
- private void ensureRecordFetched()
- {
- if (keyBuf.limit() == 0)
- {
- getIndexID();
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexOutputBuffer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexOutputBuffer.java
deleted file mode 100644
index 42e87ef..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexOutputBuffer.java
+++ /dev/null
@@ -1,970 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009-2010 Sun Microsystems, Inc.
- * Portions Copyright 2013-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.server.backends.jeb.Importer.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import com.sleepycat.util.PackedInteger;
-
-/**
- * This class represents a index buffer used to store the keys and entry IDs
- * processed from the LDIF file during phase one of an import, or rebuild index
- * process. Each key and ID is stored in a record in the buffer.
- * <p>
- * The records in the buffer are eventually sorted, based on the key, when the
- * maximum size of the buffer is reached and no more records will fit into the
- * buffer. The buffer is scheduled to be flushed to an index scratch file and
- * then re-cycled by the import, or rebuild-index process.
- * </p>
- * <p>
- * The structure of a record in the buffer is the following:
- *
- * <pre>
- * +-------------+-------------+---------+---------+------------+-----------+
- * | record size | INS/DEL bit | indexID | entryID | key length | key bytes |
- * +-------------+-------------+---------+---------+------------+-----------+
- * </pre>
- *
- * The record size is used for fast seeks to quickly "jump" over records.
- * </p>
- * <p>
- * The records are packed as much as possible, to optimize the buffer space.<br>
- * This class is not thread safe.
- * </p>
- */
-final class IndexOutputBuffer implements Comparable<IndexOutputBuffer> {
-
- /** Enumeration used when sorting a buffer. */
- private enum CompareOp {
- LT, GT, LE, GE, EQ
- }
-
- /**
- * The record overhead. In addition to entryID, key length and key bytes, the
- * record overhead includes the indexID + INS/DEL bit
- */
- private static final int REC_OVERHEAD = INT_SIZE + 1;
-
- /** Buffer records are either insert records or delete records. */
- private static final byte DEL = 0, INS = 1;
-
- /** The size of a buffer. */
- private final int size;
- /** Byte array holding the actual buffer data. */
- private final byte buffer[];
-
- /**
- * Used to break a tie (keys equal) when the buffers are being merged
- * for writing to the index scratch file.
- */
- private long bufferID;
-
- /** OffSet where next key is written. */
- private int keyOffset;
- /** OffSet where next value record is written. */
- private int recordOffset;
- /** Amount of bytes left in the buffer. */
- private int bytesLeft;
- /** Number of keys in the buffer. */
- private int keys;
- /** Used to iterate over the buffer when writing to a scratch file. */
- private int position;
-
- /**
- * Used to make sure that an instance of this class is put on the
- * correct scratch file writer work queue for processing.
- */
- private Importer.IndexKey indexKey;
-
- /** Initial capacity of re-usable buffer used in key compares. */
- private static final int CAP = 32;
-
- /**
- * This buffer is reused during key compares. It's main purpose is to keep
- * memory footprint as small as possible.
- */
- private ByteBuffer keyBuffer = ByteBuffer.allocate(CAP);
-
- /**
- * Set to {@code true} if the buffer should not be recycled. Used when the
- * importer/rebuild index process is doing phase one cleanup and flushing
- * buffers not completed.
- */
- private boolean discarded;
-
-
- /**
- * Create an instance of a IndexBuffer using the specified size.
- *
- * @param size The size of the underlying byte array.
- */
- public IndexOutputBuffer(int size) {
- this.size = size;
- this.buffer = new byte[size];
- this.bytesLeft = size;
- this.recordOffset = size - 1;
- }
-
-
- /**
- * Reset an IndexBuffer so it can be re-cycled.
- */
- public void reset() {
- bytesLeft = size;
- keyOffset = 0;
- recordOffset = size - 1;
- keys = 0;
- position = 0;
- indexKey = null;
- }
-
- /**
- * Creates a new poison buffer. Poison buffers are used to stop the processing of import tasks.
- *
- * @return a new poison buffer
- */
- public static IndexOutputBuffer poison()
- {
- return new IndexOutputBuffer(0);
- }
-
- /**
- * Set the ID of a buffer to the specified value.
- *
- * @param bufferID The value to set the buffer ID to.
- */
- public void setBufferID(long bufferID)
- {
- this.bufferID = bufferID;
- }
-
- /**
- * Return the ID of a buffer.
- *
- * @return The value of a buffer's ID.
- */
- private long getBufferID()
- {
- return this.bufferID;
- }
-
- /**
- * Determines if a buffer is a poison buffer. A poison buffer is used to
- * shutdown work queues when import/rebuild index phase one is completed.
- * A poison buffer has a 0 size.
- *
- * @return {@code true} if a buffer is a poison buffer, or {@code false}
- * otherwise.
- */
- public boolean isPoison()
- {
- return size == 0;
- }
-
- /**
- * Determines if buffer should be re-cycled by calling {@link #reset()}.
- *
- * @return {@code true} if buffer should be recycled, or {@code false} if it
- * should not.
- */
- public boolean isDiscarded()
- {
- return discarded;
- }
-
- /**
- * Sets the discarded flag to {@code true}.
- */
- public void discard()
- {
- discarded = true;
- }
-
- /**
- * Returns {@code true} if there is enough space available to write the
- * specified byte array in the buffer. It returns {@code false} otherwise.
- *
- * @param kBytes The byte array to check space against.
- * @param entryID The entryID value to check space against.
- * @return {@code true} if there is space to write the byte array in a
- * buffer, or {@code false} otherwise.
- */
- public boolean isSpaceAvailable(byte[] kBytes, long entryID) {
- return getRequiredSize(kBytes.length, entryID) < bytesLeft;
- }
-
- /**
- * Return a buffer's current position value.
- *
- * @return The buffer's current position value.
- */
- public int getPosition()
- {
- return position;
- }
-
- /**
- * Set a buffer's position value to the specified position.
- *
- * @param position The value to set the position to.
- */
- public void setPosition(int position)
- {
- this.position = position;
- }
-
- /**
- * Sort the buffer.
- */
- public void sort() {
- sort(0, keys);
- }
-
- /**
- * Add the specified key byte array and EntryID to the buffer.
- *
- * @param keyBytes The key byte array.
- * @param entryID The EntryID.
- * @param indexID The index ID the record belongs.
- * @param insert <CODE>True</CODE> if key is an insert, false otherwise.
- */
- public void add(byte[] keyBytes, EntryID entryID, int indexID, boolean insert) {
- // write the record data, but leave the space to write the record size just
- // before it
- recordOffset = addRecord(keyBytes, entryID.longValue(), indexID, insert);
- // then write the returned record size
- keyOffset += writeIntBytes(buffer, keyOffset, recordOffset);
- bytesLeft = recordOffset - keyOffset;
- keys++;
- }
-
-
- /**
- * Writes the full record minus the record size itself.
- */
- private int addRecord(byte[] key, long entryID, int indexID, boolean insert)
- {
- int retOffset = recordOffset - getRecordSize(key.length, entryID);
- int offSet = retOffset;
-
- // write the INS/DEL bit
- buffer[offSet++] = insert ? INS : DEL;
- // write the indexID
- offSet += writeIntBytes(buffer, offSet, indexID);
- // write the entryID
- offSet = PackedInteger.writeLong(buffer, offSet, entryID);
- // write the key length
- offSet = PackedInteger.writeInt(buffer, offSet, key.length);
- // write the key bytes
- System.arraycopy(key, 0, buffer, offSet, key.length);
- return retOffset;
- }
-
-
- /**
- * Computes the full size of the record.
- *
- * @param keyLen The length of the key of index
- * @param entryID The entry id
- * @return The size that such record would take in the IndexOutputBuffer
- */
- public static int getRequiredSize(int keyLen, long entryID)
- {
- // Adds up the key length + key bytes + entryID + indexID + the INS/DEL bit
- // and finally the space needed to store the record size
- return getRecordSize(keyLen, entryID) + INT_SIZE;
- }
-
- private static int getRecordSize(int keyLen, long entryID)
- {
- // Adds up the key length + key bytes + ...
- return PackedInteger.getWriteIntLength(keyLen) + keyLen +
- // ... entryID + (indexID + INS/DEL bit).
- PackedInteger.getWriteLongLength(entryID) + REC_OVERHEAD;
- }
-
- /**
- * Write record at specified position to the specified output stream.
- * Used when when writing the index scratch files.
- *
- * @param stream The stream to write the record at the index to.
- * @param position The position of the record to write.
- */
- public void writeEntryID(ByteArrayOutputStream stream, int position)
- {
- int offSet = getOffset(position);
- int len = PackedInteger.getReadLongLength(buffer, offSet + REC_OVERHEAD);
- stream.write(buffer, offSet + REC_OVERHEAD, len);
- }
-
-
- /**
- * Return {@code true} if the record specified by the position is an insert
- * record, or {@code false} if it a delete record.
- *
- * @param position The position of the record.
- * @return {@code true} if the record is an insert record, or {@code false}
- * if it is a delete record.
- */
- public boolean isInsertRecord(int position)
- {
- int recOffset = getOffset(position);
- return buffer[recOffset] == INS;
- }
-
- /**
- * Return the size of the key part of the record.
- *
- * @return The size of the key part of the record.
- */
- public int getKeySize()
- {
- int offSet = getOffset(position) + REC_OVERHEAD;
- offSet += PackedInteger.getReadLongLength(buffer, offSet);
- return PackedInteger.readInt(buffer, offSet);
- }
-
- /**
- * Return the key value part of a record indicated by the current buffer
- * position.
- *
- * @return byte array containing the key value.
- */
- public byte[] getKey()
- {
- return getKey(position);
- }
-
- /** Used to minimized memory usage when comparing keys. */
- private ByteBuffer getKeyBuf(int position)
- {
- keyBuffer.clear();
- int offSet = getOffset(position) + REC_OVERHEAD;
- offSet += PackedInteger.getReadLongLength(buffer, offSet);
- int keyLen = PackedInteger.readInt(buffer, offSet);
- offSet += PackedInteger.getReadIntLength(buffer, offSet);
- //Re-allocate if the key is bigger than the capacity.
- if(keyLen > keyBuffer.capacity())
- {
- keyBuffer = ByteBuffer.allocate(keyLen);
- }
- keyBuffer.put(buffer, offSet, keyLen);
- keyBuffer.flip();
- return keyBuffer;
- }
-
-
- /**
- * Return the key value part of a record specified by the index.
- *
- * @param position position to return.
- * @return byte array containing the key value.
- */
- private byte[] getKey(int position)
- {
- int offSet = getOffset(position) + REC_OVERHEAD;
- offSet += PackedInteger.getReadLongLength(buffer, offSet);
- int keyLen = PackedInteger.readInt(buffer, offSet);
- offSet += PackedInteger.getReadIntLength(buffer, offSet);
- byte[] key = new byte[keyLen];
- System.arraycopy(buffer, offSet, key, 0, keyLen);
- return key;
- }
-
- private int getOffset(int position)
- {
- return getIntegerValue(position * INT_SIZE);
- }
-
- /**
- * Return index id associated with the current position's record.
- *
- * @return The index id.
- */
- public int getIndexID()
- {
- return getIndexID(position);
- }
-
- private int getIndexID(int position)
- {
- return getIndexIDFromOffset(getOffset(position));
- }
-
- private int getIndexIDFromOffset(int offset)
- {
- return getIntegerValue(offset + 1);
- }
-
- private boolean is(CompareOp op, int xPosition, int yPosition)
- {
- int xoffSet = getOffset(xPosition);
- int xIndexID = getIndexIDFromOffset(xoffSet);
- xoffSet += REC_OVERHEAD;
- xoffSet += PackedInteger.getReadLongLength(buffer, xoffSet);
- int xKeyLen = PackedInteger.readInt(buffer, xoffSet);
- int xKey = PackedInteger.getReadIntLength(buffer, xoffSet) + xoffSet;
-
- int yoffSet = getOffset(yPosition);
- int yIndexID = getIndexIDFromOffset(yoffSet);
- yoffSet += REC_OVERHEAD;
- yoffSet += PackedInteger.getReadLongLength(buffer, yoffSet);
- int yKeyLen = PackedInteger.readInt(buffer, yoffSet);
- int yKey = PackedInteger.getReadIntLength(buffer, yoffSet) + yoffSet;
-
- int cmp = indexComparator.compare(buffer, xKey, xKeyLen, xIndexID, yKey, yKeyLen, yIndexID);
- return evaluateReturnCode(cmp, op);
- }
-
- private boolean is(CompareOp op, int xPosition, byte[] yKey, int yIndexID)
- {
- int xoffSet = getOffset(xPosition);
- int xIndexID = getIndexIDFromOffset(xoffSet);
- xoffSet += REC_OVERHEAD;
- xoffSet += PackedInteger.getReadLongLength(buffer, xoffSet);
- int xKeyLen = PackedInteger.readInt(buffer, xoffSet);
- int xKey = PackedInteger.getReadIntLength(buffer, xoffSet) + xoffSet;
-
- int cmp = indexComparator.compare(buffer, xKey, xKeyLen, xIndexID, yKey, yKey.length, yIndexID);
- return evaluateReturnCode(cmp, op);
- }
-
- /**
- * Compare the byte array at the current position with the specified one and
- * using the specified index id. It will return {@code true} if the byte
- * array at the current position is equal to the specified byte array as
- * determined by the comparator and the index ID is is equal. It will
- * return {@code false} otherwise.
- *
- * @param b The byte array to compare.
- * @param bIndexID The index key.
- * @return <CODE>True</CODE> if the byte arrays are equal.
- */
- public boolean compare(byte[]b, int bIndexID)
- {
- int offset = getOffset(position);
- int indexID = getIndexIDFromOffset(offset);
- offset += REC_OVERHEAD;
- offset += PackedInteger.getReadLongLength(buffer, offset);
- int keyLen = PackedInteger.readInt(buffer, offset);
- int key = PackedInteger.getReadIntLength(buffer, offset) + offset;
- return indexComparator.compare(buffer, key, keyLen, b, b.length) == 0
- && indexID == bIndexID;
- }
-
-
- /**
- * Compare current IndexBuffer to the specified index buffer using both the
- * comparator and index ID of both buffers.
- *
- * The key at the value of position in both buffers are used in the compare.
- *
- * @param b The IndexBuffer to compare to.
- * @return 0 if the buffers are equal, -1 if the current buffer is less
- * than the specified buffer, or 1 if it is greater.
- */
- @Override
- public int compareTo(IndexOutputBuffer b)
- {
- final ByteBuffer keyBuf = b.getKeyBuf(b.position);
- int offset = getOffset(position);
- int indexID = getIndexIDFromOffset(offset);
- offset += REC_OVERHEAD;
- offset += PackedInteger.getReadLongLength(buffer, offset);
- int keyLen = PackedInteger.readInt(buffer, offset);
- int key = PackedInteger.getReadIntLength(buffer, offset) + offset;
-
- final int cmp = indexComparator.compare(buffer, key, keyLen, keyBuf.array(), keyBuf.limit());
- if (cmp != 0)
- {
- return cmp;
- }
-
- final int bIndexID = b.getIndexID();
- if (indexID == bIndexID)
- {
- // This is tested in a tree set remove when a buffer is removed from the tree set.
- return compare(this.bufferID, b.getBufferID());
- }
- else if (indexID < bIndexID)
- {
- return -1;
- }
- else
- {
- return 1;
- }
- }
-
- private int compare(long l1, long l2)
- {
- if (l1 == l2)
- {
- return 0;
- }
- else if (l1 < l2)
- {
- return -1;
- }
- else
- {
- return 1;
- }
- }
-
-
- /**
- * Write a record to specified output stream using the record pointed to by
- * the current position and the specified byte stream of ids.
- *
- * @param dataStream The data output stream to write to.
- *
- * @throws IOException If an I/O error occurs writing the record.
- */
- public void writeKey(DataOutputStream dataStream) throws IOException
- {
- int offSet = getOffset(position) + REC_OVERHEAD;
- offSet += PackedInteger.getReadLongLength(buffer, offSet);
- int keyLen = PackedInteger.readInt(buffer, offSet);
- offSet += PackedInteger.getReadIntLength(buffer, offSet);
- dataStream.write(buffer, offSet, keyLen);
- }
-
- /**
- * Compare the byte array at the current position with the byte array at the
- * specified index.
- *
- * @param i The index pointing to the byte array to compare.
- * @return {@code true} if the byte arrays are equal, or {@code false} otherwise
- */
- public boolean compare(int i)
- {
- return is(CompareOp.EQ, i, position);
- }
-
- /**
- * Return the current number of keys.
- *
- * @return The number of keys currently in an index buffer.
- */
- public int getNumberKeys()
- {
- return keys;
- }
-
- /**
- * Return {@code true} if the buffer has more data to process, or
- * {@code false} otherwise. Used when iterating over the buffer writing the
- * scratch index file.
- *
- * @return {@code true} if a buffer has more data to process, or
- * {@code false} otherwise.
- */
- public boolean hasMoreData()
- {
- return position + 1 < keys;
- }
-
- /**
- * Advance the position pointer to the next record in the buffer. Used when
- * iterating over the buffer examining keys.
- */
- public void nextRecord()
- {
- position++;
- }
-
- private int writeIntBytes(byte[] buffer, int offset, int val)
- {
- for (int i = offset + INT_SIZE - 1; i >= offset; i--) {
- buffer[i] = (byte) (val & 0xff);
- val >>>= 8;
- }
- return INT_SIZE;
- }
-
- private int getIntegerValue(int index)
- {
- int answer = 0;
- for (int i = 0; i < INT_SIZE; i++) {
- byte b = buffer[index + i];
- answer <<= 8;
- answer |= b & 0xff;
- }
- return answer;
- }
-
- private int med3(int a, int b, int c)
- {
- return is(CompareOp.LT, a, b) ?
- (is(CompareOp.LT,b,c) ? b : is(CompareOp.LT,a,c) ? c : a) :
- (is(CompareOp.GT,b,c) ? b : is(CompareOp.GT,a,c) ? c : a);
- }
-
- private void sort(int off, int len)
- {
- if (len < 7) {
- for (int i=off; i<len+off; i++)
- {
- for (int j=i; j>off && is(CompareOp.GT, j-1, j); j--)
- {
- swap(j, j-1);
- }
- }
- return;
- }
-
- int m = off + (len >> 1);
- if (len > 7) {
- int l = off;
- int n = off + len - 1;
- if (len > 40) {
- int s = len/8;
- l = med3(l, l+s, l+2*s);
- m = med3(m-s, m, m+s);
- n = med3(n-2*s, n-s, n);
- }
- m = med3(l, m, n);
- }
-
- byte[] mKey = getKey(m);
- int mIndexID = getIndexID(m);
-
- int a = off, b = a, c = off + len - 1, d = c;
- while(true)
- {
- while (b <= c && is(CompareOp.LE, b, mKey, mIndexID))
- {
- if (is(CompareOp.EQ, b, mKey, mIndexID))
- {
- swap(a++, b);
- }
- b++;
- }
- while (c >= b && is(CompareOp.GE, c, mKey, mIndexID))
- {
- if (is(CompareOp.EQ, c, mKey, mIndexID))
- {
- swap(c, d--);
- }
- c--;
- }
- if (b > c)
- {
- break;
- }
- swap(b++, c--);
- }
-
- // Swap partition elements back to middle
- int s, n = off + len;
- s = Math.min(a-off, b-a );
- vectorSwap(off, b-s, s);
- s = Math.min(d-c, n-d-1);
- vectorSwap(b, n-s, s);
-
- s = b - a;
- // Recursively sort non-partition-elements
- if (s > 1)
- {
- sort(off, s);
- }
- s = d - c;
- if (s > 1)
- {
- sort(n-s, s);
- }
- }
-
- private void swap(int a, int b)
- {
- int aOffset = a * INT_SIZE;
- int bOffset = b * INT_SIZE;
- int bVal = getIntegerValue(bOffset);
- System.arraycopy(buffer, aOffset, buffer, bOffset, INT_SIZE);
- writeIntBytes(buffer, aOffset, bVal);
- }
-
- private void vectorSwap(int a, int b, int n)
- {
- for (int i=0; i<n; i++, a++, b++)
- {
- swap(a, b);
- }
- }
-
- private boolean evaluateReturnCode(int rc, CompareOp op)
- {
- switch(op) {
- case LT:
- return rc < 0;
- case GT:
- return rc > 0;
- case LE:
- return rc <= 0;
- case GE:
- return rc >= 0;
- case EQ:
- return rc == 0;
- default:
- return false;
- }
- }
-
-
- /**
- * Interface that defines two methods used to compare keys used in this
- * class. The Comparator interface cannot be used in this class, so this
- * special one is used that knows about the special properties of this class.
- *
- * @param <T> object to use in the compare
- */
- public static interface ComparatorBuffer<T> {
-
-
- /**
- * Compare two offsets in an object, usually a byte array.
- *
- * @param o The object.
- * @param offset The first offset.
- * @param length The first length.
- * @param indexID The first index id.
- * @param otherOffset The second offset.
- * @param otherLength The second length.
- * @param otherIndexID The second index id.
- * @return a negative integer, zero, or a positive integer as the first
- * offset value is less than, equal to, or greater than the second.
- */
- int compare(T o, int offset, int length, int indexID, int otherOffset,
- int otherLength, int otherIndexID);
-
-
- /**
- * Compare an offset in an object with the specified object.
- *
- * @param o The first object.
- * @param offset The first offset.
- * @param length The first length.
- * @param indexID The first index id.
- * @param other The second object.
- * @param otherLength The length of the second object.
- * @param otherIndexID The second index id.
- * @return a negative integer, zero, or a positive integer as the first
- * offset value is less than, equal to, or greater than the second
- * object.
- */
- int compare(T o, int offset, int length, int indexID, T other,
- int otherLength, int otherIndexID);
-
-
- /**
- * Compare an offset in an object with the specified object.
- *
- * @param o The first object.
- * @param offset The first offset.
- * @param length The first length.
- * @param other The second object.
- * @param otherLen The length of the second object.
- * @return a negative integer, zero, or a positive integer as the first
- * offset value is less than, equal to, or greater than the second
- * object.
- */
- int compare(T o, int offset, int length, T other,
- int otherLen);
-
- }
-
-
- /**
- * Implementation of ComparatorBuffer interface. Used to compare keys when
- * they are non-DN indexes.
- */
- public static class IndexComparator implements IndexOutputBuffer.ComparatorBuffer<byte[]>
- {
-
- /**
- * Compare two offsets in an byte array using the index compare
- * algorithm. The specified index ID is used in the comparison if the
- * byte arrays are equal.
- *
- * @param b The byte array.
- * @param offset The first offset.
- * @param length The first length.
- * @param indexID The first index id.
- * @param otherOffset The second offset.
- * @param otherLength The second length.
- * @param otherIndexID The second index id.
- * @return a negative integer, zero, or a positive integer as the first
- * offset value is less than, equal to, or greater than the second.
- */
- @Override
- public int compare(byte[] b, int offset, int length, int indexID,
- int otherOffset, int otherLength, int otherIndexID)
- {
- for(int i = 0; i < length && i < otherLength; i++)
- {
- if(b[offset + i] > b[otherOffset + i])
- {
- return 1;
- }
- else if (b[offset + i] < b[otherOffset + i])
- {
- return -1;
- }
- }
- return compareLengthThenIndexID(length, indexID, otherLength, otherIndexID);
- }
-
- /**
- * Compare an offset in an byte array with the specified byte array,
- * using the DN compare algorithm. The specified index ID is used in the
- * comparison if the byte arrays are equal.
- *
- * @param b The byte array.
- * @param offset The first offset.
- * @param length The first length.
- * @param indexID The first index id.
- * @param other The second byte array to compare to.
- * @param otherLength The second byte array's length.
- * @param otherIndexID The second index id.
- * @return a negative integer, zero, or a positive integer as the first
- * offset value is less than, equal to, or greater than the second
- * byte array.
- */
- @Override
- public int compare(byte[] b, int offset, int length, int indexID,
- byte[] other, int otherLength, int otherIndexID)
- {
- for(int i = 0; i < length && i < otherLength; i++)
- {
- if(b[offset + i] > other[i])
- {
- return 1;
- }
- else if (b[offset + i] < other[i])
- {
- return -1;
- }
- }
- return compareLengthThenIndexID(length, indexID, otherLength, otherIndexID);
- }
-
- /**
- * The arrays are equal, make sure they are in the same index
- * since multiple suffixes might have the same key.
- */
- private int compareLengthThenIndexID(int length, int indexID, int otherLength, int otherIndexID)
- {
- if (length == otherLength)
- {
- return compare(indexID, otherIndexID);
- }
- else if (length > otherLength)
- {
- return 1;
- }
- else
- {
- return -1;
- }
- }
-
- /**
- * Compare an offset in an byte array with the specified byte array,
- * using the DN compare algorithm.
- *
- * @param b The byte array.
- * @param offset The first offset.
- * @param length The first length.
- * @param other The second byte array to compare to.
- * @param otherLength The second byte array's length.
- * @return a negative integer, zero, or a positive integer as the first
- * offset value is less than, equal to, or greater than the second
- * byte array.
- */
- @Override
- public int compare(byte[] b, int offset, int length, byte[] other,
- int otherLength)
- {
- for(int i = 0; i < length && i < otherLength; i++)
- {
- if(b[offset + i] > other[i])
- {
- return 1;
- }
- else if (b[offset + i] < other[i])
- {
- return -1;
- }
- }
- return compare(length, otherLength);
- }
-
- private int compare(int i1, int i2)
- {
- if (i1 == i2)
- {
- return 0;
- }
- else if (i1 > i2)
- {
- return 1;
- }
- else
- {
- return -1;
- }
- }
- }
-
-
- /**
- * Set the index key associated with an index buffer.
- *
- * @param indexKey The index key.
- */
- public void setIndexKey(Importer.IndexKey indexKey)
- {
- this.indexKey = indexKey;
- }
-
-
- /**
- * Return the index key of an index buffer.
- * @return The index buffer's index key.
- */
- public Importer.IndexKey getIndexKey()
- {
- return indexKey;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexQuery.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexQuery.java
deleted file mode 100644
index 418626b..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexQuery.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.Collection;
-
-import org.forgerock.i18n.LocalizableMessageBuilder;
-
-import static org.opends.server.backends.jeb.IndexFilter.*;
-
-/**
- * This class represents a JE Backend Query.
- */
-@org.opends.server.types.PublicAPI(
- stability = org.opends.server.types.StabilityLevel.VOLATILE,
- mayInstantiate = false,
- mayExtend = true,
- mayInvoke = false)
-public abstract class IndexQuery
-{
- /**
- * Evaluates the index query and returns the EntryIDSet.
- *
- * @param debugMessage If not null, diagnostic message will be written
- * which will help to determine why the returned
- * EntryIDSet is not defined.
- * @return The EntryIDSet as a result of evaluation of this query.
- */
- public abstract EntryIDSet evaluate(LocalizableMessageBuilder debugMessage);
-
-
-
- /**
- * Creates an IntersectionIndexQuery object from a collection of
- * IndexQuery objects.
- *
- * @param subIndexQueries
- * A collection of IndexQuery objects.
- * @return An IntersectionIndexQuery object.
- */
- public static IndexQuery createIntersectionIndexQuery(
- Collection<IndexQuery> subIndexQueries)
- {
- return new IntersectionIndexQuery(subIndexQueries);
- }
-
-
-
- /**
- * Creates a union IndexQuery object from a collection of IndexQuery
- * objects.
- *
- * @param subIndexQueries
- * Collection of IndexQuery objects.
- * @return A UnionIndexQuery object.
- */
- public static IndexQuery createUnionIndexQuery(
- Collection<IndexQuery> subIndexQueries)
- {
- return new UnionIndexQuery(subIndexQueries);
- }
-
-
-
- /**
- * Creates an empty IndexQuery object.
- *
- * @return A NullIndexQuery object.
- */
- public static IndexQuery createNullIndexQuery()
- {
- return new NullIndexQuery();
- }
-
-
-
- /**
- * This class creates a Null IndexQuery. It is used when there is no
- * record in the index. It may also be used when the index contains
- * all the records but an empty EntryIDSet should be returned as part
- * of the optimization.
- */
- private static final class NullIndexQuery extends IndexQuery
- {
- /** {@inheritDoc} */
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
- {
- return new EntryIDSet();
- }
- }
-
- /**
- * This class creates an intersection IndexQuery from a collection of
- * IndexQuery objects.
- */
- private static final class IntersectionIndexQuery extends IndexQuery
- {
- /**
- * Collection of IndexQuery objects.
- */
- private final Collection<IndexQuery> subIndexQueries;
-
-
-
- /**
- * Creates an instance of IntersectionIndexQuery.
- *
- * @param subIndexQueries
- * Collection of IndexQuery objects.
- */
- private IntersectionIndexQuery(Collection<IndexQuery> subIndexQueries)
- {
- this.subIndexQueries = subIndexQueries;
- }
-
- /** {@inheritDoc} */
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
- {
- EntryIDSet entryIDs = null;
- for (IndexQuery query : subIndexQueries)
- {
- if (entryIDs == null)
- {
- entryIDs = query.evaluate(debugMessage);
- }
- else
- {
- entryIDs.retainAll(query.evaluate(debugMessage));
- }
- if (entryIDs.isDefined()
- && entryIDs.size() <= FILTER_CANDIDATE_THRESHOLD)
- {
- break;
- }
- }
- return entryIDs;
- }
- }
-
- /**
- * This class creates a union of IndexQuery objects.
- */
- private static final class UnionIndexQuery extends IndexQuery
- {
- /**
- * Collection containing IndexQuery objects.
- */
- private final Collection<IndexQuery> subIndexQueries;
-
-
-
- /**
- * Creates an instance of UnionIndexQuery.
- *
- * @param subIndexQueries
- * The Collection of IndexQuery objects.
- */
- private UnionIndexQuery(Collection<IndexQuery> subIndexQueries)
- {
- this.subIndexQueries = subIndexQueries;
- }
-
- /** {@inheritDoc} */
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
- {
- EntryIDSet entryIDs = null;
- for (IndexQuery query : subIndexQueries)
- {
- if (entryIDs == null)
- {
- entryIDs = query.evaluate(debugMessage);
- }
- else
- {
- entryIDs.addAll(query.evaluate(debugMessage));
- }
- if (entryIDs.isDefined()
- && entryIDs.size() <= FILTER_CANDIDATE_THRESHOLD)
- {
- break;
- }
- }
- return entryIDs;
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexQueryFactoryImpl.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexQueryFactoryImpl.java
deleted file mode 100644
index eae7dd1..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/IndexQueryFactoryImpl.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.Collection;
-import java.util.Map;
-
-import org.forgerock.i18n.LocalizableMessageBuilder;
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.spi.IndexQueryFactory;
-import org.forgerock.opendj.ldap.spi.IndexingOptions;
-import org.opends.server.types.AttributeType;
-
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.LockMode;
-
-import static org.opends.messages.BackendMessages.*;
-
-/**
- * This class is an implementation of IndexQueryFactory which creates
- * IndexQuery objects as part of the query of the JEB index.
- */
-public final class IndexQueryFactoryImpl implements
- IndexQueryFactory<IndexQuery>
-{
-
- private static final String PRESENCE_INDEX_KEY = "presence";
-
- /**
- * The Map containing the string type identifier and the corresponding index.
- */
- private final Map<String, Index> indexMap;
- private final IndexingOptions indexingOptions;
- private final AttributeType attribute;
-
- /**
- * Creates a new IndexQueryFactoryImpl object.
- *
- * @param indexMap
- * A map containing the index id and the corresponding index.
- * @param indexingOptions
- * The options to use for indexing
- * @param attribute
- * The Attribute type of this index, for error messages
- */
- public IndexQueryFactoryImpl(Map<String, Index> indexMap, IndexingOptions indexingOptions, AttributeType attribute)
- {
- this.indexMap = indexMap;
- this.indexingOptions = indexingOptions;
- this.attribute = attribute;
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public IndexQuery createExactMatchQuery(final String indexID, final ByteSequence value)
- {
- return new IndexQuery()
- {
-
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
- {
- // Read the database and get Record for the key.
- DatabaseEntry key = new DatabaseEntry(value.toByteArray());
-
- // Select the right index to be used.
- Index index = indexMap.get(indexID);
- if (index == null)
- {
- if(debugMessage != null)
- {
- debugMessage.append(INFO_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, attribute.getNameOrOID()));
- }
- return createMatchAllQuery().evaluate(debugMessage);
- }
- EntryIDSet entrySet = index.readKey(key, null, LockMode.DEFAULT);
- if(debugMessage != null && !entrySet.isDefined())
- {
- updateStatsUndefinedResults(debugMessage, index);
- }
- return entrySet;
- }
- };
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public IndexQuery createRangeMatchQuery(final String indexID,
- final ByteSequence lowerBound, final ByteSequence upperBound,
- final boolean includeLowerBound, final boolean includeUpperBound)
- {
- return new IndexQuery()
- {
-
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
- {
- // Find the right index.
- Index index = indexMap.get(indexID);
- if (index == null)
- {
- if(debugMessage != null)
- {
- debugMessage.append(INFO_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, attribute.getNameOrOID()));
- }
- return createMatchAllQuery().evaluate(debugMessage);
- }
- EntryIDSet entrySet = index.readRange(lowerBound.toByteArray(), upperBound.toByteArray(),
- includeLowerBound, includeUpperBound);
- if(debugMessage != null && !entrySet.isDefined())
- {
- updateStatsUndefinedResults(debugMessage, index);
- }
- return entrySet;
- }
- };
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public IndexQuery createIntersectionQuery(Collection<IndexQuery> subqueries)
- {
- return IndexQuery.createIntersectionIndexQuery(subqueries);
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public IndexQuery createUnionQuery(Collection<IndexQuery> subqueries)
- {
- return IndexQuery.createUnionIndexQuery(subqueries);
- }
-
-
-
- /**
- * {@inheritDoc}
- * <p>
- * It returns an empty EntryIDSet object when either all or no record
- * sets are requested.
- */
- @Override
- public IndexQuery createMatchAllQuery()
- {
- return new IndexQuery()
- {
- @Override
- public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage)
- {
- final String indexID = PRESENCE_INDEX_KEY;
- final Index index = indexMap.get(indexID);
- if (index == null)
- {
- if(debugMessage != null)
- {
- debugMessage.append(INFO_INDEX_FILTER_INDEX_TYPE_DISABLED.get(indexID, attribute.getNameOrOID()));
- }
- return new EntryIDSet();
- }
-
- EntryIDSet entrySet = index.readKey(JEBUtils.presenceKey, null, LockMode.DEFAULT);
- if (debugMessage != null && !entrySet.isDefined())
- {
- updateStatsUndefinedResults(debugMessage, index);
- }
- return entrySet;
- }
- };
- }
-
- private static void updateStatsUndefinedResults(LocalizableMessageBuilder debugMessage, Index index)
- {
- if (!index.isTrusted())
- {
- debugMessage.append(INFO_INDEX_FILTER_INDEX_NOT_TRUSTED.get(index.getName()));
- }
- else if (index.isRebuildRunning())
- {
- debugMessage.append(INFO_INDEX_FILTER_INDEX_REBUILD_IN_PROGRESS.get(index.getName()));
- }
- else
- {
- debugMessage.append(INFO_INDEX_FILTER_INDEX_LIMIT_EXCEEDED.get(index.getName()));
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public IndexingOptions getIndexingOptions()
- {
- return indexingOptions;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Indexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Indexer.java
deleted file mode 100644
index d2a39ed..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Indexer.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions copyright 2012-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.ByteString;
-import org.opends.server.backends.jeb.AttributeIndex.KeyComparator;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * This class attempts to abstract the generation and comparison of keys
- * for an index. It is subclassed for the specific type of indexing.
- */
-public abstract class Indexer
-{
- /**
- * Get the comparator that must be used to compare index keys generated by
- * this class.
- *
- * @return A byte array comparator.
- */
- public final Comparator<byte[]> getComparator()
- {
- return KeyComparator.INSTANCE;
- }
-
- /**
- * Get the comparator that must be used to compare index keys generated by
- * this class.
- *
- * @return A byte string comparator.
- */
- public final Comparator<ByteSequence> getBSComparator()
- {
- return ByteSequence.COMPARATOR;
- }
-
- /**
- * Generate the set of index keys for an entry.
- *
- * @param entry The entry.
- * @param keys The set into which the generated keys will be inserted.
- */
- public abstract void indexEntry(Entry entry, Set<ByteString> keys);
-
- /**
- * Generate the set of index keys to be added and the set of index keys
- * to be deleted for an entry that was modified.
- *
- * @param oldEntry The original entry contents.
- * @param newEntry The new entry contents.
- * @param mods The set of modifications that were applied to the entry.
- * @param modifiedKeys The map into which the modified keys will be inserted.
- */
- public abstract void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods, Map<ByteString, Boolean> modifiedKeys);
-
- /**
- * Get a string representation of this object. The returned value is
- * used to name an index created using this object.
- * @return A string representation of this object.
- */
- @Override
- public abstract String toString();
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBUtils.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBUtils.java
deleted file mode 100644
index 2dd35de..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBUtils.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- * Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import com.sleepycat.je.DatabaseConfig;
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.Environment;
-
-/**
- * JE Backend utility methods.
- */
-final class JEBUtils
-{
- /** A database key for the presence index. */
- static final DatabaseEntry presenceKey = new DatabaseEntry(PresenceIndexer.presenceKeyBytes);
-
- private JEBUtils()
- {
- // Private for utility classes
- }
-
- /**
- * Converts the provided JE environment to a {@link DatabaseConfig} object
- * that disallows duplicates.
- *
- * @param env
- * the environment object to convert
- * @return a new {@link DatabaseConfig} object
- */
- static DatabaseConfig toDatabaseConfigNoDuplicates(Environment env)
- {
- final DatabaseConfig result = new DatabaseConfig();
- if (env.getConfig().getReadOnly())
- {
- result.setReadOnly(true);
- result.setAllowCreate(false);
- result.setTransactional(false);
- }
- else if (!env.getConfig().getTransactional())
- {
- result.setAllowCreate(true);
- result.setTransactional(false);
- result.setDeferredWrite(true);
- }
- else
- {
- result.setAllowCreate(true);
- result.setTransactional(true);
- }
- return result;
- }
-
- /**
- * Converts the provided JE environment to a {@link DatabaseConfig} object
- * that allows duplicates.
- *
- * @param env
- * the environment object to convert
- * @return a new {@link DatabaseConfig} object
- */
- static DatabaseConfig toDatabaseConfigAllowDuplicates(Environment env)
- {
- final DatabaseConfig result = new DatabaseConfig();
- if (env.getConfig().getReadOnly())
- {
- result.setReadOnly(true);
- result.setSortedDuplicates(true);
- result.setAllowCreate(false);
- result.setTransactional(false);
- }
- else if (!env.getConfig().getTransactional())
- {
- result.setSortedDuplicates(true);
- result.setAllowCreate(true);
- result.setTransactional(false);
- result.setDeferredWrite(true);
- }
- else
- {
- result.setSortedDuplicates(true);
- result.setAllowCreate(true);
- result.setTransactional(true);
- }
- return result;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JECompressedSchema.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JECompressedSchema.java
deleted file mode 100644
index 7ff41aa..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JECompressedSchema.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2008-2009 Sun Microsystems, Inc.
- * Portions Copyright 2013-2015 ForgeRock AS.
- */
-package org.opends.server.backends.jeb;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.io.ASN1;
-import org.forgerock.opendj.io.ASN1Reader;
-import org.forgerock.opendj.io.ASN1Writer;
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.opends.server.api.CompressedSchema;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.InitializationException;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.Cursor;
-import com.sleepycat.je.Database;
-import com.sleepycat.je.DatabaseConfig;
-import com.sleepycat.je.DatabaseEntry;
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.Environment;
-import com.sleepycat.je.LockConflictException;
-import com.sleepycat.je.OperationStatus;
-
-import static com.sleepycat.je.LockMode.*;
-import static com.sleepycat.je.OperationStatus.*;
-
-import static org.opends.messages.BackendMessages.*;
-
-/**
- * This class provides a compressed schema implementation whose definitions are
- * stored in a Berkeley DB JE database.
- */
-public final class JECompressedSchema extends CompressedSchema
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The name of the database used to store compressed attribute description definitions. */
- private static final String DB_NAME_AD = "compressed_attributes";
- /** The name of the database used to store compressed object class set definitions. */
- private static final String DB_NAME_OC = "compressed_object_classes";
-
- /** The compressed attribute description schema database. */
- private Database adDatabase;
- /** The environment in which the databases are held. */
- private Environment environment;
- /** The compressed object class set schema database. */
- private Database ocDatabase;
-
- private final ByteStringBuilder storeAttributeWriterBuffer = new ByteStringBuilder();
- private final ASN1Writer storeAttributeWriter = ASN1.getWriter(storeAttributeWriterBuffer);
- private final ByteStringBuilder storeObjectClassesWriterBuffer = new ByteStringBuilder();
- private final ASN1Writer storeObjectClassesWriter = ASN1.getWriter(storeObjectClassesWriterBuffer);
-
-
-
- /**
- * Creates a new instance of this JE compressed schema manager.
- *
- * @param environment
- * A reference to the database environment in which the databases
- * will be held.
- * @throws DatabaseException
- * If a database problem occurs while loading the compressed schema
- * definitions from the database.
- * @throws InitializationException
- * If an error occurs while loading and processing the compressed
- * schema definitions.
- */
- public JECompressedSchema(final Environment environment)
- throws DatabaseException, InitializationException
- {
- this.environment = environment;
- load();
- }
-
-
-
- /**
- * Closes the databases and releases any resources held by this compressed
- * schema manager.
- */
- public void close()
- {
- close0(adDatabase);
- close0(ocDatabase);
-
- adDatabase = null;
- ocDatabase = null;
- environment = null;
- }
-
- private void close0(Database database)
- {
- try
- {
- database.sync();
- }
- catch (final Exception e)
- {
- // Ignore.
- }
- StaticUtils.close(database);
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- protected void storeAttribute(final byte[] encodedAttribute,
- final String attributeName, final Collection<String> attributeOptions)
- throws DirectoryException
- {
- try
- {
- storeAttributeWriterBuffer.clear();
- storeAttributeWriter.writeStartSequence();
- storeAttributeWriter.writeOctetString(attributeName);
- for (final String option : attributeOptions)
- {
- storeAttributeWriter.writeOctetString(option);
- }
- storeAttributeWriter.writeEndSequence();
- store(adDatabase, encodedAttribute, storeAttributeWriterBuffer);
- }
- catch (final IOException e)
- {
- // TODO: Shouldn't happen but should log a message
- }
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- protected void storeObjectClasses(final byte[] encodedObjectClasses,
- final Collection<String> objectClassNames) throws DirectoryException
- {
- try
- {
- storeObjectClassesWriterBuffer.clear();
- storeObjectClassesWriter.writeStartSequence();
- for (final String ocName : objectClassNames)
- {
- storeObjectClassesWriter.writeOctetString(ocName);
- }
- storeObjectClassesWriter.writeEndSequence();
- store(ocDatabase, encodedObjectClasses, storeObjectClassesWriterBuffer);
- }
- catch (final IOException e)
- {
- // TODO: Shouldn't happen but should log a message
- }
- }
-
-
-
- /**
- * Loads the compressed schema information from the database.
- *
- * @throws DatabaseException
- * If a database error occurs while loading the definitions from the
- * database.
- * @throws InitializationException
- * If an error occurs while loading and processing the definitions.
- */
- private void load() throws DatabaseException, InitializationException
- {
- final DatabaseConfig dbConfig = JEBUtils.toDatabaseConfigNoDuplicates(environment);
-
- adDatabase = environment.openDatabase(null, DB_NAME_AD, dbConfig);
- ocDatabase = environment.openDatabase(null, DB_NAME_OC, dbConfig);
-
- // Cursor through the object class database and load the object class set
- // definitions. At the same time, figure out the highest token value and
- // initialize the object class counter to one greater than that.
- final Cursor ocCursor = ocDatabase.openCursor(null, null);
- try
- {
- final DatabaseEntry keyEntry = new DatabaseEntry();
- final DatabaseEntry valueEntry = new DatabaseEntry();
- OperationStatus status = ocCursor.getFirst(keyEntry, valueEntry, READ_UNCOMMITTED);
- while (status == SUCCESS)
- {
- final byte[] encodedObjectClasses = keyEntry.getData();
- final ASN1Reader reader = ASN1.getReader(valueEntry.getData());
- reader.readStartSequence();
- final List<String> objectClassNames = new LinkedList<>();
- while (reader.hasNextElement())
- {
- objectClassNames.add(reader.readOctetStringAsString());
- }
- reader.readEndSequence();
- loadObjectClasses(encodedObjectClasses, objectClassNames);
- status = ocCursor.getNext(keyEntry, valueEntry, READ_UNCOMMITTED);
- }
- }
- catch (final IOException e)
- {
- logger.traceException(e);
- throw new InitializationException(
- ERR_COMPSCHEMA_CANNOT_DECODE_OC_TOKEN.get(e.getMessage()), e);
- }
- finally
- {
- ocCursor.close();
- }
-
- // Cursor through the attribute description database and load the attribute
- // set definitions.
- final Cursor adCursor = adDatabase.openCursor(null, null);
- try
- {
- final DatabaseEntry keyEntry = new DatabaseEntry();
- final DatabaseEntry valueEntry = new DatabaseEntry();
- OperationStatus status = adCursor.getFirst(keyEntry, valueEntry, READ_UNCOMMITTED);
- while (status == SUCCESS)
- {
- final byte[] encodedAttribute = keyEntry.getData();
- final ASN1Reader reader = ASN1.getReader(valueEntry.getData());
- reader.readStartSequence();
- final String attributeName = reader.readOctetStringAsString();
- final List<String> attributeOptions = new LinkedList<>();
- while (reader.hasNextElement())
- {
- attributeOptions.add(reader.readOctetStringAsString());
- }
- reader.readEndSequence();
- loadAttribute(encodedAttribute, attributeName, attributeOptions);
- status = adCursor.getNext(keyEntry, valueEntry, READ_UNCOMMITTED);
- }
- }
- catch (final IOException e)
- {
- logger.traceException(e);
- throw new InitializationException(
- ERR_COMPSCHEMA_CANNOT_DECODE_AD_TOKEN.get(e.getMessage()), e);
- }
- finally
- {
- adCursor.close();
- }
- }
-
-
-
- private void store(final Database database, final byte[] key, final ByteStringBuilder value) throws DirectoryException
- {
- if (!putNoOverwrite(database, key, value))
- {
- final LocalizableMessage m = ERR_JEB_COMPSCHEMA_CANNOT_STORE_MULTIPLE_FAILURES.get();
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), m);
- }
- }
-
- private boolean putNoOverwrite(final Database database, final byte[] key, final ByteStringBuilder value)
- throws DirectoryException
- {
- final DatabaseEntry keyEntry = new DatabaseEntry(key);
- final DatabaseEntry valueEntry = new DatabaseEntry(value.getBackingArray(), 0, value.length());
- for (int i = 0; i < 3; i++)
- {
- try
- {
- final OperationStatus status = database.putNoOverwrite(null, keyEntry, valueEntry);
- if (status != SUCCESS)
- {
- final LocalizableMessage m = ERR_JEB_COMPSCHEMA_CANNOT_STORE_STATUS.get(status);
- throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), m);
- }
- return true;
- }
- catch (final LockConflictException ce)
- {
- continue;
- }
- catch (final DatabaseException de)
- {
- throw new DirectoryException(
- DirectoryServer.getServerErrorResultCode(), ERR_COMPSCHEMA_CANNOT_STORE_EX.get(de.getMessage()), de);
- }
- }
- return false;
- }
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JebException.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JebException.java
deleted file mode 100644
index 92d8a19..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JebException.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2009 Sun Microsystems, Inc.
- * Portions Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-
-
-import org.opends.server.types.IdentifiedException;
-import org.forgerock.i18n.LocalizableMessage;
-
-
-/**
- * This class defines an exception that may be thrown if a problem occurs in the
- * JE backend database.
- */
-public class JebException
- extends IdentifiedException
-{
- /**
- * The serial version identifier required to satisfy the compiler because this
- * class extends <CODE>java.lang.Exception</CODE>, which implements the
- * <CODE>java.io.Serializable</CODE> interface. This value was generated
- * using the <CODE>serialver</CODE> command-line utility included with the
- * Java SDK.
- */
- static final long serialVersionUID = 3110979454298870834L;
-
-
-
- /**
- * Creates a new JE backend exception.
- */
- public JebException()
- {
- super();
- }
-
-
-
- /**
- * Creates a new JE backend exception with the provided message.
- *
- * @param message The message that explains the problem that occurred.
- */
- public JebException(LocalizableMessage message)
- {
- super(message);
- }
-
-
-
- /**
- * Creates a new JE backend exception with the provided message and root
- * cause.
- *
- * @param message The message that explains the problem that occurred.
- * @param cause The exception that was caught to trigger this exception.
- */
- public JebException(LocalizableMessage message, Throwable cause)
- {
- super(message, cause);
- }
-
-
-
-}
-
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JebFormat.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JebFormat.java
deleted file mode 100644
index 3dc1b65..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JebFormat.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.opends.server.types.DN;
-
-/**
- * Handles the disk representation of LDAP data.
- */
-public class JebFormat
-{
-
- /**
- * The format version used by this class to encode and decode a DatabaseEntry.
- */
- public static final byte FORMAT_VERSION = 0x01;
-
- /**
- * The ASN1 tag for the DatabaseEntry type.
- */
- public static final byte TAG_DATABASE_ENTRY = 0x60;
-
- /**
- * The ASN1 tag for the DirectoryServerEntry type.
- */
- public static final byte TAG_DIRECTORY_SERVER_ENTRY = 0x61;
-
- /**
- * Decode an entry ID value from its database representation. Note that
- * this method will throw an ArrayIndexOutOfBoundsException if the bytes
- * array length is less than 8.
- *
- * @param bytes The database value of the entry ID.
- * @return The entry ID value.
- * @see #entryIDToDatabase(long)
- */
- public static long entryIDFromDatabase(byte[] bytes)
- {
- return toLong(bytes, 0, 8);
- }
-
- /**
- * Decode a long from a byte array, starting at start index and ending at end
- * index.
- *
- * @param bytes
- * The bytes value of the long.
- * @param start
- * the array index where to start computing the long
- * @param end
- * the array index exclusive where to end computing the long
- * @return the long representation of the read bytes.
- * @throws ArrayIndexOutOfBoundsException
- * if the bytes array length is less than end.
- */
- public static long toLong(byte[] bytes, int start, int end)
- throws ArrayIndexOutOfBoundsException
- {
- long v = 0;
- for (int i = start; i < end; i++)
- {
- v <<= 8;
- v |= bytes[i] & 0xFF;
- }
- return v;
- }
-
- /**
- * Decode an entry ID count from its database representation.
- *
- * @param bytes The database value of the entry ID count.
- * @return The entry ID count.
- * Cannot be negative if encoded with #entryIDUndefinedSizeToDatabase(long)
- * @see #entryIDUndefinedSizeToDatabase(long)
- */
- public static long entryIDUndefinedSizeFromDatabase(byte[] bytes)
- {
- if(bytes == null)
- {
- return 0;
- }
-
- if(bytes.length == 8)
- {
- long v = 0;
- v |= bytes[0] & 0x7F;
- for (int i = 1; i < 8; i++)
- {
- v <<= 8;
- v |= bytes[i] & 0xFF;
- }
- return v;
- }
- return Long.MAX_VALUE;
- }
-
- /**
- * Decode an array of entry ID values from its database representation.
- *
- * @param bytes The raw database value, null if there is no value and
- * hence no entry IDs. Note that this method will throw an
- * ArrayIndexOutOfBoundsException if the bytes array length is
- * not a multiple of 8.
- * @return An array of entry ID values.
- * @see #entryIDListToDatabase(long[])
- */
- public static long[] entryIDListFromDatabase(byte[] bytes)
- {
- byte[] decodedBytes = bytes;
-
- int count = decodedBytes.length / 8;
- long[] entryIDList = new long[count];
- for (int pos = 0, i = 0; i < count; i++)
- {
- long v = 0;
- v |= (decodedBytes[pos++] & 0xFFL) << 56;
- v |= (decodedBytes[pos++] & 0xFFL) << 48;
- v |= (decodedBytes[pos++] & 0xFFL) << 40;
- v |= (decodedBytes[pos++] & 0xFFL) << 32;
- v |= (decodedBytes[pos++] & 0xFFL) << 24;
- v |= (decodedBytes[pos++] & 0xFFL) << 16;
- v |= (decodedBytes[pos++] & 0xFFL) << 8;
- v |= decodedBytes[pos++] & 0xFFL;
- entryIDList[i] = v;
- }
-
- return entryIDList;
- }
-
- /**
- * Decode a integer array using the specified byte array read from DB.
- *
- * @param bytes The byte array.
- * @return An integer array.
- */
- public static int[] intArrayFromDatabaseBytes(byte[] bytes) {
- byte[] decodedBytes = bytes;
-
- int count = decodedBytes.length / 8;
- int[] entryIDList = new int[count];
- for (int pos = 0, i = 0; i < count; i++) {
- int v = 0;
- pos +=4;
- v |= (decodedBytes[pos++] & 0xFFL) << 24;
- v |= (decodedBytes[pos++] & 0xFFL) << 16;
- v |= (decodedBytes[pos++] & 0xFFL) << 8;
- v |= decodedBytes[pos++] & 0xFFL;
- entryIDList[i] = v;
- }
-
- return entryIDList;
- }
-
- /**
- * Encode an entry ID value to its database representation.
- *
- * @param id The entry ID value to be encoded.
- * @return The encoded database value of the entry ID.
- * @see #entryIDFromDatabase(byte[])
- */
- public static byte[] entryIDToDatabase(long id)
- {
- byte[] bytes = new byte[8];
- long v = id;
- for (int i = 7; i >= 0; i--)
- {
- bytes[i] = (byte) (v & 0xFF);
- v >>>= 8;
- }
- return bytes;
- }
-
- /**
- * Encode an entry ID set count to its database representation.
- *
- * @param count The entry ID set count to be encoded.
- * @return The encoded database value of the entry ID set count.
- * @see #entryIDUndefinedSizeFromDatabase(byte[])
- */
- public static byte[] entryIDUndefinedSizeToDatabase(long count)
- {
- byte[] bytes = new byte[8];
- long v = count;
- for (int i = 7; i >= 1; i--)
- {
- bytes[i] = (byte) (v & 0xFF);
- v >>>= 8;
- }
- bytes[0] = (byte) ((v | 0x80) & 0xFF);
- return bytes;
- }
-
- /**
- * Encode an array of entry ID values to its database representation.
- *
- * @param entryIDArray An array of entry ID values.
- * @return The encoded database value.
- * @see #entryIDListFromDatabase(byte[])
- */
- public static byte[] entryIDListToDatabase(long[] entryIDArray)
- {
- if (entryIDArray.length == 0)
- {
- // Zero values
- return null;
- }
-
- byte[] bytes = new byte[8*entryIDArray.length];
- for (int pos = 0, i = 0; i < entryIDArray.length; i++)
- {
- long v = entryIDArray[i];
- bytes[pos++] = (byte) ((v >>> 56) & 0xFF);
- bytes[pos++] = (byte) ((v >>> 48) & 0xFF);
- bytes[pos++] = (byte) ((v >>> 40) & 0xFF);
- bytes[pos++] = (byte) ((v >>> 32) & 0xFF);
- bytes[pos++] = (byte) ((v >>> 24) & 0xFF);
- bytes[pos++] = (byte) ((v >>> 16) & 0xFF);
- bytes[pos++] = (byte) ((v >>> 8) & 0xFF);
- bytes[pos++] = (byte) (v & 0xFF);
- }
-
- return bytes;
- }
-
- /**
- * Find the length of bytes that represents the superior DN of the given
- * DN key. The superior DN is represented by the initial bytes of the DN key.
- *
- * @param dnKey The database key value of the DN.
- * @return The length of the superior DN or -1 if the given dn is the
- * root DN or 0 if the superior DN is removed.
- */
- public static int findDNKeyParent(byte[] dnKey)
- {
- return findDNKeyParent(dnKey, 0, dnKey.length);
- }
-
- /**
- * Find the length of bytes that represents the superior DN of the given
- * DN key. The superior DN is represented by the initial bytes of the DN key.
- *
- * @param dnKey The database key value of the DN.
- * @param offset Starting position in the database key data.
- * @param length The length of the database key data.
- * @return The length of the superior DN or -1 if the given dn is the
- * root DN or 0 if the superior DN is removed.
- */
- public static int findDNKeyParent(byte[] dnKey, int offset, int length)
- {
- if (length == 0)
- {
- // This is the root or base DN
- return -1;
- }
-
- // We will walk backwards through the buffer and
- // find the first unescaped NORMALIZED_RDN_SEPARATOR
- for (int i = offset+length - 1; i >= offset; i--)
- {
- if (dnKey[i] == DN.NORMALIZED_RDN_SEPARATOR && i-1 >= offset && dnKey[i-1] != DN.NORMALIZED_ESC_BYTE)
- {
- return i;
- }
- }
- return offset;
- }
-
- /**
- * Create a DN database key from an entry DN.
- *
- * @param dn The entry DN.
- * @param prefixRDNs The number of prefix RDNs to remove from the encoded
- * representation.
- * @return A DatabaseEntry containing the key.
- */
- public static byte[] dnToDNKey(DN dn, int prefixRDNs)
- {
- ByteStringBuilder builder = new ByteStringBuilder();
- int startSize = dn.size() - prefixRDNs - 1;
- for (int i = startSize; i >= 0; i--)
- {
- builder.appendByte(DN.NORMALIZED_RDN_SEPARATOR);
- dn.getRDN(i).toNormalizedByteString(builder);
- }
-
- return builder.toByteArray();
- }
-
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/NullIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/NullIndex.java
deleted file mode 100644
index c41a83b..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/NullIndex.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- * Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.List;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-import com.sleepycat.je.*;
-
-/**
- * A null index which replaces id2children and id2subtree when they have been
- * disabled.
- */
-final class NullIndex extends Index
-{
-
- /**
- * Create a new null index object.
- *
- * @param name
- * The name of the index database within the entryContainer.
- * @param indexer
- * The indexer object to construct index keys from LDAP attribute
- * values.
- * @param state
- * The state database to persist index state info.
- * @param env
- * The JE Environment
- * @param entryContainer
- * The database entryContainer holding this index.
- * @throws DatabaseException
- * If an error occurs in the JE database.
- */
- public NullIndex(String name, Indexer indexer, State state, Environment env, EntryContainer entryContainer)
- throws DatabaseException
- {
- super(name, indexer, state, 0, 0, false, env, entryContainer);
- }
-
- /** {@inheritDoc} */
- @Override
- public void insert(DatabaseEntry key, ImportIDSet importIdSet, DatabaseEntry data) throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public void delete(DatabaseEntry key, ImportIDSet importIdSet, DatabaseEntry data) throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- void updateKey(Transaction txn, DatabaseEntry key, EntryIDSet deletedIDs, EntryIDSet addedIDs)
- throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public void delete(IndexBuffer buffer, ByteString keyBytes)
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public ConditionResult containsID(Transaction txn, DatabaseEntry key, EntryID entryID) throws DatabaseException
- {
- return ConditionResult.UNDEFINED;
- }
-
- /** {@inheritDoc} */
- @Override
- public EntryIDSet readKey(DatabaseEntry key, Transaction txn, LockMode lockMode)
- {
- return new EntryIDSet();
- }
-
- /** {@inheritDoc} */
- @Override
- public void writeKey(Transaction txn, DatabaseEntry key, EntryIDSet entryIDList) throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public EntryIDSet readRange(byte[] lower, byte[] upper, boolean lowerIncluded, boolean upperIncluded)
- {
- return new EntryIDSet();
- }
-
- /** {@inheritDoc} */
- @Override
- public int getEntryLimitExceededCount()
- {
- return 0;
- }
-
- /** {@inheritDoc} */
- @Override
- public void addEntry(IndexBuffer buffer, EntryID entryID, Entry entry)
- throws DatabaseException, DirectoryException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public void removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry)
- throws DatabaseException, DirectoryException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(IndexBuffer buffer, EntryID entryID, Entry oldEntry, Entry newEntry, List<Modification> mods)
- throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean setIndexEntryLimit(int indexEntryLimit)
- {
- return false;
- }
-
- /** {@inheritDoc} */
- @Override
- public int getIndexEntryLimit()
- {
- return 0;
- }
-
- /** {@inheritDoc} */
- @Override
- public void setTrusted(Transaction txn, boolean trusted) throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isTrusted()
- {
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean isRebuildRunning()
- {
- return false;
- }
-
- /** {@inheritDoc} */
- @Override
- public boolean getMaintainCount()
- {
- return false;
- }
-
- /** {@inheritDoc} */
- @Override
- public void open() throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- public void close() throws DatabaseException
- {
- // Do nothing.
- }
-
- /** {@inheritDoc} */
- @Override
- OperationStatus put(Transaction txn, DatabaseEntry key, DatabaseEntry data) throws DatabaseException
- {
- return OperationStatus.SUCCESS;
- }
-
- /** {@inheritDoc} */
- @Override
- OperationStatus read(Transaction txn, DatabaseEntry key, DatabaseEntry data, LockMode lockMode)
- throws DatabaseException
- {
- return OperationStatus.SUCCESS;
- }
-
- /** {@inheritDoc} */
- @Override
- OperationStatus insert(Transaction txn, DatabaseEntry key, DatabaseEntry data) throws DatabaseException
- {
- return OperationStatus.SUCCESS;
- }
-
- /** {@inheritDoc} */
- @Override
- OperationStatus delete(Transaction txn, DatabaseEntry key) throws DatabaseException
- {
- return OperationStatus.SUCCESS;
- }
-
- /** {@inheritDoc} */
- @Override
- public Cursor openCursor(Transaction txn, CursorConfig cursorConfig) throws DatabaseException
- {
- throw new IllegalStateException();
- }
-
- /** {@inheritDoc} */
- @Override
- public long getRecordCount() throws DatabaseException
- {
- return 0;
- }
-
- /** {@inheritDoc} */
- @Override
- public PreloadStats preload(PreloadConfig config) throws DatabaseException
- {
- return new PreloadStats();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/PresenceIndexer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/PresenceIndexer.java
deleted file mode 100644
index d47f304..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/PresenceIndexer.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-
-/**
- * An implementation of an Indexer for attribute presence.
- */
-public class PresenceIndexer extends Indexer
-{
- /** The key bytes used for the presence index. */
- static final byte[] presenceKeyBytes = "+".getBytes();
-
- /** The key bytes used for the presence index as a {@link ByteString}. */
- static final ByteString presenceKey = ByteString.wrap(presenceKeyBytes);
-
- /** The attribute type for which this instance will generate index keys. */
- private AttributeType attributeType;
-
- /**
- * Create a new attribute presence indexer.
- * @param attributeType The attribute type for which the indexer
- * is required.
- */
- public PresenceIndexer(AttributeType attributeType)
- {
- this.attributeType = attributeType;
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString()
- {
- return attributeType.getNameOrOID() + ".presence";
- }
-
- /** {@inheritDoc} */
- @Override
- public void indexEntry(Entry entry, Set<ByteString> keys)
- {
- List<Attribute> attrList = entry.getAttribute(attributeType);
- if (attrList != null && !attrList.isEmpty())
- {
- keys.add(presenceKey);
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void modifyEntry(Entry oldEntry, Entry newEntry,
- List<Modification> mods,
- Map<ByteString, Boolean> modifiedKeys)
- {
- List<Attribute> newAttributes = newEntry.getAttribute(attributeType, true);
- List<Attribute> oldAttributes = oldEntry.getAttribute(attributeType, true);
- if(oldAttributes == null)
- {
- if(newAttributes != null)
- {
- modifiedKeys.put(presenceKey, true);
- }
- }
- else
- {
- if(newAttributes == null)
- {
- modifiedKeys.put(presenceKey, false);
- }
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/RemoveOnceLocalDBBackendIsPluggable.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/RemoveOnceLocalDBBackendIsPluggable.java
deleted file mode 100644
index 74c1266..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/RemoveOnceLocalDBBackendIsPluggable.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-
-/**
- * Temporary annotation to remind that annotated code could be removed once the
- * JE backend will be converted to pluggable backend.
- */
-@Retention(RetentionPolicy.SOURCE)
-public @interface RemoveOnceLocalDBBackendIsPluggable
-{
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/RootContainer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/RootContainer.java
deleted file mode 100644
index 8f8952f..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/RootContainer.java
+++ /dev/null
@@ -1,866 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.io.File;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.opends.server.admin.server.ConfigurationChangeListener;
-import org.opends.server.admin.std.server.LocalDBBackendCfg;
-import org.opends.server.api.Backend;
-import org.opends.server.core.DirectoryServer;
-import org.forgerock.opendj.config.server.ConfigChangeResult;
-import org.opends.server.types.DN;
-import org.opends.server.types.FilePermission;
-import org.opends.server.types.InitializationException;
-
-import com.sleepycat.je.*;
-import com.sleepycat.je.config.ConfigParam;
-import com.sleepycat.je.config.EnvironmentParams;
-
-import static org.opends.messages.ConfigMessages.*;
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.util.StaticUtils.*;
-
-/**
- * Wrapper class for the JE environment. Root container holds all the entry
- * containers for each base DN. It also maintains all the openings and closings
- * of the entry containers.
- */
-public class RootContainer
- implements ConfigurationChangeListener<LocalDBBackendCfg>
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The JE database environment. */
- private Environment env;
-
- /** Used to force a checkpoint during import. */
- private final CheckpointConfig importForceCheckPoint = new CheckpointConfig();
-
- /** The backend configuration. */
- private LocalDBBackendCfg config;
-
- /** The backend to which this entry root container belongs. */
- private final Backend<?> backend;
-
- /** The database environment monitor for this JE environment. */
- private DatabaseEnvironmentMonitor monitor;
-
- /** The base DNs contained in this root container. */
- private final ConcurrentHashMap<DN, EntryContainer> entryContainers = new ConcurrentHashMap<>();
-
- /** The cached value of the next entry identifier to be assigned. */
- private AtomicLong nextid = new AtomicLong(1);
-
- /** The compressed schema manager for this backend. */
- private JECompressedSchema compressedSchema;
-
-
-
- /**
- * Creates a new RootContainer object. Each root container represents a JE
- * environment.
- *
- * @param config The configuration of the JE backend.
- * @param backend A reference to the JE back end that is creating this
- * root container.
- */
- RootContainer(Backend<?> backend, LocalDBBackendCfg config)
- {
- this.backend = backend;
- this.config = config;
-
- getMonitorProvider().enableFilterUseStats(config.isIndexFilterAnalyzerEnabled());
- getMonitorProvider().setMaxEntries(config.getIndexFilterAnalyzerMaxFilters());
-
- config.addLocalDBChangeListener(this);
- importForceCheckPoint.setForce(true);
- }
-
- /**
- * Opens the root container using the JE configuration object provided.
- *
- * @param envConfig The JE environment configuration.
- * @throws DatabaseException If a database error occurs when creating
- * the environment.
- * @throws InitializationException If an initialization error occurs while
- * creating the environment.
- * @throws ConfigException If an configuration error occurs while
- * creating the environment.
- */
- public void open(EnvironmentConfig envConfig)
- throws DatabaseException, InitializationException, ConfigException
- {
- // Determine the backend database directory.
- File parentDirectory = getFileForPath(config.getDBDirectory());
- File backendDirectory = new File(parentDirectory, config.getBackendId());
-
- // Create the directory if it doesn't exist.
- if (!backendDirectory.exists())
- {
- if(!backendDirectory.mkdirs())
- {
- throw new ConfigException(ERR_CREATE_FAIL.get(backendDirectory.getPath()));
- }
- }
- //Make sure the directory is valid.
- else if (!backendDirectory.isDirectory())
- {
- throw new ConfigException(ERR_DIRECTORY_INVALID.get(backendDirectory.getPath()));
- }
-
- FilePermission backendPermission;
- try
- {
- backendPermission =
- FilePermission.decodeUNIXMode(config.getDBDirectoryPermissions());
- }
- catch(Exception e)
- {
- throw new ConfigException(ERR_CONFIG_BACKEND_MODE_INVALID.get(config.dn()));
- }
-
- //Make sure the mode will allow the server itself access to
- //the database
- if(!backendPermission.isOwnerWritable() ||
- !backendPermission.isOwnerReadable() ||
- !backendPermission.isOwnerExecutable())
- {
- LocalizableMessage message = ERR_CONFIG_BACKEND_INSANE_MODE.get(
- config.getDBDirectoryPermissions());
- throw new ConfigException(message);
- }
-
- // Get the backend database backendDirectory permissions and apply
- try
- {
- if(!FilePermission.setPermissions(backendDirectory, backendPermission))
- {
- logger.warn(WARN_UNABLE_SET_PERMISSIONS, backendPermission, backendDirectory);
- }
- }
- catch(Exception e)
- {
- // Log an warning that the permissions were not set.
- logger.warn(WARN_SET_PERMISSIONS_FAILED, backendDirectory, e);
- }
-
- // Open the database environment
- env = new Environment(backendDirectory,
- envConfig);
-
- if (logger.isTraceEnabled())
- {
- logger.trace("JE (%s) environment opened with the following config: %n%s",
- JEVersion.CURRENT_VERSION, env.getConfig());
-
- // Get current size of heap in bytes
- long heapSize = Runtime.getRuntime().totalMemory();
-
- // Get maximum size of heap in bytes. The heap cannot grow beyond this size.
- // Any attempt will result in an OutOfMemoryException.
- long heapMaxSize = Runtime.getRuntime().maxMemory();
-
- // Get amount of free memory within the heap in bytes. This size will increase
- // after garbage collection and decrease as new objects are created.
- long heapFreeSize = Runtime.getRuntime().freeMemory();
-
- logger.trace("Current size of heap: %d bytes", heapSize);
- logger.trace("Max size of heap: %d bytes", heapMaxSize);
- logger.trace("Free memory in heap: %d bytes", heapFreeSize);
- }
-
- compressedSchema = new JECompressedSchema(env);
- openAndRegisterEntryContainers(config.getBaseDN());
- }
-
- /**
- * Opens the entry container for a base DN. If the entry container does not
- * exist for the base DN, it will be created. The entry container will be
- * opened with the same mode as the root container. Any entry containers
- * opened in a read only root container will also be read only. Any entry
- * containers opened in a non transactional root container will also be non
- * transactional.
- *
- * @param baseDN The base DN of the entry container to open.
- * @param name The name of the entry container or <CODE>NULL</CODE> to open
- * the default entry container for the given base DN.
- * @return The opened entry container.
- * @throws DatabaseException If an error occurs while opening the entry
- * container.
- * @throws ConfigException If an configuration error occurs while opening
- * the entry container.
- */
- EntryContainer openEntryContainer(DN baseDN, String name)
- throws DatabaseException, ConfigException
- {
- String databasePrefix;
- if(name == null || name.equals(""))
- {
- databasePrefix = baseDN.toNormalizedUrlSafeString();
- }
- else
- {
- databasePrefix = name;
- }
-
- EntryContainer ec = new EntryContainer(baseDN, databasePrefix, backend.getBackendID(), config, env, this);
- ec.open();
- return ec;
- }
-
- /**
- * Registers the entry container for a base DN.
- *
- * @param baseDN The base DN of the entry container to close.
- * @param entryContainer The entry container to register for the baseDN.
- * @throws InitializationException If an error occurs while opening the
- * entry container.
- */
- void registerEntryContainer(DN baseDN, EntryContainer entryContainer)
- throws InitializationException
- {
- EntryContainer ec1 = this.entryContainers.get(baseDN);
-
- // If an entry container for this baseDN is already open we don't allow
- // another to be opened.
- if (ec1 != null)
- {
- throw new InitializationException(ERR_ENTRY_CONTAINER_ALREADY_REGISTERED.get(ec1.getDatabasePrefix(), baseDN));
- }
-
- this.entryContainers.put(baseDN, entryContainer);
- }
-
- /**
- * Opens the entry containers for multiple base DNs.
- *
- * @param baseDNs The base DNs of the entry containers to open.
- * @throws DatabaseException If a database error occurs while opening
- * the entry container.
- * @throws InitializationException If an initialization error occurs while
- * opening the entry container.
- * @throws ConfigException If a configuration error occurs while
- * opening the entry container.
- */
- private void openAndRegisterEntryContainers(Set<DN> baseDNs)
- throws DatabaseException, InitializationException, ConfigException
- {
- EntryID id;
- EntryID highestID = null;
- for(DN baseDN : baseDNs)
- {
- EntryContainer ec = openEntryContainer(baseDN, null);
- id = ec.getHighestEntryID();
- registerEntryContainer(baseDN, ec);
- if(highestID == null || id.compareTo(highestID) > 0)
- {
- highestID = id;
- }
- }
-
- nextid = new AtomicLong(highestID.longValue() + 1);
- }
-
- /**
- * Unregisters the entry container for a base DN.
- *
- * @param baseDN The base DN of the entry container to close.
- * @return The entry container that was unregistered or NULL if a entry
- * container for the base DN was not registered.
- */
- EntryContainer unregisterEntryContainer(DN baseDN)
- {
- return entryContainers.remove(baseDN);
- }
-
- /**
- * Retrieves the compressed schema manager for this backend.
- *
- * @return The compressed schema manager for this backend.
- */
- public JECompressedSchema getCompressedSchema()
- {
- return compressedSchema;
- }
-
- /**
- * Get the DatabaseEnvironmentMonitor object for JE environment used by this
- * root container.
- *
- * @return The DatabaseEnvironmentMonito object.
- */
- public DatabaseEnvironmentMonitor getMonitorProvider()
- {
- if(monitor == null)
- {
- String monitorName = backend.getBackendID() + " Database Environment";
- monitor = new DatabaseEnvironmentMonitor(monitorName, this);
- }
-
- return monitor;
- }
-
- /**
- * Preload the database cache. There is no preload if the configured preload
- * time limit is zero.
- *
- * @param timeLimit The time limit for the preload process.
- */
- public void preload(long timeLimit)
- {
- if (timeLimit > 0)
- {
- // Get a list of all the databases used by the backend.
- ArrayList<DatabaseContainer> dbList = new ArrayList<>();
- for (EntryContainer ec : entryContainers.values())
- {
- ec.sharedLock.lock();
- try
- {
- ec.listDatabases(dbList);
- }
- finally
- {
- ec.sharedLock.unlock();
- }
- }
-
- // Sort the list in order of priority.
- Collections.sort(dbList, new DbPreloadComparator());
-
- // Preload each database until we reach the time limit or the cache
- // is filled.
- try
- {
- // Configure preload of Leaf Nodes (LNs) containing the data values.
- PreloadConfig preloadConfig = new PreloadConfig();
- preloadConfig.setLoadLNs(true);
-
- logger.info(NOTE_CACHE_PRELOAD_STARTED, backend.getBackendID());
-
- boolean isInterrupted = false;
-
- long timeEnd = System.currentTimeMillis() + timeLimit;
-
- for (DatabaseContainer db : dbList)
- {
- // Calculate the remaining time.
- long timeRemaining = timeEnd - System.currentTimeMillis();
- if (timeRemaining <= 0)
- {
- break;
- }
-
- preloadConfig.setMaxMillisecs(timeRemaining);
- PreloadStats preloadStats = db.preload(preloadConfig);
-
- if(logger.isTraceEnabled())
- {
- logger.trace("file=" + db.getName() + " LNs=" + preloadStats.getNLNsLoaded());
- }
-
- // Stop if the cache is full or the time limit has been exceeded.
- PreloadStatus preloadStatus = preloadStats.getStatus();
- if (preloadStatus != PreloadStatus.SUCCESS)
- {
- if (preloadStatus == PreloadStatus.EXCEEDED_TIME) {
- logger.info(NOTE_CACHE_PRELOAD_INTERRUPTED_BY_TIME, backend.getBackendID(), db.getName());
- } else if (preloadStatus == PreloadStatus.FILLED_CACHE) {
- logger.info(NOTE_CACHE_PRELOAD_INTERRUPTED_BY_SIZE, backend.getBackendID(), db.getName());
- } else {
- logger.info(NOTE_CACHE_PRELOAD_INTERRUPTED_UNKNOWN, backend.getBackendID(), db.getName());
- }
-
- isInterrupted = true;
- break;
- }
-
- logger.info(NOTE_CACHE_DB_PRELOADED, db.getName());
- }
-
- if (!isInterrupted) {
- logger.info(NOTE_CACHE_PRELOAD_DONE, backend.getBackendID());
- }
-
- // Log an informational message about the size of the cache.
- EnvironmentStats stats = env.getStats(new StatsConfig());
- long total = stats.getCacheTotalBytes();
-
- logger.info(NOTE_CACHE_SIZE_AFTER_PRELOAD, total / (1024 * 1024));
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
-
- logger.error(ERR_CACHE_PRELOAD, backend.getBackendID(),
- stackTraceToSingleLineString(e.getCause() != null ? e.getCause() : e));
- }
- }
- }
-
- /**
- * Closes this root container.
- *
- * @throws DatabaseException If an error occurs while attempting to close
- * the root container.
- */
- public void close() throws DatabaseException
- {
- for(DN baseDN : entryContainers.keySet())
- {
- EntryContainer ec = unregisterEntryContainer(baseDN);
- ec.exclusiveLock.lock();
- try
- {
- ec.close();
- }
- finally
- {
- ec.exclusiveLock.unlock();
- }
- }
-
- compressedSchema.close();
- config.removeLocalDBChangeListener(this);
-
- if (env != null)
- {
- env.close();
- env = null;
- }
- }
-
- /**
- * Return all the entry containers in this root container.
- *
- * @return The entry containers in this root container.
- */
- public Collection<EntryContainer> getEntryContainers()
- {
- return entryContainers.values();
- }
-
- /**
- * Returns all the baseDNs this root container stores.
- *
- * @return The set of DNs this root container stores.
- */
- public Set<DN> getBaseDNs()
- {
- return entryContainers.keySet();
- }
-
- /**
- * Return the entry container for a specific base DN.
- *
- * @param baseDN The base DN of the entry container to retrieve.
- * @return The entry container for the base DN.
- */
- public EntryContainer getEntryContainer(DN baseDN)
- {
- EntryContainer ec = null;
- DN nodeDN = baseDN;
-
- while (ec == null && nodeDN != null)
- {
- ec = entryContainers.get(nodeDN);
- if (ec == null)
- {
- nodeDN = nodeDN.getParentDNInSuffix();
- }
- }
-
- return ec;
- }
-
- /**
- * Get the environment stats of the JE environment used in this root
- * container.
- *
- * @param statsConfig The configuration to use for the EnvironmentStats
- * object.
- * @return The environment status of the JE environment.
- * @throws DatabaseException If an error occurs while retrieving the stats
- * object.
- */
- public EnvironmentStats getEnvironmentStats(StatsConfig statsConfig)
- throws DatabaseException
- {
- return env.getStats(statsConfig);
- }
-
- /**
- * Get the environment transaction stats of the JE environment used
- * in this root container.
- *
- * @param statsConfig The configuration to use for the EnvironmentStats
- * object.
- * @return The environment status of the JE environment.
- * @throws DatabaseException If an error occurs while retrieving the stats
- * object.
- */
- public TransactionStats getEnvironmentTransactionStats(
- StatsConfig statsConfig) throws DatabaseException
- {
- return env.getTransactionStats(statsConfig);
- }
-
- /**
- * Get the environment config of the JE environment used in this root
- * container.
- *
- * @return The environment config of the JE environment.
- * @throws DatabaseException If an error occurs while retrieving the
- * configuration object.
- */
- public EnvironmentConfig getEnvironmentConfig() throws DatabaseException
- {
- return env.getConfig();
- }
-
- /**
- * Get the backend configuration used by this root container.
- *
- * @return The JE backend configuration used by this root container.
- */
- public LocalDBBackendCfg getConfiguration()
- {
- return config;
- }
-
- /**
- * Get the total number of entries in this root container.
- *
- * @return The number of entries in this root container
- * @throws DatabaseException If an error occurs while retrieving the entry
- * count.
- */
- public long getEntryCount() throws DatabaseException
- {
- long entryCount = 0;
- for(EntryContainer ec : this.entryContainers.values())
- {
- ec.sharedLock.lock();
- try
- {
- entryCount += ec.getEntryCount();
- }
- finally
- {
- ec.sharedLock.unlock();
- }
- }
-
- return entryCount;
- }
-
- /**
- * Assign the next entry ID.
- *
- * @return The assigned entry ID.
- */
- public EntryID getNextEntryID()
- {
- return new EntryID(nextid.getAndIncrement());
- }
-
- /**
- * Return the lowest entry ID assigned.
- *
- * @return The lowest entry ID assigned.
- */
- public Long getLowestEntryID()
- {
- return 1L;
- }
-
- /**
- * Resets the next entry ID counter to zero. This should only be used after
- * clearing all databases.
- */
- public void resetNextEntryID()
- {
- nextid.set(1);
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public boolean isConfigurationChangeAcceptable(
- LocalDBBackendCfg cfg,
- List<LocalizableMessage> unacceptableReasons)
- {
- boolean acceptable = true;
-
- File parentDirectory = getFileForPath(config.getDBDirectory());
- File backendDirectory = new File(parentDirectory, config.getBackendId());
-
- //Make sure the directory either already exists or is able to create.
- if (!backendDirectory.exists())
- {
- if(!backendDirectory.mkdirs())
- {
- unacceptableReasons.add(ERR_CREATE_FAIL.get(backendDirectory.getPath()));
- acceptable = false;
- }
- else
- {
- backendDirectory.delete();
- }
- }
- //Make sure the directory is valid.
- else if (!backendDirectory.isDirectory())
- {
- unacceptableReasons.add(ERR_DIRECTORY_INVALID.get(backendDirectory.getPath()));
- acceptable = false;
- }
-
- try
- {
- FilePermission newBackendPermission =
- FilePermission.decodeUNIXMode(cfg.getDBDirectoryPermissions());
-
- //Make sure the mode will allow the server itself access to
- //the database
- if(!newBackendPermission.isOwnerWritable() ||
- !newBackendPermission.isOwnerReadable() ||
- !newBackendPermission.isOwnerExecutable())
- {
- LocalizableMessage message = ERR_CONFIG_BACKEND_INSANE_MODE.get(
- cfg.getDBDirectoryPermissions());
- unacceptableReasons.add(message);
- acceptable = false;
- }
- }
- catch(Exception e)
- {
- unacceptableReasons.add(ERR_CONFIG_BACKEND_MODE_INVALID.get(cfg.dn()));
- acceptable = false;
- }
-
- try
- {
- ConfigurableEnvironment.parseConfigEntry(cfg);
- }
- catch (Exception e)
- {
- unacceptableReasons.add(LocalizableMessage.raw(e.getLocalizedMessage()));
- acceptable = false;
- }
-
- return acceptable;
- }
-
-
-
- /** {@inheritDoc} */
- @Override
- public ConfigChangeResult applyConfigurationChange(LocalDBBackendCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- try
- {
- if(env != null)
- {
- // Check if any JE non-mutable properties were changed.
- EnvironmentConfig oldEnvConfig = env.getConfig();
- EnvironmentConfig newEnvConfig =
- ConfigurableEnvironment.parseConfigEntry(cfg);
- Map<?,?> paramsMap = EnvironmentParams.SUPPORTED_PARAMS;
-
- // Iterate through native JE properties.
- SortedSet<String> jeProperties = cfg.getJEProperty();
- for (String jeEntry : jeProperties) {
- // There is no need to validate properties yet again.
- StringTokenizer st = new StringTokenizer(jeEntry, "=");
- if (st.countTokens() == 2) {
- String jePropertyName = st.nextToken();
- String jePropertyValue = st.nextToken();
- ConfigParam param = (ConfigParam) paramsMap.get(jePropertyName);
- if (!param.isMutable()) {
- String oldValue = oldEnvConfig.getConfigParam(param.getName());
- if (!oldValue.equalsIgnoreCase(jePropertyValue)) {
- ccr.setAdminActionRequired(true);
- ccr.addMessage(INFO_CONFIG_JE_PROPERTY_REQUIRES_RESTART.get(jePropertyName));
- if(logger.isTraceEnabled()) {
- logger.trace("The change to the following property " +
- "will take effect when the component is restarted: " +
- jePropertyName);
- }
- }
- }
- }
- }
-
- // Iterate through JE configuration attributes.
- for (Object o : paramsMap.values())
- {
- ConfigParam param = (ConfigParam) o;
- if (!param.isMutable())
- {
- String oldValue = oldEnvConfig.getConfigParam(param.getName());
- String newValue = newEnvConfig.getConfigParam(param.getName());
- if (!oldValue.equalsIgnoreCase(newValue))
- {
- ccr.setAdminActionRequired(true);
- String configAttr = ConfigurableEnvironment.
- getAttributeForProperty(param.getName());
- if (configAttr != null)
- {
- ccr.addMessage(NOTE_JEB_CONFIG_ATTR_REQUIRES_RESTART.get(configAttr));
- }
- else
- {
- ccr.addMessage(NOTE_JEB_CONFIG_ATTR_REQUIRES_RESTART.get(param.getName()));
- }
- if(logger.isTraceEnabled())
- {
- logger.trace("The change to the following property will " +
- "take effect when the backend is restarted: " +
- param.getName());
- }
- }
- }
- }
-
- // This takes care of changes to the JE environment for those
- // properties that are mutable at runtime.
- env.setMutableConfig(newEnvConfig);
-
- logger.trace("JE database configuration: %s", env.getConfig());
- }
-
- // Create the directory if it doesn't exist.
- if(!cfg.getDBDirectory().equals(this.config.getDBDirectory()))
- {
- File parentDirectory = getFileForPath(cfg.getDBDirectory());
- File backendDirectory =
- new File(parentDirectory, cfg.getBackendId());
-
- if (!backendDirectory.exists())
- {
- if(!backendDirectory.mkdirs())
- {
- ccr.addMessage(ERR_CREATE_FAIL.get(backendDirectory.getPath()));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- return ccr;
- }
- }
- //Make sure the directory is valid.
- else if (!backendDirectory.isDirectory())
- {
- ccr.addMessage(ERR_DIRECTORY_INVALID.get(backendDirectory.getPath()));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- return ccr;
- }
-
- ccr.setAdminActionRequired(true);
- ccr.addMessage(NOTE_CONFIG_DB_DIR_REQUIRES_RESTART.get(this.config.getDBDirectory(), cfg.getDBDirectory()));
- }
-
- if(!cfg.getDBDirectoryPermissions().equalsIgnoreCase(
- config.getDBDirectoryPermissions()) ||
- !cfg.getDBDirectory().equals(this.config.getDBDirectory()))
- {
- FilePermission backendPermission;
- try
- {
- backendPermission =
- FilePermission.decodeUNIXMode(cfg.getDBDirectoryPermissions());
- }
- catch(Exception e)
- {
- ccr.addMessage(ERR_CONFIG_BACKEND_MODE_INVALID.get(config.dn()));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- return ccr;
- }
-
- //Make sure the mode will allow the server itself access to
- //the database
- if(!backendPermission.isOwnerWritable() ||
- !backendPermission.isOwnerReadable() ||
- !backendPermission.isOwnerExecutable())
- {
- ccr.addMessage(ERR_CONFIG_BACKEND_INSANE_MODE.get(
- cfg.getDBDirectoryPermissions()));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- return ccr;
- }
-
- // Get the backend database backendDirectory permissions and apply
- File parentDirectory = getFileForPath(config.getDBDirectory());
- File backendDirectory = new File(parentDirectory, config.getBackendId());
- try
- {
- if (!FilePermission.setPermissions(backendDirectory, backendPermission))
- {
- logger.warn(WARN_UNABLE_SET_PERMISSIONS, backendPermission, backendDirectory);
- }
- }
- catch(Exception e)
- {
- // Log an warning that the permissions were not set.
- logger.warn(WARN_SET_PERMISSIONS_FAILED, backendDirectory, e);
- }
- }
-
- getMonitorProvider().enableFilterUseStats(
- cfg.isIndexFilterAnalyzerEnabled());
- getMonitorProvider()
- .setMaxEntries(cfg.getIndexFilterAnalyzerMaxFilters());
-
- this.config = cfg;
- }
- catch (Exception e)
- {
- ccr.addMessage(LocalizableMessage.raw(stackTraceToSingleLineString(e)));
- ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
- return ccr;
- }
-
- return ccr;
- }
-
- /**
- * Returns whether this container JE database environment is
- * open, valid and can be used.
- *
- * @return {@code true} if valid, or {@code false} otherwise.
- */
- public boolean isValid() {
- return env.isValid();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/SortValues.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/SortValues.java
deleted file mode 100644
index 8ee2627..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/SortValues.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2008 Sun Microsystems, Inc.
- * Portions Copyright 2014 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import java.util.List;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.Entry;
-import org.opends.server.types.SortKey;
-import org.opends.server.types.SortOrder;
-
-/**
- * This class defines a data structure that holds a set of attribute values that
- * are associated with a sort order for a given entry. Any or all of the
- * attribute values may be {@code null} if the entry does not include any values
- * for the attribute type targeted by the corresponding sort key.
- * <BR><BR>
- * This class implements the {@code Comparable} interface and may therefore be
- * used to order the elements in components like {@code TreeMap} and
- * {@code TreeSet}.
- * <p>
- * FIXME: replace with the SDK's SortKey?
- */
-public class SortValues
- implements Comparable<SortValues>
-{
- /** The set of sort keys (attribute values) in this sort order. */
- private ByteString[] values;
- /**
- * The types of sort keys.
- *
- * @see #values
- */
- private AttributeType[] types;
-
- /** The entry ID for the entry associated with this sort values. */
- private EntryID entryID;
-
- /** The sort order for this set of sort values. */
- private SortOrder sortOrder;
-
-
-
- /**
- * Creates a new sort values object with the provided information.
- *
- * @param entryID The entry ID for the entry associated with this set of
- * values.
- * @param values The attribute values for this sort values.
- * @param sortOrder The sort order to use to obtain the necessary values.
- */
- public SortValues(EntryID entryID, ByteString[] values,
- SortOrder sortOrder)
- {
- this.entryID = entryID;
- this.sortOrder = sortOrder;
- this.values = values;
-
- final SortKey[] sortKeys = sortOrder.getSortKeys();
- this.types = new AttributeType[sortKeys.length];
- for (int i = 0; i < sortKeys.length; i++)
- {
- types[i] = sortKeys[i].getAttributeType();
- }
- }
-
- /**
- * Creates a new sort values object with the provided information.
- *
- * @param entryID The entry ID for the entry associated with this set of
- * values.
- * @param entry The entry containing the values to extract and use when
- * sorting.
- * @param sortOrder The sort order to use to obtain the necessary values.
- */
- public SortValues(EntryID entryID, Entry entry, SortOrder sortOrder)
- {
- this.entryID = entryID;
- this.sortOrder = sortOrder;
-
- SortKey[] sortKeys = sortOrder.getSortKeys();
- this.values = new ByteString[sortKeys.length];
- this.types = new AttributeType[sortKeys.length];
- for (int i=0; i < sortKeys.length; i++)
- {
- SortKey sortKey = sortKeys[i];
- types[i] = sortKey.getAttributeType();
- List<Attribute> attrList = entry.getAttribute(types[i]);
- if (attrList != null)
- {
- values[i] = findBestMatchingValue(sortKey, attrList);
- }
- }
- }
-
- /**
- * Finds the best matching attribute value for the provided sort key in the
- * provided attribute list.
- * <p>
- * There may be multiple versions of this attribute in the target entry (e.g.,
- * with different sets of options), and it may also be a multivalued
- * attribute. In that case, we need to find the value that is the best match
- * for the corresponding sort key (i.e., for sorting in ascending order, we
- * want to find the lowest value; for sorting in descending order, we want to
- * find the highest value). This is handled by the SortKey.compareValues
- * method.
- */
- private ByteString findBestMatchingValue(SortKey sortKey, List<Attribute> attrList)
- {
- ByteString sortValue = null;
- for (Attribute a : attrList)
- {
- for (ByteString v : a)
- {
- if (sortValue == null || sortKey.compareValues(v, sortValue) < 0)
- {
- sortValue = v;
- }
- }
- }
- return sortValue;
- }
-
- /**
- * Compares this set of sort values with the provided set of values to
- * determine their relative order in a sorted list.
- *
- * @param sortValues The set of values to compare against this sort values.
- * It must also have the same sort order as this set of
- * values.
- *
- * @return A negative value if this sort values object should come before the
- * provided values in a sorted list, a positive value if this sort
- * values object should come after the provided values in a sorted
- * list, or zero if there is no significant difference in their
- * relative order.
- */
- @Override
- public int compareTo(SortValues sortValues)
- {
- SortKey[] sortKeys = sortOrder.getSortKeys();
-
- for (int i=0; i < values.length; i++)
- {
- int compareValue = sortKeys[i].compareValues(values[i], sortValues.values[i]);
- if (compareValue != 0)
- {
- return compareValue;
- }
- }
-
- // If we've gotten here, then we can't tell a difference between the sets of
- // sort values, so sort based on entry ID.
- return entryID.compareTo(sortValues.entryID);
- }
-
- /**
- * Compares the first element in this set of sort values with the provided
- * assertion value to determine whether the assertion value is greater than or
- * equal to the initial sort value. This is used during VLV processing to
- * find the offset by assertion value.
- *
- * @param assertionValue The assertion value to compare against the first
- * sort value.
- *
- * @return A negative value if the provided assertion value should come
- * before the first sort value, zero if the provided assertion value
- * is equal to the first sort value, or a positive value if the
- * provided assertion value should come after the first sort value.
- */
- public int compareTo(ByteString assertionValue)
- {
- SortKey sortKey = sortOrder.getSortKeys()[0];
- return sortKey.compareValues(values[0], assertionValue);
- }
-
- /**
- * Retrieves a string representation of this sort values object.
- *
- * @return A string representation of this sort values object.
- */
- @Override
- public String toString()
- {
- StringBuilder buffer = new StringBuilder();
- toString(buffer);
- return buffer.toString();
- }
-
- /**
- * Appends a string representation of this sort values object to the provided
- * buffer.
- *
- * @param buffer The buffer to which the information should be appended.
- */
- public void toString(StringBuilder buffer)
- {
- buffer.append("SortValues(");
-
- SortKey[] sortKeys = sortOrder.getSortKeys();
- for (int i=0; i < sortKeys.length; i++)
- {
- if (i > 0)
- {
- buffer.append(",");
- }
-
- buffer.append(sortKeys[i].ascending() ? "+" : "-");
-
- buffer.append(sortKeys[i].getAttributeType().getNameOrOID());
- buffer.append("=");
- buffer.append(values[i]);
- }
-
- buffer.append(", id=");
- buffer.append(entryID);
- buffer.append(")");
- }
-
- /**
- * Retrieve the attribute values in this sort values.
- *
- * @return The array of attribute values for this sort values.
- */
- public ByteString[] getValues()
- {
- return values;
- }
-
- /**
- * Retrieve the type of the attribute values in this sort values.
- *
- * @return The array of type of the attribute values for this sort values.
- */
- public AttributeType[] getTypes()
- {
- return types;
- }
-
- /**
- * Retrieve the entry ID in this sort values.
- *
- * @return The entry ID for this sort values.
- */
- public long getEntryID()
- {
- return entryID.longValue();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/SortValuesSet.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/SortValuesSet.java
deleted file mode 100644
index 5fae2c5..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/SortValuesSet.java
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.schema.MatchingRule;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.SortKey;
-
-import com.sleepycat.je.DatabaseException;
-
-/**
- * This class represents a partial sorted set of sorted entries in a VLV index.
- */
-public class SortValuesSet
-{
- private long[] entryIDs;
-
- private int[] valuesBytesOffsets;
- private byte[] valuesBytes;
-
- private byte[] keyBytes;
-
- private VLVIndex vlvIndex;
-
- /**
- * Construct an empty sort values set with the given information.
- *
- * @param vlvIndex The VLV index using this set.
- */
- public SortValuesSet(VLVIndex vlvIndex)
- {
- this.keyBytes = new byte[0];
- this.entryIDs = null;
- this.valuesBytes = null;
- this.valuesBytesOffsets = null;
- this.vlvIndex = vlvIndex;
- }
-
- /**
- * Construct a sort values set from the database.
- *
- * @param keyBytes The database key used to locate this set.
- * @param dataBytes The bytes to decode and construct this set.
- * @param vlvIndex The VLV index using this set.
- */
- public SortValuesSet(byte[] keyBytes, byte[] dataBytes, VLVIndex vlvIndex)
- {
- this.keyBytes = keyBytes;
- this.vlvIndex = vlvIndex;
- if(dataBytes == null)
- {
- entryIDs = new long[0];
- return;
- }
-
- entryIDs = getEncodedIDs(dataBytes, 0);
- int valuesBytesOffset = entryIDs.length * 8 + 4;
- int valuesBytesLength = dataBytes.length - valuesBytesOffset;
- valuesBytes = new byte[valuesBytesLength];
- System.arraycopy(dataBytes, valuesBytesOffset, valuesBytes, 0,
- valuesBytesLength);
- this.valuesBytesOffsets = null;
- }
-
- private SortValuesSet()
- {}
-
- /**
- * Add the given entryID and values from these sort values.
- *
- * @param sv The sort values to add.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- void add(SortValues sv) throws DatabaseException, DirectoryException
- {
- add(sv.getEntryID(), sv.getValues(), sv.getTypes());
- }
-
- /**
- * Add the given entryID and values from this VLV index.
- *
- * @param entryID The entry ID to add.
- * @param values The values to add.
- * @param types The types of the values to add.
- * @return True if the information was successfully added or False
- * otherwise.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public boolean add(long entryID, ByteString[] values, AttributeType[] types)
- throws DatabaseException, DirectoryException
- {
- if(values == null)
- {
- return false;
- }
-
- if(entryIDs == null || entryIDs.length == 0)
- {
- entryIDs = new long[] { entryID };
- valuesBytes = attributeValuesToDatabase(values, types);
- if(valuesBytesOffsets != null)
- {
- valuesBytesOffsets = new int[] { 0 };
- }
- return true;
- }
- if (vlvIndex.comparator.compare(
- this, entryIDs.length - 1, entryID, values) < 0)
- {
- long[] updatedEntryIDs = new long[entryIDs.length + 1];
- System.arraycopy(entryIDs, 0, updatedEntryIDs, 0, entryIDs.length);
- updatedEntryIDs[entryIDs.length] = entryID;
-
- byte[] newValuesBytes = attributeValuesToDatabase(values, types);
- byte[] updatedValuesBytes = new byte[valuesBytes.length +
- newValuesBytes.length];
- System.arraycopy(valuesBytes, 0, updatedValuesBytes, 0,
- valuesBytes.length);
- System.arraycopy(newValuesBytes, 0, updatedValuesBytes,
- valuesBytes.length,
- newValuesBytes.length);
-
- if(valuesBytesOffsets != null)
- {
- int[] updatedValuesBytesOffsets =
- new int[valuesBytesOffsets.length + 1];
- System.arraycopy(valuesBytesOffsets, 0, updatedValuesBytesOffsets,
- 0, valuesBytesOffsets.length);
- updatedValuesBytesOffsets[valuesBytesOffsets.length] =
- updatedValuesBytes.length - newValuesBytes.length;
- valuesBytesOffsets = updatedValuesBytesOffsets;
- }
-
- entryIDs = updatedEntryIDs;
- valuesBytes = updatedValuesBytes;
- return true;
- }
- else
- {
- int pos = binarySearch(entryID, values);
- if(pos >= 0)
- {
- if(entryIDs[pos] == entryID)
- {
- // The entry ID is alreadly present.
- return false;
- }
- }
- else
- {
- // For a negative return value r, the vlvIndex -(r+1) gives the array
- // ndex at which the specified value can be inserted to maintain
- // the sorted order of the array.
- pos = -(pos+1);
- }
-
- long[] updatedEntryIDs = new long[entryIDs.length + 1];
- System.arraycopy(entryIDs, 0, updatedEntryIDs, 0, pos);
- System.arraycopy(entryIDs, pos, updatedEntryIDs, pos+1,
- entryIDs.length-pos);
- updatedEntryIDs[pos] = entryID;
-
- byte[] newValuesBytes = attributeValuesToDatabase(values, types);
- // BUG valuesBytesOffsets might be null ? If not why testing below ?
- int valuesPos = valuesBytesOffsets[pos];
- byte[] updatedValuesBytes = new byte[valuesBytes.length +
- newValuesBytes.length];
- System.arraycopy(valuesBytes, 0, updatedValuesBytes, 0, valuesPos);
- System.arraycopy(valuesBytes, valuesPos, updatedValuesBytes,
- valuesPos + newValuesBytes.length,
- valuesBytes.length - valuesPos);
- System.arraycopy(newValuesBytes, 0, updatedValuesBytes, valuesPos,
- newValuesBytes.length);
-
- if(valuesBytesOffsets != null)
- {
- int[] updatedValuesBytesOffsets =
- new int[valuesBytesOffsets.length + 1];
- System.arraycopy(valuesBytesOffsets, 0, updatedValuesBytesOffsets,
- 0, pos);
- // Update the rest of the offsets one by one - Expensive!
- for(int i = pos; i < valuesBytesOffsets.length; i++)
- {
- updatedValuesBytesOffsets[i+1] =
- valuesBytesOffsets[i] + newValuesBytes.length;
- }
- updatedValuesBytesOffsets[pos] = valuesBytesOffsets[pos];
- valuesBytesOffsets = updatedValuesBytesOffsets;
- }
-
- entryIDs = updatedEntryIDs;
- valuesBytes = updatedValuesBytes;
- }
-
- return true;
- }
-
- /**
- * Remove the given entryID and values from these sort values.
- *
- * @param sv The sort values to remove.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- void remove(SortValues sv) throws DatabaseException, DirectoryException
- {
- if(entryIDs == null || entryIDs.length == 0)
- {
- return;
- }
-
- if(valuesBytesOffsets == null)
- {
- updateValuesBytesOffsets();
- }
-
- int pos = binarySearch(sv.getEntryID(), sv.getValues());
- if(pos < 0)
- {
- // Not found.
- return;
- }
-
- // Found it.
- long[] updatedEntryIDs = new long[entryIDs.length - 1];
- System.arraycopy(entryIDs, 0, updatedEntryIDs, 0, pos);
- System.arraycopy(entryIDs, pos+1, updatedEntryIDs, pos,
- entryIDs.length-pos-1);
- int valuesLength;
- int valuesPos = valuesBytesOffsets[pos];
- if(pos < valuesBytesOffsets.length - 1)
- {
- valuesLength = valuesBytesOffsets[pos+1] - valuesPos;
- }
- else
- {
- valuesLength = valuesBytes.length - valuesPos;
- }
- byte[] updatedValuesBytes = new byte[valuesBytes.length - valuesLength];
- System.arraycopy(valuesBytes, 0, updatedValuesBytes, 0, valuesPos);
- System.arraycopy(valuesBytes, valuesPos + valuesLength,
- updatedValuesBytes, valuesPos,
- valuesBytes.length - valuesPos - valuesLength);
-
- int[] updatedValuesBytesOffsets = new int[valuesBytesOffsets.length - 1];
- System.arraycopy(valuesBytesOffsets, 0, updatedValuesBytesOffsets, 0, pos);
- // Update the rest of the offsets one by one - Expensive!
- for(int i = pos + 1; i < valuesBytesOffsets.length; i++)
- {
- updatedValuesBytesOffsets[i - 1] = valuesBytesOffsets[i] - valuesLength;
- }
-
- entryIDs = updatedEntryIDs;
- valuesBytes = updatedValuesBytes;
- valuesBytesOffsets = updatedValuesBytesOffsets;
- }
-
- /**
- * Split portions of this set into another set. The values of the new set is
- * from the end of this set.
- *
- * @param splitLength The size of the new set.
- * @return The split set.
- */
- public SortValuesSet split(int splitLength)
- {
- if(valuesBytesOffsets == null)
- {
- updateValuesBytesOffsets();
- }
-
- long[] splitEntryIDs = new long[splitLength];
- byte[] splitValuesBytes = new byte[valuesBytes.length -
- valuesBytesOffsets[valuesBytesOffsets.length - splitLength]];
- int[] splitValuesBytesOffsets = new int[splitLength];
-
- long[] updatedEntryIDs = new long[entryIDs.length - splitEntryIDs.length];
- System.arraycopy(entryIDs, 0, updatedEntryIDs, 0, updatedEntryIDs.length);
- System.arraycopy(entryIDs, updatedEntryIDs.length, splitEntryIDs, 0,
- splitEntryIDs.length);
-
- byte[] updatedValuesBytes =
- new byte[valuesBytesOffsets[valuesBytesOffsets.length - splitLength]];
- System.arraycopy(valuesBytes, 0, updatedValuesBytes, 0,
- updatedValuesBytes.length);
- System.arraycopy(valuesBytes, updatedValuesBytes.length, splitValuesBytes,
- 0, splitValuesBytes.length);
-
- int[] updatedValuesBytesOffsets =
- new int[valuesBytesOffsets.length - splitValuesBytesOffsets.length];
- System.arraycopy(valuesBytesOffsets, 0, updatedValuesBytesOffsets,
- 0, updatedValuesBytesOffsets.length);
- for(int i = updatedValuesBytesOffsets.length;
- i < valuesBytesOffsets.length; i++)
- {
- splitValuesBytesOffsets[i - updatedValuesBytesOffsets.length] =
- valuesBytesOffsets[i] -
- valuesBytesOffsets[updatedValuesBytesOffsets.length];
- }
-
- SortValuesSet splitValuesSet = new SortValuesSet();
-
- splitValuesSet.entryIDs = splitEntryIDs;
- splitValuesSet.keyBytes = this.keyBytes;
- splitValuesSet.valuesBytes = splitValuesBytes;
- splitValuesSet.valuesBytesOffsets = splitValuesBytesOffsets;
- splitValuesSet.vlvIndex = this.vlvIndex;
-
- entryIDs = updatedEntryIDs;
- valuesBytes = updatedValuesBytes;
- valuesBytesOffsets = updatedValuesBytesOffsets;
- keyBytes = null;
-
- return splitValuesSet;
- }
-
- /**
- * Encode this set to its database format.
- *
- * @return The encoded bytes representing this set or null if
- * this set is empty.
- */
- public byte[] toDatabase()
- {
- if(size() == 0)
- {
- return null;
- }
-
- byte[] entryIDBytes = JebFormat.entryIDListToDatabase(entryIDs);
- byte[] concatBytes = new byte[entryIDBytes.length + valuesBytes.length + 4];
- int v = entryIDs.length;
-
- for (int j = 3; j >= 0; j--)
- {
- concatBytes[j] = (byte) (v & 0xFF);
- v >>>= 8;
- }
-
- System.arraycopy(entryIDBytes, 0, concatBytes, 4, entryIDBytes.length);
- System.arraycopy(valuesBytes, 0, concatBytes, entryIDBytes.length+4,
- valuesBytes.length);
-
- return concatBytes;
- }
-
- /**
- * Get the size of the provided encoded set.
- *
- * @param bytes The encoded bytes of a SortValuesSet to decode the size from.
- * @param offset The byte offset to start decoding.
- * @return The size of the provided encoded set.
- */
- public static int getEncodedSize(byte[] bytes, int offset)
- {
- int v = 0;
- for (int i = offset; i < offset + 4; i++)
- {
- v <<= 8;
- v |= bytes[i] & 0xFF;
- }
- return v;
- }
-
- /**
- * Get the IDs from the provided encoded set.
- *
- * @param bytes The encoded bytes of a SortValuesSet to decode the IDs from.
- * @param offset The byte offset to start decoding.
- * @return The decoded IDs in the provided encoded set.
- */
- public static long[] getEncodedIDs(byte[] bytes, int offset)
- {
- int length = getEncodedSize(bytes, offset);
- byte[] entryIDBytes = new byte[length * 8];
- System.arraycopy(bytes, offset+4, entryIDBytes, 0, entryIDBytes.length);
- return JebFormat.entryIDListFromDatabase(entryIDBytes);
- }
-
- /**
- * Searches this set for the specified values and entry ID using the binary
- * search algorithm.
- *
- * @param entryID The entry ID to match or -1 if not matching on entry ID.
- * @param values The values to match.
- * @return Index of the entry matching the values and optionally the entry ID
- * if it is found or a negative index if its not found.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- int binarySearch(long entryID, ByteString... values)
- throws DatabaseException, DirectoryException
- {
- if(entryIDs == null || entryIDs.length == 0)
- {
- return -1;
- }
-
- int i = 0;
- for(int j = entryIDs.length - 1; i <= j;)
- {
- int k = i + j >> 1;
- int l = vlvIndex.comparator.compare(this, k, entryID, values);
- if (l < 0)
- {
- i = k + 1;
- }
- else if (l > 0)
- {
- j = k - 1;
- }
- else
- {
- return k;
- }
- }
-
- return -(i + 1);
- }
-
- /**
- * Retrieve the size of this set.
- *
- * @return The size of this set.
- */
- public int size()
- {
- if(entryIDs == null)
- {
- return 0;
- }
-
- return entryIDs.length;
- }
-
- /**
- * Retrieve the entry IDs in this set.
- *
- * @return The entry IDs in this set.
- */
- public long[] getEntryIDs()
- {
- return entryIDs;
- }
-
- private byte[] attributeValuesToDatabase(ByteString[] values,
- AttributeType[] types) throws DirectoryException
- {
- try
- {
- final ByteStringBuilder builder = new ByteStringBuilder();
-
- for (int i = 0; i < values.length; i++)
- {
- final ByteString v = values[i];
- if (v == null)
- {
- builder.appendBERLength(0);
- }
- else
- {
- final MatchingRule eqRule = types[i].getEqualityMatchingRule();
- final ByteString nv = eqRule.normalizeAttributeValue(v);
- builder.appendBERLength(nv.length());
- builder.appendBytes(nv);
- }
- }
- builder.trimToSize();
-
- return builder.getBackingArray();
- }
- catch (DecodeException e)
- {
- throw new DirectoryException(
- ResultCode.INVALID_ATTRIBUTE_SYNTAX, e.getMessageObject(), e);
- }
- }
-
- /**
- * Returns the key to use for this set of sort values in the database.
- *
- * @return The key as an array of bytes that should be used for this set in
- * the database or NULL if this set is empty.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public byte[] getKeyBytes()
- throws DatabaseException, DirectoryException
- {
- if(entryIDs == null || entryIDs.length == 0)
- {
- return null;
- }
-
- if(keyBytes != null)
- {
- return keyBytes;
- }
-
- if(valuesBytesOffsets == null)
- {
- updateValuesBytesOffsets();
- }
-
- int vBytesPos = valuesBytesOffsets[valuesBytesOffsets.length - 1];
- int vBytesLength = valuesBytes.length - vBytesPos;
-
- byte[] idBytes =
- JebFormat.entryIDToDatabase(entryIDs[entryIDs.length - 1]);
- keyBytes =
- new byte[vBytesLength + idBytes.length];
-
- System.arraycopy(valuesBytes, vBytesPos, keyBytes, 0, vBytesLength);
- System.arraycopy(idBytes, 0, keyBytes, vBytesLength, idBytes.length);
-
- return keyBytes;
- }
-
- /**
- * Returns the key to use for this set of sort values in the database.
- *
- * @return The key as a sort values object that should be used for this set in
- * the database or NULL if this set is empty or unbounded.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public SortValues getKeySortValues()
- throws DatabaseException, DirectoryException
- {
- if(entryIDs == null || entryIDs.length == 0)
- {
- return null;
- }
-
- if(keyBytes != null && keyBytes.length == 0)
- {
- return null;
- }
-
- EntryID id = new EntryID(entryIDs[entryIDs.length - 1]);
- SortKey[] sortKeys = vlvIndex.sortOrder.getSortKeys();
- int numValues = sortKeys.length;
- ByteString[] values = new ByteString[numValues];
- for (int i = (entryIDs.length - 1) * numValues, j = 0;
- i < entryIDs.length * numValues;
- i++, j++)
- {
- values[j] = getValue(i);
- }
-
- return new SortValues(id, values, vlvIndex.sortOrder);
- }
-
- /**
- * Returns the sort values at the index in this set.
- *
- * @param index The index of the sort values to get.
- * @return The sort values object at the specified index.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws JebException If an error occurs in the JE database.
- **/
- public SortValues getSortValues(int index)
- throws JebException, DatabaseException, DirectoryException
- {
- if(entryIDs == null || entryIDs.length == 0)
- {
- return null;
- }
-
- EntryID id = new EntryID(entryIDs[index]);
- SortKey[] sortKeys = vlvIndex.sortOrder.getSortKeys();
- int numValues = sortKeys.length;
- ByteString[] values = new ByteString[numValues];
- for (int i = index * numValues, j = 0;
- i < (index + 1) * numValues;
- i++, j++)
- {
- values[j] = getValue(i);
- }
-
- return new SortValues(id, values, vlvIndex.sortOrder);
- }
-
- private void updateValuesBytesOffsets()
- {
- valuesBytesOffsets = new int[entryIDs.length];
- int vBytesPos = 0;
- int numAttributes = vlvIndex.sortOrder.getSortKeys().length;
-
- for(int pos = 0; pos < entryIDs.length; pos++)
- {
- valuesBytesOffsets[pos] = vBytesPos;
-
- for(int i = 0; i < numAttributes; i++)
- {
- int valueLength = valuesBytes[vBytesPos] & 0x7F;
- if (valueLength != valuesBytes[vBytesPos++])
- {
- int valueLengthBytes = valueLength;
- valueLength = 0;
- for (int j=0; j < valueLengthBytes; j++, vBytesPos++)
- {
- valueLength = (valueLength << 8) | (valuesBytes[vBytesPos] & 0xFF);
- }
- }
-
- vBytesPos += valueLength;
- }
- }
- }
-
- /**
- * Retrieve an attribute value from this values set. The index is the
- * absolute index. (ie. for a sort on 3 attributes per entry, an vlvIndex of 6
- * will be the 1st attribute value of the 3rd entry).
- *
- * @param index The vlvIndex of the attribute value to retrieve.
- * @return The byte array representation of the attribute value.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public ByteString getValue(int index)
- throws DatabaseException, DirectoryException
- {
- if(valuesBytesOffsets == null)
- {
- updateValuesBytesOffsets();
- }
- int numAttributes = vlvIndex.sortOrder.getSortKeys().length;
- int vIndex = index / numAttributes;
- int vOffset = index % numAttributes;
- int vBytesPos = valuesBytesOffsets[vIndex];
-
- // Find the desired value in the sort order set.
- for(int i = 0; i <= vOffset; i++)
- {
- int valueLength = valuesBytes[vBytesPos] & 0x7F;
- if (valueLength != valuesBytes[vBytesPos++])
- {
- int valueLengthBytes = valueLength;
- valueLength = 0;
- for (int j=0; j < valueLengthBytes; j++, vBytesPos++)
- {
- valueLength = (valueLength << 8) | (valuesBytes[vBytesPos] & 0xFF);
- }
- }
-
- if(i == vOffset)
- {
- if(valueLength == 0)
- {
- return null;
- }
- else
- {
- byte[] valueBytes = new byte[valueLength];
- System.arraycopy(valuesBytes, vBytesPos, valueBytes, 0, valueLength);
- return ByteString.wrap(valueBytes);
- }
- }
- else
- {
- vBytesPos += valueLength;
- }
- }
- return ByteString.empty();
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/State.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/State.java
deleted file mode 100644
index 32d79b7..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/State.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static com.sleepycat.je.LockMode.*;
-import static com.sleepycat.je.OperationStatus.*;
-
-import java.util.Arrays;
-
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-/**
- * This class is responsible for storing the configuration state of
- * the JE backend for a particular suffix.
- */
-public class State extends DatabaseContainer
-{
- private static final byte[] falseBytes = new byte[]{0x00};
- private static final byte[] trueBytes = new byte[]{0x01};
-
- /**
- * Create a new State object.
- *
- * @param name The name of the entry database.
- * @param env The JE Environment.
- * @param entryContainer The entryContainer of the entry database.
- * @throws com.sleepycat.je.DatabaseException If an error occurs in the
- * JE database.
- *
- */
- State(String name, Environment env, EntryContainer entryContainer)
- throws DatabaseException
- {
- super(name, env, entryContainer);
- this.dbConfig = JEBUtils.toDatabaseConfigNoDuplicates(env);
- }
-
- /**
- * Return the key associated with the index in the state database.
- *
- * @param index The index we need the key for.
- * @return the key
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private DatabaseEntry keyForIndex(DatabaseContainer index)
- throws DatabaseException
- {
- String shortName =
- index.getName().replace(entryContainer.getDatabasePrefix(), "");
- return new DatabaseEntry(StaticUtils.getBytes(shortName));
- }
-
- /**
- * Remove a record from the entry database.
- *
- * @param txn The database transaction or null if none.
- * @param index The index storing the trusted state info.
- * @return true if the entry was removed, false if it was not.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- boolean removeIndexTrustState(Transaction txn, DatabaseContainer index)
- throws DatabaseException
- {
- DatabaseEntry key = keyForIndex(index);
-
- return delete(txn, key) == SUCCESS;
- }
-
- /**
- * Fetch index state from the database.
- * @param txn The database transaction or null if none.
- * @param index The index storing the trusted state info.
- * @return The trusted state of the index in the database.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public boolean getIndexTrustState(Transaction txn, DatabaseContainer index)
- throws DatabaseException
- {
- DatabaseEntry key = keyForIndex(index);
- DatabaseEntry data = new DatabaseEntry();
-
- if (read(txn, key, data, DEFAULT) == SUCCESS)
- {
- byte[] bytes = data.getData();
- return Arrays.equals(bytes, trueBytes);
- }
- return false;
- }
-
- /**
- * Put index state to database.
- * @param txn The database transaction or null if none.
- * @param index The index storing the trusted state info.
- * @param trusted The state value to put into the database.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- void putIndexTrustState(Transaction txn, DatabaseContainer index, boolean trusted)
- throws DatabaseException
- {
- DatabaseEntry key = keyForIndex(index);
- DatabaseEntry data = new DatabaseEntry();
-
- data.setData(trusted ? trueBytes : falseBytes);
- put(txn, key, data);
- }
-
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Suffix.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Suffix.java
deleted file mode 100644
index 972eedd..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/Suffix.java
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2009-2010 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.BackendMessages.*;
-
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.opends.server.backends.jeb.Importer.DNCache;
-import org.opends.server.types.DN;
-
-import com.sleepycat.je.DatabaseException;
-import com.sleepycat.je.LockMode;
-
-/**
- * The class represents a suffix that is to be loaded during an import, or
- * rebuild index process. Multiple instances of this class can be instantiated
- * during and import to support multiple suffixes in a backend. A rebuild
- * index has only one of these instances.
- */
-class Suffix
-{
-
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- private final List<DN> includeBranches, excludeBranches;
- private final DN baseDN;
- private final EntryContainer srcEntryContainer;
- private final EntryContainer entryContainer;
- private final Object synchObject = new Object();
- private static final int PARENT_ID_SET_SIZE = 16 * 1024;
- private final ConcurrentHashMap<DN, CountDownLatch> pendingMap = new ConcurrentHashMap<>();
- private final Set<DN> parentSet = new HashSet<>(PARENT_ID_SET_SIZE);
- private final List<AttributeIndex> attributeIndexes = new ArrayList<>();
- private final List<VLVIndex> vlvIndexes = new ArrayList<>();
- private boolean processID2Children, processID2Subtree;
-
- /**
- * Creates a suffix instance using the specified parameters.
- *
- * @param entryContainer The entry container pertaining to the suffix.
- * @param srcEntryContainer The original entry container.
- * @param includeBranches The include branches.
- * @param excludeBranches The exclude branches.
- */
- Suffix(EntryContainer entryContainer, EntryContainer srcEntryContainer,
- List<DN> includeBranches, List<DN> excludeBranches)
- {
- this.entryContainer = entryContainer;
- this.srcEntryContainer = srcEntryContainer;
- this.baseDN = entryContainer.getBaseDN();
- if (includeBranches != null)
- {
- this.includeBranches = includeBranches;
- }
- else
- {
- this.includeBranches = new ArrayList<>(0);
- }
- if (excludeBranches != null)
- {
- this.excludeBranches = excludeBranches;
- }
- else
- {
- this.excludeBranches = new ArrayList<>(0);
- }
- }
-
- /**
- * Returns the DN2ID instance pertaining to a suffix instance.
- *
- * @return A DN2ID instance that can be used to manipulate the DN2ID database.
- */
- public DN2ID getDN2ID()
- {
- return entryContainer.getDN2ID();
- }
-
-
- /**
- * Returns the ID2Entry instance pertaining to a suffix instance.
- *
- * @return A ID2Entry instance that can be used to manipulate the ID2Entry
- * database.
- */
- public ID2Entry getID2Entry()
- {
- return entryContainer.getID2Entry();
- }
-
-
- /**
- * Returns the DN2URI instance pertaining to a suffix instance.
- *
- * @return A DN2URI instance that can be used to manipulate the DN2URI
- * database.
- */
- public DN2URI getDN2URI()
- {
- return entryContainer.getDN2URI();
- }
-
-
- /**
- * Returns the entry container pertaining to a suffix instance.
- *
- * @return The entry container used to create a suffix instance.
- */
- public EntryContainer getEntryContainer()
- {
- return entryContainer;
- }
-
-
- /**
- * Returns a map associating the attribute types with their corresponding attribute indexes.
- * The map contains only trusted indexes.
- *
- * @return a map associating the attribute types with their corresponding trusted attribute indexes.
- */
- public List<AttributeIndex> getAttributeIndexes()
- {
- return attributeIndexes;
- }
-
- /**
- * Returns the list of trusted VLV indexes.
- *
- * @return the list of trusted VLV indexes.
- */
- public List<VLVIndex> getVLVIndexes()
- {
- return vlvIndexes;
- }
-
- /**
- * Make sure the specified parent DN is not in the pending map.
- *
- * @param parentDN The DN of the parent.
- */
- private void assureNotPending(DN parentDN) throws InterruptedException
- {
- final CountDownLatch l = pendingMap.get(parentDN);
- if (l != null)
- {
- l.await();
- }
- }
-
-
- /**
- * Add specified DN to the pending map.
- *
- * @param dn The DN to add to the map.
- */
- public void addPending(DN dn)
- {
- pendingMap.putIfAbsent(dn, new CountDownLatch(1));
- }
-
-
- /**
- * Remove the specified DN from the pending map, it may not exist if the
- * entries are being migrated so just return.
- *
- * @param dn The DN to remove from the map.
- */
- public void removePending(DN dn)
- {
- CountDownLatch l = pendingMap.remove(dn);
- if(l != null)
- {
- l.countDown();
- }
- }
-
-
- /**
- * Return {@code true} if the specified dn is contained in the parent set, or
- * in the specified DN cache. This would indicate that the parent has already
- * been processed. It returns {@code false} otherwise.
- *
- * It will optionally check the dn2id database for the dn if the specified
- * cleared backend boolean is {@code true}.
- *
- * @param dn The DN to check for.
- * @param dnCache The importer DN cache.
- * @param clearedBackend Set to {@code true} if the import process cleared the
- * backend before processing.
- * @return {@code true} if the dn is contained in the parent ID, or
- * {@code false} otherwise.
- *
- * @throws DatabaseException If an error occurred searching the DN cache, or
- * dn2id database.
- * @throws InterruptedException If an error occurred processing the pending
- * map.
- */
- public
- boolean isParentProcessed(DN dn, DNCache dnCache, boolean clearedBackend)
- throws DatabaseException, InterruptedException {
- synchronized(synchObject) {
- if(parentSet.contains(dn))
- {
- return true;
- }
- }
- //The DN was not in the parent set. Make sure it isn't pending.
- try {
- assureNotPending(dn);
- } catch (InterruptedException e) {
- logger.error(ERR_IMPORT_LDIF_PENDING_ERR, e.getMessage());
- throw e;
- }
- // Either parent is in the DN cache,
- // or else check the dn2id database for the DN (only if backend wasn't cleared)
- final boolean parentThere = dnCache.contains(dn)
- || (!clearedBackend
- && getDN2ID().get(null, dn, LockMode.DEFAULT) != null);
- //Add the DN to the parent set if needed.
- if (parentThere) {
- synchronized(synchObject) {
- if (parentSet.size() >= PARENT_ID_SET_SIZE) {
- Iterator<DN> iterator = parentSet.iterator();
- iterator.next();
- iterator.remove();
- }
- parentSet.add(dn);
- }
- }
- return parentThere;
- }
-
-
- final boolean isProcessID2Children()
- {
- return processID2Children;
- }
-
- final boolean isProcessID2Subtree()
- {
- return processID2Subtree;
- }
-
- /**
- * Sets the trusted status of all of the indexes, vlvIndexes, id2children
- * and id2subtree indexes.
- *
- * @throws DatabaseException If an error occurred setting the indexes to
- * trusted.
- */
- public void setIndexesTrusted() throws DatabaseException
- {
- if (processID2Children)
- {
- entryContainer.getID2Children().setTrusted(null, true);
- }
- if (processID2Subtree)
- {
- entryContainer.getID2Subtree().setTrusted(null, true);
- }
- for (AttributeIndex attrIndex : attributeIndexes)
- {
- setTrusted(attrIndex, true);
- }
- for (VLVIndex vlvIdx : vlvIndexes)
- {
- vlvIdx.setTrusted(null, true);
- }
- }
-
- /**
- * Build the lists of indexes to process and set their status as not trusted.
- * ID2Children and ID2Subtree are also considered, albeit as special cases.
- *
- * @param onlyDegraded
- * true if currently untrusted indexes should be processed as well.
- * @throws DatabaseException
- * If an error occurred setting the indexes to trusted.
- */
- public void setIndexesNotTrusted(boolean onlyDegraded) throws DatabaseException
- {
- setNotTrustedDN2IDRelatedIndexes(onlyDegraded);
- for (AttributeIndex attributeIndex : entryContainer.getAttributeIndexes())
- {
- if (!onlyDegraded || attributeIndex.isTrusted())
- {
- attributeIndexes.add(attributeIndex);
- setTrusted(attributeIndex, false);
- }
- }
- for (VLVIndex vlvIndex : entryContainer.getVLVIndexes())
- {
- if (!onlyDegraded || vlvIndex.isTrusted())
- {
- vlvIndex.setTrusted(null, false);
- vlvIndexes.add(vlvIndex);
- }
- }
- }
-
- private void setNotTrustedDN2IDRelatedIndexes(boolean onlyDegraded)
- {
- if (setNotTrustedDN2IDRelated(entryContainer.getID2Children(), onlyDegraded))
- {
- processID2Children = true;
- }
- if (setNotTrustedDN2IDRelated(entryContainer.getID2Subtree(), onlyDegraded))
- {
- processID2Subtree = true;
- }
- }
-
- private boolean setNotTrustedDN2IDRelated(Index auxIndex, boolean onlyDegraded)
- {
- if (!onlyDegraded || auxIndex.isTrusted())
- {
- auxIndex.setTrusted(null, false);
- return true;
- }
- return false;
- }
-
- private void setTrusted(AttributeIndex attrIndex, boolean trusted)
- {
- for (Index index : attrIndex.getAllIndexes())
- {
- if (index != null)
- {
- index.setTrusted(null, trusted);
- }
- }
- }
-
- void forceTrustedDN2IDRelated(boolean trusted)
- {
- entryContainer.getID2Children().setTrusted(null, trusted);
- entryContainer.getID2Subtree().setTrusted(null, trusted);
- if (!trusted)
- {
- processID2Subtree = true;
- processID2Children = true;
- }
- }
-
- /**
- * Return a src entry container.
- *
- * @return The src entry container.
- */
- public EntryContainer getSrcEntryContainer()
- {
- return this.srcEntryContainer;
- }
-
-
- /**
- * Return include branches.
- *
- * @return The include branches.
- */
- public List<DN> getIncludeBranches()
- {
- return this.includeBranches;
- }
-
-
- /**
- * Return exclude branches.
- *
- * @return the exclude branches.
- */
- public List<DN> getExcludeBranches()
- {
- return this.excludeBranches;
- }
-
-
- /**
- * Return base DN.
- *
- * @return The base DN.
- */
- public DN getBaseDN()
- {
- return this.baseDN;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VLVIndex.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VLVIndex.java
deleted file mode 100644
index ae7ef43..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VLVIndex.java
+++ /dev/null
@@ -1,1410 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.BackendMessages.*;
-import static org.opends.server.util.StaticUtils.*;
-
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.config.server.ConfigChangeResult;
-import org.forgerock.opendj.config.server.ConfigException;
-import org.forgerock.opendj.ldap.ByteSequence;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.SearchScope;
-import org.forgerock.opendj.ldap.schema.MatchingRule;
-import org.opends.server.admin.server.ConfigurationChangeListener;
-import org.opends.server.admin.std.meta.LocalDBVLVIndexCfgDefn.Scope;
-import org.opends.server.admin.std.server.LocalDBVLVIndexCfg;
-import org.opends.server.controls.ServerSideSortRequestControl;
-import org.opends.server.controls.VLVRequestControl;
-import org.opends.server.controls.VLVResponseControl;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.core.SearchOperation;
-import org.opends.server.protocols.ldap.LDAPResultCode;
-import org.opends.server.types.Attribute;
-import org.opends.server.types.AttributeType;
-import org.opends.server.types.DN;
-import org.opends.server.types.DirectoryException;
-import org.opends.server.types.Entry;
-import org.opends.server.types.Modification;
-import org.opends.server.types.SearchFilter;
-import org.opends.server.types.SortKey;
-import org.opends.server.types.SortOrder;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-/**
- * This class represents a VLV index. Each database record is a sorted list
- * of entry IDs followed by sets of attribute values used to sort the entries.
- * The entire set of entry IDs are broken up into sorted subsets to decrease
- * the number of database retrievals needed for a range lookup. The records are
- * keyed by the last entry's first sort attribute value. The list of entries
- * in a particular database record maintains the property where the first sort
- * attribute value is bigger then the previous key but smaller or equal
- * to its own key.
- */
-public class VLVIndex extends DatabaseContainer
- implements ConfigurationChangeListener<LocalDBVLVIndexCfg>
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The comparator for vlvIndex keys. */
- public VLVKeyComparator comparator;
- /** The limit on the number of entry IDs that may be indexed by one key. */
- private int sortedSetCapacity = 4000;
- /** The SortOrder in use by this VLV index to sort the entries. */
- public SortOrder sortOrder;
-
- /** The cached count of entries in this index. */
- private final AtomicInteger count;
-
- private final State state;
- /**
- * A flag to indicate if this vlvIndex should be trusted to be consistent
- * with the entries database.
- */
- private boolean trusted;
-
- /** The VLV vlvIndex configuration. */
- private LocalDBVLVIndexCfg config;
-
- private DN baseDN;
- private SearchFilter filter;
- private SearchScope scope;
-
-
- /**
- * Create a new VLV vlvIndex object.
- *
- * @param config The VLV index config object to use for this VLV
- * index.
- * @param state The state database to persist vlvIndex state info.
- * @param env The JE Environment
- * @param entryContainer The database entryContainer holding this vlvIndex.
- * @throws com.sleepycat.je.DatabaseException
- * If an error occurs in the JE database.
- * @throws ConfigException if a error occurs while reading the VLV index
- * configuration
- */
- VLVIndex(LocalDBVLVIndexCfg config, State state, Environment env, EntryContainer entryContainer)
- throws DatabaseException, ConfigException
- {
- super(entryContainer.getDatabasePrefix()+"_vlv."+config.getName(),
- env, entryContainer);
-
- this.config = config;
- this.baseDN = config.getBaseDN();
- this.scope = convertScope(config.getScope());
- this.sortedSetCapacity = config.getMaxBlockSize();
-
- try
- {
- this.filter = SearchFilter.createFilterFromString(config.getFilter());
- }
- catch(Exception e)
- {
- throw new ConfigException(ERR_CONFIG_VLV_INDEX_BAD_FILTER.get(
- config.getFilter(), name, stackTraceToSingleLineString(e)));
- }
-
- String[] sortAttrs = config.getSortOrder().split(" ");
- SortKey[] sortKeys = new SortKey[sortAttrs.length];
- MatchingRule[] orderingRules = new MatchingRule[sortAttrs.length];
- boolean[] ascending = new boolean[sortAttrs.length];
- for(int i = 0; i < sortAttrs.length; i++)
- {
- try
- {
- if(sortAttrs[i].startsWith("-"))
- {
- ascending[i] = false;
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- else
- {
- ascending[i] = true;
- if(sortAttrs[i].startsWith("+"))
- {
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- }
- }
- catch(Exception e)
- {
- throw new ConfigException(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortKeys[i], name));
- }
-
- AttributeType attrType =
- DirectoryServer.getAttributeTypeOrNull(sortAttrs[i].toLowerCase());
- if(attrType == null)
- {
- throw new ConfigException(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortAttrs[i], name));
- }
- sortKeys[i] = new SortKey(attrType, ascending[i]);
- orderingRules[i] = attrType.getOrderingMatchingRule();
- }
-
- this.sortOrder = new SortOrder(sortKeys);
- this.comparator = new VLVKeyComparator(orderingRules, ascending);
-
- this.dbConfig = JEBUtils.toDatabaseConfigNoDuplicates(env);
- this.dbConfig.setOverrideBtreeComparator(true);
- this.dbConfig.setBtreeComparator(this.comparator);
-
- this.state = state;
-
- this.trusted = state.getIndexTrustState(null, this);
- if (!trusted && entryContainer.getHighestEntryID().longValue() == 0)
- {
- // If there are no entries in the entry container then there
- // is no reason why this vlvIndex can't be upgraded to trusted.
- setTrusted(null, true);
- }
-
- this.count = new AtomicInteger(0);
- this.config.addChangeListener(this);
- }
-
- private SearchScope convertScope(final Scope cfgScope)
- {
- switch (cfgScope)
- {
- case BASE_OBJECT:
- return SearchScope.BASE_OBJECT;
- case SINGLE_LEVEL:
- return SearchScope.SINGLE_LEVEL;
- case SUBORDINATE_SUBTREE:
- return SearchScope.SUBORDINATES;
- default: // WHOLE_SUBTREE
- return SearchScope.WHOLE_SUBTREE;
- }
- }
-
- /** {@inheritDoc} */
- @Override
- public void open() throws DatabaseException
- {
- super.open();
-
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- LockMode lockMode = LockMode.RMW;
-
- Cursor cursor = openCursor(null, CursorConfig.READ_COMMITTED);
- try
- {
- OperationStatus status = cursor.getFirst(key, data,lockMode);
- while(status == OperationStatus.SUCCESS)
- {
- count.getAndAdd(SortValuesSet.getEncodedSize(data.getData(), 0));
- status = cursor.getNext(key, data, lockMode);
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Close the VLV index.
- *
- * @throws DatabaseException if a JE database error occurs while
- * closing the index.
- */
- @Override
- public void close() throws DatabaseException
- {
- super.close();
- this.config.removeChangeListener(this);
- }
-
- /**
- * Update the vlvIndex for a new entry.
- *
- * @param txn A database transaction, or null if none is required.
- * @param entryID The entry ID.
- * @param entry The entry to be indexed.
- * @return True if the entry ID for the entry are added. False if
- * the entry ID already exists.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws org.opends.server.types.DirectoryException If a Directory Server
- * error occurs.
- * @throws JebException If an error occurs in the JE backend.
- */
- public boolean addEntry(Transaction txn, EntryID entryID, Entry entry)
- throws DatabaseException, DirectoryException, JebException
- {
- return shouldInclude(entry)
- && insertValues(txn, entryID.longValue(), entry);
- }
-
- /**
- * Update the vlvIndex for a new entry.
- *
- * @param buffer The index buffer to buffer the changes.
- * @param entryID The entry ID.
- * @param entry The entry to be indexed.
- * @return True if the entry ID for the entry are added. False if
- * the entry ID already exists.
- * @throws DirectoryException If a Directory Server
- * error occurs.
- */
- boolean addEntry(IndexBuffer buffer, EntryID entryID, Entry entry) throws DirectoryException
- {
- if (shouldInclude(entry))
- {
- final SortValues sortValues = new SortValues(entryID, entry, sortOrder);
- buffer.getVLVIndex(this).addValues(sortValues);
- return true;
- }
- return false;
- }
-
- /**
- * Update the vlvIndex for a deleted entry.
- *
- * @param buffer The database transaction to be used for the deletions
- * @param entryID The entry ID
- * @param entry The contents of the deleted entry.
- * @return True if the entry was successfully removed from this VLV index
- * or False otherwise.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- boolean removeEntry(IndexBuffer buffer, EntryID entryID, Entry entry) throws DirectoryException
- {
- if (shouldInclude(entry))
- {
- final SortValues sortValues = new SortValues(entryID, entry, sortOrder);
- buffer.getVLVIndex(this).deleteValues(sortValues);
- return true;
- }
- return false;
- }
-
- /**
- * Update the vlvIndex to reflect a sequence of modifications in a Modify
- * operation.
- *
- * @param buffer The database transaction to be used for the deletions
- * @param entryID The ID of the entry that was modified.
- * @param oldEntry The entry before the modifications were applied.
- * @param newEntry The entry after the modifications were applied.
- * @param mods The sequence of modifications in the Modify operation.
- * @return True if the modification was successfully processed or False
- * otherwise.
- * @throws DatabaseException If an error occurs during an operation on a
- * JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- boolean modifyEntry(IndexBuffer buffer,
- EntryID entryID,
- Entry oldEntry,
- Entry newEntry,
- List<Modification> mods)
- throws DatabaseException, DirectoryException
- {
- if (shouldInclude(oldEntry))
- {
- if (shouldInclude(newEntry))
- {
- // The entry should still be indexed. See if any sorted attributes are changed.
- if (isSortAttributeModified(mods))
- {
- // Sorted attributes have changed. Reindex the entry;
- boolean success;
- success = removeEntry(buffer, entryID, oldEntry);
- success &= addEntry(buffer, entryID, newEntry);
- return success;
- }
- }
- else
- {
- // The modifications caused the new entry to be unindexed.
- return removeEntry(buffer, entryID, oldEntry);
- }
- }
- else
- {
- if (shouldInclude(newEntry))
- {
- // The modifications caused the new entry to be indexed. Add to vlvIndex
- return addEntry(buffer, entryID, newEntry);
- }
- }
-
- // The modifications does not affect this vlvIndex
- return true;
- }
-
- private boolean isSortAttributeModified(List<Modification> mods)
- {
- for (SortKey sortKey : sortOrder.getSortKeys())
- {
- AttributeType attributeType = sortKey.getAttributeType();
- List<AttributeType> subTypes = DirectoryServer.getSchema().getSubTypes(attributeType);
- for (Modification mod : mods)
- {
- AttributeType modAttrType = mod.getAttribute().getAttributeType();
- if (modAttrType.equals(attributeType)
- || subTypes.contains(modAttrType))
- {
- return true;
- }
- }
- }
- return false;
- }
-
- /**
- * Get a sorted values set that should contain the entry with the given
- * information.
- *
- * @param txn The transaction to use when retrieving the set or NULL if it is
- * not required.
- * @param entryID The entry ID to use.
- * @param values The values to use.
- * @param types The types of the values to use.
- * @return The SortValuesSet that should contain the entry with the given
- * information.
- * @throws DatabaseException If an error occurs during an operation on a
- * JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- SortValuesSet getSortValuesSet(Transaction txn, long entryID,
- ByteString[] values, AttributeType[] types) throws DatabaseException,
- DirectoryException
- {
- DatabaseEntry key = new DatabaseEntry(encodeKey(entryID, values, types));
- DatabaseEntry data = new DatabaseEntry();
- return getSortValuesSet(txn, key, data, LockMode.DEFAULT);
- }
-
- private SortValuesSet getSortValuesSet(Transaction txn, DatabaseEntry key,
- DatabaseEntry data, LockMode lockMode)
- {
- OperationStatus status = getSearchKeyRange(txn, key, data, lockMode);
- if (status != OperationStatus.SUCCESS)
- {
- // There are no records in the database
- if (logger.isTraceEnabled())
- {
- logger.trace("No sort values set exist in VLV vlvIndex %s. "
- + "Creating unbound set.", config.getName());
- }
- // this could not be found, so clean the key for later reuse
- key.setData(new byte[0]);
- return new SortValuesSet(this);
- }
-
- if (logger.isTraceEnabled())
- {
- logSearchKeyResult(key);
- }
- return new SortValuesSet(key.getData(), data.getData(), this);
- }
-
- private void logSearchKeyResult(DatabaseEntry key)
- {
- StringBuilder searchKeyHex = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(searchKeyHex, key.getData(), 4);
- StringBuilder foundKeyHex = new StringBuilder();
- StaticUtils.byteArrayToHexPlusAscii(foundKeyHex, key.getData(), 4);
- logger.trace("Retrieved a sort values set in VLV vlvIndex %s\n" +
- "Search Key:%s\nFound Key:%s\n",
- config.getName(), searchKeyHex, foundKeyHex);
- }
-
- /**
- * Search for entries matching the entry ID and attribute values and
- * return its entry ID.
- *
- * @param txn The JE transaction to use for database updates.
- * @param entryID The entry ID to search for.
- * @param values The values to search for.
- * @param types The types of the values to search for.
- * @return The index of the entry ID matching the values or -1 if its not
- * found.
- * @throws DatabaseException If an error occurs during an operation on a
- * JE database.
- * @throws JebException If an error occurs during an operation on a
- * JE database.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- boolean containsValues(Transaction txn, long entryID,
- ByteString[] values, AttributeType[] types) throws JebException,
- DatabaseException, DirectoryException
- {
- SortValuesSet valuesSet = getSortValuesSet(txn, entryID, values, types);
- int pos = valuesSet.binarySearch(entryID, values);
- return pos >= 0;
- }
-
- private boolean insertValues(Transaction txn, long entryID, Entry entry)
- throws JebException, DatabaseException, DirectoryException
- {
- ByteString[] values = getSortValues(entry);
- AttributeType[] types = getSortTypes();
- DatabaseEntry key = new DatabaseEntry(encodeKey(entryID, values, types));
- DatabaseEntry data = new DatabaseEntry();
-
- SortValuesSet sortValuesSet =
- getSortValuesSet(txn, key, data, LockMode.RMW);
- boolean success = sortValuesSet.add(entryID, values, types);
-
- int newSize = sortValuesSet.size();
- if(newSize >= sortedSetCapacity)
- {
- SortValuesSet splitSortValuesSet = sortValuesSet.split(newSize / 2);
- put(txn, key, data, splitSortValuesSet); // splitAfter
- put(txn, key, data, sortValuesSet); // after
-
- if(logger.isTraceEnabled())
- {
- logger.trace("SortValuesSet with key %s has reached" +
- " the entry size of %d. Spliting into two sets with " +
- " keys %s and %s.", splitSortValuesSet.getKeySortValues(),
- newSize, sortValuesSet.getKeySortValues(),
- splitSortValuesSet.getKeySortValues());
- }
- }
- else
- {
- data.setData(sortValuesSet.toDatabase()); // after
- put(txn, key, data);
- // TODO: What about phantoms?
- }
-
- if(success)
- {
- count.getAndIncrement();
- }
-
- return success;
- }
-
- private void put(Transaction txn, DatabaseEntry key, DatabaseEntry data,
- SortValuesSet set) throws DirectoryException
- {
- key.setData(set.getKeyBytes());
- data.setData(set.toDatabase());
- put(txn, key, data);
- }
-
- /**
- * Gets the types of the attribute values to sort.
- *
- * @return The types of the attribute values to sort on.
- */
- AttributeType[] getSortTypes()
- {
- SortKey[] sortKeys = sortOrder.getSortKeys();
- AttributeType[] types = new AttributeType[sortKeys.length];
- for (int i = 0; i < sortKeys.length; i++)
- {
- types[i] = sortKeys[i].getAttributeType();
- }
- return types;
- }
-
- private OperationStatus getSearchKeyRange(Transaction txn, DatabaseEntry key,
- DatabaseEntry data, LockMode lockMode)
- {
- Cursor cursor = openCursor(txn, CursorConfig.READ_COMMITTED);
- try
- {
- return cursor.getSearchKeyRange(key, data, lockMode);
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Update the vlvIndex with the specified values to add and delete.
- *
- * @param txn A database transaction, or null if none is required.
- * @param addedValues The values to add to the VLV index.
- * @param deletedValues The values to delete from the VLV index.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If a Directory Server
- * error occurs.
- */
- void updateIndex(Transaction txn, TreeSet<SortValues> addedValues, TreeSet<SortValues> deletedValues)
- throws DirectoryException, DatabaseException
- {
- // Handle cases where nothing is changed early to avoid
- // DB access.
- if((addedValues == null || addedValues.isEmpty()) &&
- (deletedValues == null || deletedValues.isEmpty()))
- {
- return;
- }
-
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- Iterator<SortValues> aValues = null;
- Iterator<SortValues> dValues = null;
- SortValues av = null;
- SortValues dv = null;
-
- if(addedValues != null)
- {
- aValues = addedValues.iterator();
- av = aValues.next();
- }
- if(deletedValues != null)
- {
- dValues = deletedValues.iterator();
- dv = dValues.next();
- }
-
- while(true)
- {
- if(av != null)
- {
- if(dv != null)
- {
- // Start from the smallest values from either set.
- if(av.compareTo(dv) < 0)
- {
- key.setData(encodeKey(av));
- }
- else
- {
- key.setData(encodeKey(dv));
- }
- }
- else
- {
- key.setData(encodeKey(av));
- }
- }
- else if(dv != null)
- {
- key.setData(encodeKey(dv));
- }
- else
- {
- break;
- }
-
- final SortValuesSet sortValuesSet = getSortValuesSet(txn, key, data, LockMode.RMW);
- int oldSize = sortValuesSet.size();
- if(key.getData().length == 0)
- {
- // This is the last unbounded set.
- while(av != null)
- {
- sortValuesSet.add(av);
- av = moveToNextSortValues(aValues);
- }
-
- while(dv != null)
- {
- sortValuesSet.remove(dv);
- dv = moveToNextSortValues(dValues);
- }
- }
- else
- {
- SortValues maxValues = decodeKey(sortValuesSet.getKeyBytes());
-
- while(av != null && av.compareTo(maxValues) <= 0)
- {
- sortValuesSet.add(av);
- av = moveToNextSortValues(aValues);
- }
-
- while(dv != null && dv.compareTo(maxValues) <= 0)
- {
- sortValuesSet.remove(dv);
- dv = moveToNextSortValues(dValues);
- }
- }
-
- int newSize = sortValuesSet.size();
- if(newSize >= sortedSetCapacity)
- {
- SortValuesSet splitSortValuesSet = sortValuesSet.split(newSize / 2);
- put(txn, key, data, splitSortValuesSet); // splitAfter
- put(txn, key, data, sortValuesSet); // after
-
- if(logger.isTraceEnabled())
- {
- logger.trace("SortValuesSet with key %s has reached" +
- " the entry size of %d. Spliting into two sets with " +
- " keys %s and %s.", splitSortValuesSet.getKeySortValues(),
- newSize, sortValuesSet.getKeySortValues(),
- splitSortValuesSet.getKeySortValues());
- }
- }
- else if(newSize == 0)
- {
- delete(txn, key);
- }
- else
- {
- byte[] after = sortValuesSet.toDatabase();
- data.setData(after);
- put(txn, key, data);
- }
-
- count.getAndAdd(newSize - oldSize);
- }
- }
-
- private SortValues moveToNextSortValues(Iterator<SortValues> sortValues)
- {
- sortValues.remove();
- if (sortValues.hasNext())
- {
- return sortValues.next();
- }
- return null;
- }
-
- private byte[] encodeKey(SortValues sv) throws DirectoryException
- {
- return encodeKey(sv.getEntryID(), sv.getValues(), sv.getTypes());
- }
-
- /**
- * Evaluate a search with sort control using this VLV index.
- *
- * @param txn The transaction to used when reading the index or NULL if it is
- * not required.
- * @param searchOperation The search operation to evaluate.
- * @param sortControl The sort request control to evaluate.
- * @param vlvRequest The VLV request control to evaluate or NULL if VLV is not
- * requested.
- * @param debugBuilder If not null, a diagnostic string will be written
- * which will help determine how this index contributed
- * to this search.
- * @return The sorted EntryIDSet containing the entry IDs that match the
- * search criteria.
- * @throws DirectoryException If a Directory Server error occurs.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- EntryIDSet evaluate(Transaction txn,
- SearchOperation searchOperation,
- ServerSideSortRequestControl sortControl,
- VLVRequestControl vlvRequest,
- StringBuilder debugBuilder)
- throws DirectoryException, DatabaseException
- {
- if (!trusted
- || !searchOperation.getBaseDN().equals(baseDN)
- || !searchOperation.getScope().equals(scope)
- || !searchOperation.getFilter().equals(filter)
- || !sortControl.getSortOrder().equals(sortOrder))
- {
- return null;
- }
-
- if (debugBuilder != null)
- {
- debugBuilder.append("vlv=");
- debugBuilder.append("[INDEX:");
- debugBuilder.append(name.replace(entryContainer.getDatabasePrefix() + "_", ""));
- debugBuilder.append("]");
- }
-
- long[] selectedIDs = new long[0];
- if(vlvRequest != null)
- {
- int currentCount = count.get();
- int beforeCount = vlvRequest.getBeforeCount();
- int afterCount = vlvRequest.getAfterCount();
-
- if (vlvRequest.getTargetType() == VLVRequestControl.TYPE_TARGET_BYOFFSET)
- {
- int targetOffset = vlvRequest.getOffset();
- if (targetOffset < 0)
- {
- // The client specified a negative target offset. This should never
- // be allowed.
- searchOperation.addResponseControl(
- new VLVResponseControl(targetOffset, currentCount,
- LDAPResultCode.OFFSET_RANGE_ERROR));
-
- LocalizableMessage message = ERR_ENTRYIDSORTER_NEGATIVE_START_POS.get();
- throw new DirectoryException(ResultCode.VIRTUAL_LIST_VIEW_ERROR,
- message);
- }
- else if (targetOffset == 0)
- {
- // This is an easy mistake to make, since VLV offsets start at 1
- // instead of 0. We'll assume the client meant to use 1.
- targetOffset = 1;
- }
- int listOffset = targetOffset - 1; // VLV offsets start at 1, not 0.
- int startPos = listOffset - beforeCount;
- if (startPos < 0)
- {
- // This can happen if beforeCount >= offset, and in this case we'll
- // just adjust the start position to ignore the range of beforeCount
- // that doesn't exist.
- startPos = 0;
- beforeCount = listOffset;
- }
- else if(startPos >= currentCount)
- {
- // The start position is beyond the end of the list. In this case,
- // we'll assume that the start position was one greater than the
- // size of the list and will only return the beforeCount entries.
- // The start position is beyond the end of the list. In this case,
- // we'll assume that the start position was one greater than the
- // size of the list and will only return the beforeCount entries.
- targetOffset = currentCount + 1;
- listOffset = currentCount;
- startPos = listOffset - beforeCount;
- afterCount = 0;
- }
-
- int count = 1 + beforeCount + afterCount;
- selectedIDs = new long[count];
-
- Cursor cursor = openCursor(txn, CursorConfig.READ_COMMITTED);
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- LockMode lockMode = LockMode.DEFAULT;
- //Locate the set that contains the target entry.
- int cursorCount = 0;
- int selectedPos = 0;
- OperationStatus status = cursor.getFirst(key, data, lockMode);
- while(status == OperationStatus.SUCCESS)
- {
- if(logger.isTraceEnabled())
- {
- logSearchKeyResult(key);
- }
- long[] IDs = SortValuesSet.getEncodedIDs(data.getData(), 0);
- for(int i = startPos + selectedPos - cursorCount;
- i < IDs.length && selectedPos < count;
- i++, selectedPos++)
- {
- selectedIDs[selectedPos] = IDs[i];
- }
- cursorCount += IDs.length;
- status = cursor.getNext(key, data,lockMode);
- }
-
- if (selectedPos < count)
- {
- // We don't have enough entries in the set to meet the requested
- // page size, so we'll need to shorten the array.
- long[] newIDArray = new long[selectedPos];
- System.arraycopy(selectedIDs, 0, newIDArray, 0, selectedPos);
- selectedIDs = newIDArray;
- }
-
- searchOperation.addResponseControl(
- new VLVResponseControl(targetOffset, currentCount,
- LDAPResultCode.SUCCESS));
-
- if(debugBuilder != null)
- {
- debugBuilder.append("[COUNT:");
- debugBuilder.append(cursorCount);
- debugBuilder.append("]");
- }
- }
- finally
- {
- cursor.close();
- }
- }
- else
- {
- int targetOffset = 0;
- int includedBeforeCount = 0;
- int includedAfterCount = 0;
- LinkedList<EntryID> idList = new LinkedList<>();
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- Cursor cursor = openCursor(txn, CursorConfig.READ_COMMITTED);
- try
- {
- LockMode lockMode = LockMode.DEFAULT;
- ByteSequence vBytes = vlvRequest.getGreaterThanOrEqualAssertion();
- ByteStringBuilder keyBytes =
- new ByteStringBuilder(vBytes.length() + 4);
- keyBytes.appendBERLength(vBytes.length());
- vBytes.copyTo(keyBytes);
-
- key.setData(keyBytes.getBackingArray(), 0, keyBytes.length());
- OperationStatus status = cursor.getSearchKeyRange(key, data, lockMode);
- if(status == OperationStatus.SUCCESS)
- {
- if(logger.isTraceEnabled())
- {
- logSearchKeyResult(key);
- }
- SortValuesSet sortValuesSet =
- new SortValuesSet(key.getData(), data.getData(), this);
-
- int adjustedTargetOffset = sortValuesSet.binarySearch(
- -1, vlvRequest.getGreaterThanOrEqualAssertion());
- if(adjustedTargetOffset < 0)
- {
- // For a negative return value r, the vlvIndex -(r+1) gives the
- // array index of the ID that is greater then the assertion value.
- adjustedTargetOffset = -(adjustedTargetOffset+1);
- }
-
- targetOffset = adjustedTargetOffset;
-
- // Iterate through all the sort values sets before this one to find
- // the target offset in the index.
- int lastOffset = adjustedTargetOffset - 1;
- long[] lastIDs = sortValuesSet.getEntryIDs();
- while(true)
- {
- for(int i = lastOffset;
- i >= 0 && includedBeforeCount < beforeCount; i--)
- {
- idList.addFirst(new EntryID(lastIDs[i]));
- includedBeforeCount++;
- }
-
- status = cursor.getPrev(key, data, lockMode);
- if(status != OperationStatus.SUCCESS)
- {
- break;
- }
-
- if(includedBeforeCount < beforeCount)
- {
- lastIDs = SortValuesSet.getEncodedIDs(data.getData(), 0);
- lastOffset = lastIDs.length - 1;
- targetOffset += lastIDs.length;
- }
- else
- {
- targetOffset += SortValuesSet.getEncodedSize(data.getData(), 0);
- }
- }
-
-
- // Set the cursor back to the position of the target entry set
- key.setData(sortValuesSet.getKeyBytes());
- cursor.getSearchKey(key, data, lockMode);
-
- // Add the target and after count entries if the target was found.
- lastOffset = adjustedTargetOffset;
- lastIDs = sortValuesSet.getEntryIDs();
- int afterIDCount = 0;
- while(true)
- {
- for(int i = lastOffset;
- i < lastIDs.length && includedAfterCount < afterCount + 1;
- i++)
- {
- idList.addLast(new EntryID(lastIDs[i]));
- includedAfterCount++;
- }
-
- if(includedAfterCount >= afterCount + 1)
- {
- break;
- }
-
- status = cursor.getNext(key, data, lockMode);
- if(status != OperationStatus.SUCCESS)
- {
- break;
- }
-
- lastIDs = SortValuesSet.getEncodedIDs(data.getData(), 0);
- lastOffset = 0;
- afterIDCount += lastIDs.length;
- }
-
- selectedIDs = new long[idList.size()];
- Iterator<EntryID> idIterator = idList.iterator();
- for (int i=0; i < selectedIDs.length; i++)
- {
- selectedIDs[i] = idIterator.next().longValue();
- }
-
- searchOperation.addResponseControl(
- new VLVResponseControl(targetOffset + 1, currentCount,
- LDAPResultCode.SUCCESS));
-
- if(debugBuilder != null)
- {
- debugBuilder.append("[COUNT:");
- debugBuilder.append(targetOffset + afterIDCount + 1);
- debugBuilder.append("]");
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
- }
- else
- {
- LinkedList<long[]> idSets = new LinkedList<>();
- int currentCount = 0;
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- Cursor cursor = openCursor(txn, CursorConfig.READ_COMMITTED);
- try
- {
- LockMode lockMode = LockMode.RMW;
- OperationStatus status = cursor.getFirst(key, data, lockMode);
- while(status == OperationStatus.SUCCESS)
- {
- if(logger.isTraceEnabled())
- {
- logSearchKeyResult(key);
- }
- long[] ids = SortValuesSet.getEncodedIDs(data.getData(), 0);
- idSets.add(ids);
- currentCount += ids.length;
- status = cursor.getNext(key, data, lockMode);
- }
- }
- finally
- {
- cursor.close();
- }
-
- selectedIDs = new long[currentCount];
- int pos = 0;
- for(long[] id : idSets)
- {
- System.arraycopy(id, 0, selectedIDs, pos, id.length);
- pos += id.length;
- }
-
- if(debugBuilder != null)
- {
- debugBuilder.append("[COUNT:");
- debugBuilder.append(currentCount);
- debugBuilder.append("]");
- }
- }
- return new EntryIDSet(selectedIDs, 0, selectedIDs.length);
- }
-
- /**
- * Set the vlvIndex trust state.
- * @param txn A database transaction, or null if none is required.
- * @param trusted True if this vlvIndex should be trusted or false
- * otherwise.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- public synchronized void setTrusted(Transaction txn, boolean trusted)
- throws DatabaseException
- {
- this.trusted = trusted;
- state.putIndexTrustState(txn, this, trusted);
- }
-
- /**
- * Return true iff this index is trusted.
- * @return the trusted state of this index
- */
- public boolean isTrusted()
- {
- return trusted;
- }
-
- /**
- * Gets the values to sort on from the entry.
- *
- * @param entry The entry to get the values from.
- * @return The attribute values to sort on.
- */
- ByteString[] getSortValues(Entry entry)
- {
- SortKey[] sortKeys = sortOrder.getSortKeys();
- ByteString[] values = new ByteString[sortKeys.length];
- for (int i=0; i < sortKeys.length; i++)
- {
- SortKey sortKey = sortKeys[i];
- List<Attribute> attrList = entry.getAttribute(sortKey.getAttributeType());
- if (attrList != null)
- {
- // There may be multiple versions of this attribute in the target entry
- // (e.g., with different sets of options), and it may also be a
- // multivalued attribute. In that case, we need to find the value that
- // is the best match for the corresponding sort key (i.e., for sorting
- // in ascending order, we want to find the lowest value; for sorting in
- // descending order, we want to find the highest value). This is
- // handled by the SortKey.compareValues method.
- ByteString sortValue = null;
- for (Attribute a : attrList)
- {
- for (ByteString v : a)
- {
- if (sortValue == null || sortKey.compareValues(v, sortValue) < 0)
- {
- sortValue = v;
- }
- }
- }
-
- values[i] = sortValue;
- }
- }
- return values;
- }
-
- /**
- * Encode a VLV database key with the given information.
- *
- * @param entryID The entry ID to encode.
- * @param values The values to encode.
- * @param types The types of the values to encode.
- * @return The encoded bytes.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- byte[] encodeKey(long entryID, ByteString[] values, AttributeType[] types)
- throws DirectoryException
- {
- try
- {
- final ByteStringBuilder builder = new ByteStringBuilder();
-
- for (int i = 0; i < values.length; i++)
- {
- final ByteString v = values[i];
- if (v == null)
- {
- builder.appendBERLength(0);
- }
- else
- {
- final MatchingRule eqRule = types[i].getEqualityMatchingRule();
- final ByteString nv = eqRule.normalizeAttributeValue(v);
- builder.appendBERLength(nv.length());
- builder.appendBytes(nv);
- }
- }
- builder.appendLong(entryID);
- builder.trimToSize();
-
- return builder.getBackingArray();
- }
- catch (DecodeException e)
- {
- throw new DirectoryException(
- ResultCode.INVALID_ATTRIBUTE_SYNTAX, e.getMessageObject(), e);
- }
- }
-
- /**
- * Decode a VLV database key.
- *
- * @param keyBytes The byte array to decode.
- * @return The sort values represented by the key bytes.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- private SortValues decodeKey(byte[] keyBytes) throws DirectoryException
- {
- if(keyBytes == null || keyBytes.length == 0)
- {
- return null;
- }
-
- ByteString[] attributeValues = new ByteString[sortOrder.getSortKeys().length];
- int vBytesPos = 0;
-
- for(int i = 0; i < attributeValues.length; i++)
- {
- int valueLength = keyBytes[vBytesPos] & 0x7F;
- if (valueLength != keyBytes[vBytesPos++])
- {
- int valueLengthBytes = valueLength;
- valueLength = 0;
- for (int j=0; j < valueLengthBytes; j++, vBytesPos++)
- {
- valueLength = (valueLength << 8) | (keyBytes[vBytesPos] & 0xFF);
- }
- }
-
- if(valueLength == 0)
- {
- attributeValues[i] = null;
- }
- else
- {
- byte[] valueBytes = new byte[valueLength];
- System.arraycopy(keyBytes, vBytesPos, valueBytes, 0, valueLength);
- attributeValues[i] = ByteString.wrap(valueBytes);
- }
-
- vBytesPos += valueLength;
- }
-
- final long id = JebFormat.toLong(keyBytes, vBytesPos, keyBytes.length);
- return new SortValues(new EntryID(id), attributeValues, sortOrder);
- }
-
- /**
- * Get the sorted set capacity configured for this VLV index.
- *
- * @return The sorted set capacity.
- */
- public int getSortedSetCapacity()
- {
- return sortedSetCapacity;
- }
-
- /**
- * Indicates if the given entry should belong in this VLV index.
- *
- * @param entry The entry to check.
- * @return True if the given entry should belong in this VLV index or False
- * otherwise.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- boolean shouldInclude(Entry entry) throws DirectoryException
- {
- DN entryDN = entry.getName();
- return entryDN.matchesBaseAndScope(baseDN, scope)
- && filter.matchesEntry(entry);
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized boolean isConfigurationChangeAcceptable(
- LocalDBVLVIndexCfg cfg,
- List<LocalizableMessage> unacceptableReasons)
- {
- try
- {
- this.filter = SearchFilter.createFilterFromString(cfg.getFilter());
- }
- catch(Exception e)
- {
- unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_BAD_FILTER.get(
- cfg.getFilter(), name, stackTraceToSingleLineString(e)));
- return false;
- }
-
- String[] sortAttrs = cfg.getSortOrder().split(" ");
- SortKey[] sortKeys = new SortKey[sortAttrs.length];
- MatchingRule[] orderingRules = new MatchingRule[sortAttrs.length];
- boolean[] ascending = new boolean[sortAttrs.length];
- for(int i = 0; i < sortAttrs.length; i++)
- {
- try
- {
- if(sortAttrs[i].startsWith("-"))
- {
- ascending[i] = false;
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- else
- {
- ascending[i] = true;
- if(sortAttrs[i].startsWith("+"))
- {
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- }
- }
- catch(Exception e)
- {
- unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortKeys[i], name));
- return false;
- }
-
- AttributeType attrType = DirectoryServer.getAttributeTypeOrNull(sortAttrs[i].toLowerCase());
- if(attrType == null)
- {
- unacceptableReasons.add(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortAttrs[i], name));
- return false;
- }
- sortKeys[i] = new SortKey(attrType, ascending[i]);
- orderingRules[i] = attrType.getOrderingMatchingRule();
- }
-
- return true;
- }
-
- /** {@inheritDoc} */
- @Override
- public synchronized ConfigChangeResult applyConfigurationChange(
- LocalDBVLVIndexCfg cfg)
- {
- final ConfigChangeResult ccr = new ConfigChangeResult();
-
- // Update base DN only if changed..
- if(!config.getBaseDN().equals(cfg.getBaseDN()))
- {
- this.baseDN = cfg.getBaseDN();
- ccr.setAdminActionRequired(true);
- }
-
- // Update scope only if changed.
- if(!config.getScope().equals(cfg.getScope()))
- {
- this.scope = convertScope(cfg.getScope());
- ccr.setAdminActionRequired(true);
- }
-
- // Update sort set capacity only if changed.
- if (config.getMaxBlockSize() != cfg.getMaxBlockSize())
- {
- this.sortedSetCapacity = cfg.getMaxBlockSize();
-
- // Require admin action only if the new capacity is larger. Otherwise,
- // we will lazyly update the sorted sets.
- if (config.getMaxBlockSize() < cfg.getMaxBlockSize())
- {
- ccr.setAdminActionRequired(true);
- }
- }
-
- // Update the filter only if changed.
- if(!config.getFilter().equals(cfg.getFilter()))
- {
- try
- {
- this.filter = SearchFilter.createFilterFromString(cfg.getFilter());
- ccr.setAdminActionRequired(true);
- }
- catch(Exception e)
- {
- ccr.addMessage(ERR_CONFIG_VLV_INDEX_BAD_FILTER.get(config.getFilter(), name, stackTraceToSingleLineString(e)));
- ccr.setResultCodeIfSuccess(ResultCode.INVALID_ATTRIBUTE_SYNTAX);
- }
- }
-
- // Update the sort order only if changed.
- if (!config.getSortOrder().equals(cfg.getSortOrder()))
- {
- String[] sortAttrs = cfg.getSortOrder().split(" ");
- SortKey[] sortKeys = new SortKey[sortAttrs.length];
- MatchingRule[] orderingRules = new MatchingRule[sortAttrs.length];
- boolean[] ascending = new boolean[sortAttrs.length];
- for(int i = 0; i < sortAttrs.length; i++)
- {
- try
- {
- if(sortAttrs[i].startsWith("-"))
- {
- ascending[i] = false;
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- else
- {
- ascending[i] = true;
- if(sortAttrs[i].startsWith("+"))
- {
- sortAttrs[i] = sortAttrs[i].substring(1);
- }
- }
- }
- catch(Exception e)
- {
- ccr.addMessage(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortKeys[i], name));
- ccr.setResultCodeIfSuccess(ResultCode.INVALID_ATTRIBUTE_SYNTAX);
- }
-
- AttributeType attrType =
- DirectoryServer.getAttributeTypeOrNull(sortAttrs[i].toLowerCase());
- if(attrType == null)
- {
- ccr.addMessage(ERR_CONFIG_VLV_INDEX_UNDEFINED_ATTR.get(sortKeys[i], name));
- ccr.setResultCodeIfSuccess(ResultCode.INVALID_ATTRIBUTE_SYNTAX);
- }
- else
- {
- sortKeys[i] = new SortKey(attrType, ascending[i]);
- orderingRules[i] = attrType.getOrderingMatchingRule();
- }
- }
-
- this.sortOrder = new SortOrder(sortKeys);
- this.comparator = new VLVKeyComparator(orderingRules, ascending);
-
- // We have to close the database and open it using the new comparator.
- entryContainer.exclusiveLock.lock();
- try
- {
- close();
- this.dbConfig.setBtreeComparator(this.comparator);
- open();
- }
- catch(DatabaseException de)
- {
- ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(de)));
- ccr.setResultCodeIfSuccess(DirectoryServer.getServerErrorResultCode());
- }
- finally
- {
- entryContainer.exclusiveLock.unlock();
- }
-
- ccr.setAdminActionRequired(true);
- }
-
-
- if (ccr.adminActionRequired())
- {
- trusted = false;
- ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(name));
- try
- {
- state.putIndexTrustState(null, this, false);
- }
- catch(DatabaseException de)
- {
- ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(de)));
- ccr.setResultCodeIfSuccess(DirectoryServer.getServerErrorResultCode());
- }
- }
-
- this.config = cfg;
- return ccr;
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VLVKeyComparator.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VLVKeyComparator.java
deleted file mode 100644
index 80873fa..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VLVKeyComparator.java
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2008 Sun Microsystems, Inc.
- * Portions Copyright 2014-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.schema.MatchingRule;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.types.DirectoryException;
-
-import com.sleepycat.je.DatabaseComparator;
-import com.sleepycat.je.DatabaseException;
-
-/**
- * This class is used to compare the keys used in a VLV index. Each key is
- * made up the sort values and the entry ID of the largest entry in the sorted
- * set stored in the data for the key.
- */
-public class VLVKeyComparator implements DatabaseComparator
-{
- /**
- * The serial version identifier required to satisfy the compiler because this
- * class implements the <CODE>java.io.Serializable</CODE> interface. This
- * value was generated using the <CODE>serialver</CODE> command-line utility
- * included with the Java SDK.
- */
- static final long serialVersionUID = 1585167927344130604L;
-
- /** Matching rules are not serializable. */
- private transient MatchingRule[] orderingRules;
-
- /**
- * Only oids of matching rules are recorded for serialization. Oids allow to
- * retrieve matching rules after deserialization, through
- * {@code initialize(ClassLoader)} method.
- */
- private String[] orderingRuleOids;
-
- private boolean[] ascending;
-
- /**
- * Construct a new VLV Key Comparator object.
- *
- * @param orderingRules The array of ordering rules to use when comparing
- * the decoded values in the key.
- * @param ascending The array of booleans indicating the ordering for
- * each value.
- */
- public VLVKeyComparator(MatchingRule[] orderingRules, boolean[] ascending)
- {
- this.orderingRules = orderingRules;
- this.orderingRuleOids = new String[orderingRules.length];
- for (int i = 0; i < orderingRules.length; i++)
- {
- orderingRuleOids[i] = orderingRules[i].getOID();
- }
- this.ascending = ascending;
- }
-
- /**
- * Compares the contents of the provided byte arrays to determine their
- * relative order. A key in the VLV index contains the sorted attribute values
- * in order followed by the 8 byte entry ID. A attribute value of length 0
- * means that value is null and the attribute type was not part of the entry.
- * A null value is always considered greater then a non null value. If all
- * attribute values are the same, the entry ID will be used to determine the
- * ordering.
- *
- * When comparing partial keys (ie. keys with only the first attribute value
- * encoded for evaluating VLV assertion value offsets or keys with no entry
- * IDs), only information available in both byte keys will be used to
- * determine the ordering. If all available information is the same, 0 will
- * be returned.
- *
- * @param b1 The first byte array to use in the comparison.
- * @param b2 The second byte array to use in the comparison.
- *
- * @return A negative integer if <CODE>b1</CODE> should come before
- * <CODE>b2</CODE> in ascending order, a positive integer if
- * <CODE>b1</CODE> should come after <CODE>b2</CODE> in ascending
- * order, or zero if there is no difference between the values with
- * regard to ordering.
- */
- @Override
- public int compare(byte[] b1, byte[] b2)
- {
- // A 0 length byte array is a special key used for the unbound max
- // sort values set. It always comes after a non length byte array.
- if(b1.length == 0)
- {
- if(b2.length == 0)
- {
- return 0;
- }
- else
- {
- return 1;
- }
- }
- else if(b2.length == 0)
- {
- return -1;
- }
-
- int b1Pos = 0;
- int b2Pos = 0;
- for (int j=0;
- j < orderingRules.length && b1Pos < b1.length && b2Pos < b2.length;
- j++)
- {
- int b1Length = b1[b1Pos] & 0x7F;
- if (b1[b1Pos++] != b1Length)
- {
- int b1NumLengthBytes = b1Length;
- b1Length = 0;
- for (int k=0; k < b1NumLengthBytes; k++, b1Pos++)
- {
- b1Length = (b1Length << 8) |
- (b1[b1Pos] & 0xFF);
- }
- }
-
- int b2Length = b2[b2Pos] & 0x7F;
- if (b2[b2Pos++] != b2Length)
- {
- int b2NumLengthBytes = b2Length;
- b2Length = 0;
- for (int k=0; k < b2NumLengthBytes; k++, b2Pos++)
- {
- b2Length = (b2Length << 8) |
- (b2[b2Pos] & 0xFF);
- }
- }
-
- byte[] b1Bytes;
- byte[] b2Bytes;
- if(b1Length > 0)
- {
- b1Bytes = new byte[b1Length];
- System.arraycopy(b1, b1Pos, b1Bytes, 0, b1Length);
- b1Pos += b1Length;
- }
- else
- {
- b1Bytes = null;
- }
-
- if(b2Length > 0)
- {
- b2Bytes = new byte[b2Length];
- System.arraycopy(b2, b2Pos, b2Bytes, 0, b2Length);
- b2Pos += b2Length;
- }
- else
- {
- b2Bytes = null;
- }
-
- // A null value will always come after a non-null value.
- if (b1Bytes == null)
- {
- if (b2Bytes == null)
- {
- continue;
- }
- else
- {
- return 1;
- }
- }
- else if (b2Bytes == null)
- {
- return -1;
- }
-
- final ByteString val1 = ByteString.valueOfBytes(b1Bytes);
- final ByteString val2 = ByteString.valueOfBytes(b2Bytes);
- final int result = ascending[j] ? val1.compareTo(val2) : val2.compareTo(val1);
- if(result != 0)
- {
- return result;
- }
- }
-
- // If we've gotten here, then we can't tell a difference between the sets
- // of available values, so sort based on entry ID if its in the key.
-
- if(b1Pos + 8 <= b1.length && b2Pos + 8 <= b2.length)
- {
- long b1ID = JebFormat.toLong(b1, b1Pos, b1Pos + 8);
- long b2ID = JebFormat.toLong(b2, b2Pos, b2Pos + 8);
- return compare(b1ID, b2ID);
- }
-
- // If we've gotten here, then we can't tell the difference between the sets
- // of available values and entry IDs are not all available, so just return 0
- return 0;
- }
-
- /**
- * Compares the contents in the provided values set with the given values to
- * determine their relative order. A null value is always considered greater
- * then a non null value. If all attribute values are the same, the entry ID
- * will be used to determine the ordering.
- *
- * If the given attribute values array does not contain all the values in the
- * sort order, any missing values will be considered as a unknown or
- * wildcard value instead of a non existent value. When comparing partial
- * information, only values available in both the values set and the
- * given values will be used to determine the ordering. If all available
- * information is the same, 0 will be returned.
- *
- * @param set The sort values set to containing the values.
- * @param index The index of the values in the set.
- * @param entryID The entry ID to use in the comparison.
- * @param values The values to use in the comparison.
- * @return A negative integer if the values in the set should come before
- * the given values in ascending order, a positive integer if
- * the values in the set should come after the given values in
- * ascending order, or zero if there is no difference between the
- * values with regard to ordering.
- * @throws DatabaseException If an error occurs during an operation on a
- * JE database.
- * @throws DirectoryException If an error occurs while trying to
- * normalize the value (e.g., if it is
- * not acceptable for use with the
- * associated equality matching rule).
- */
- public int compare(SortValuesSet set, int index, long entryID,
- ByteString[] values) throws DatabaseException, DirectoryException
- {
- for (int j=0; j < orderingRules.length; j++)
- {
- if(j >= values.length)
- {
- break;
- }
-
- ByteString b1Bytes = set.getValue(index * orderingRules.length + j);
- ByteString b2Bytes = null;
-
- if(values[j] != null)
- {
- try
- {
- b2Bytes = orderingRules[j].normalizeAttributeValue(values[j]);
- }
- catch (DecodeException e)
- {
- throw new DirectoryException(
- ResultCode.INVALID_ATTRIBUTE_SYNTAX, e.getMessageObject(), e);
- }
- }
-
- // A null value will always come after a non-null value.
- if (b1Bytes == null)
- {
- if (b2Bytes == null)
- {
- continue;
- }
- else
- {
- return 1;
- }
- }
- else if (b2Bytes == null)
- {
- return -1;
- }
-
- final int result = ascending[j] ? b1Bytes.compareTo(b2Bytes) : b2Bytes.compareTo(b1Bytes);
- if(result != 0)
- {
- return result;
- }
- }
-
- if(entryID != -1)
- {
- // If we've gotten here, then we can't tell a difference between the sets
- // of values, so sort based on entry ID.
- return compare(set.getEntryIDs()[index], entryID);
- }
-
- // If we've gotten here, then we can't tell the difference between the sets
- // of available values and the entry ID is not available. Just return 0.
- return 0;
- }
-
- private int compare(long l1, long l2)
- {
- final long difference = l1 - l2;
- if (difference < 0)
- {
- return -1;
- }
- else if (difference > 0)
- {
- return 1;
- }
- else
- {
- return 0;
- }
- }
-
- @Override
- public void initialize(ClassLoader loader)
- {
- if (orderingRules == null && orderingRuleOids != null)
- {
- orderingRules = new MatchingRule[orderingRuleOids.length];
- for (int i = 0; i < orderingRuleOids.length; i++)
- {
- orderingRules[i] = DirectoryServer.getSchema().getMatchingRule(orderingRuleOids[i]);
- }
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VerifyJob.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VerifyJob.java
deleted file mode 100644
index 273fff4..0000000
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/VerifyJob.java
+++ /dev/null
@@ -1,1648 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
- *
- * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
- * or http://forgerock.org/license/CDDLv1.0.html.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at legal-notices/CDDLv1_0.txt.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information:
- * Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- *
- *
- * Copyright 2006-2010 Sun Microsystems, Inc.
- * Portions Copyright 2011-2015 ForgeRock AS
- */
-package org.opends.server.backends.jeb;
-
-import static org.opends.messages.BackendMessages.*;
-
-import java.util.*;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.forgerock.i18n.LocalizableMessage;
-import org.forgerock.i18n.slf4j.LocalizedLogger;
-import org.forgerock.opendj.ldap.ByteString;
-import org.forgerock.opendj.ldap.ConditionResult;
-import org.forgerock.opendj.ldap.DecodeException;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.forgerock.opendj.ldap.schema.MatchingRule;
-import org.opends.server.backends.VerifyConfig;
-import org.opends.server.core.DirectoryServer;
-import org.opends.server.types.*;
-import org.opends.server.util.ServerConstants;
-import org.opends.server.util.StaticUtils;
-
-import com.sleepycat.je.*;
-
-/** This class is used to run an index verification process on the backend. */
-public class VerifyJob
-{
- private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
-
- /** The verify configuration. */
- private final VerifyConfig verifyConfig;
- /** The root container used for the verify job. */
- private RootContainer rootContainer;
-
- /** The number of milliseconds between job progress reports. */
- private final long progressInterval = 10000;
- /** The number of index keys processed. */
- private long keyCount;
- /** The number of errors found. */
- private long errorCount;
- /** The number of records that have exceeded the entry limit. */
- private long entryLimitExceededCount;
- /** The number of records that reference more than one entry. */
- private long multiReferenceCount;
- /** The total number of entry references. */
- private long entryReferencesCount;
- /** The maximum number of references per record. */
- private long maxEntryPerValue;
-
- /**
- * This map is used to gather some statistics about values that have
- * exceeded the entry limit.
- */
- private IdentityHashMap<Index, HashMap<ByteString, Long>> entryLimitMap = new IdentityHashMap<>();
-
- /** Indicates whether the DN database is to be verified. */
- private boolean verifyDN2ID;
- /** Indicates whether the children database is to be verified. */
- private boolean verifyID2Children;
- /** Indicates whether the subtree database is to be verified. */
- private boolean verifyID2Subtree;
-
- /** The entry database. */
- private ID2Entry id2entry;
- /** The DN database. */
- private DN2ID dn2id;
- /** The children database. */
- private Index id2c;
- /** The subtree database. */
- private Index id2s;
-
- /** A list of the attribute indexes to be verified. */
- private final ArrayList<AttributeIndex> attrIndexList = new ArrayList<>();
- /** A list of the VLV indexes to be verified. */
- private final ArrayList<VLVIndex> vlvIndexList = new ArrayList<>();
-
- /**
- * Construct a VerifyJob.
- *
- * @param verifyConfig The verify configuration.
- */
- public VerifyJob(VerifyConfig verifyConfig)
- {
- this.verifyConfig = verifyConfig;
- }
-
- /**
- * Verify the backend.
- *
- * @param rootContainer The root container that holds the entries to verify.
- * @return The error count.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws JebException If an error occurs in the JE backend.
- * @throws DirectoryException If an error occurs while verifying the backend.
- */
- public long verifyBackend(RootContainer rootContainer) throws DatabaseException, JebException, DirectoryException
- {
- this.rootContainer = rootContainer;
- EntryContainer entryContainer =
- rootContainer.getEntryContainer(verifyConfig.getBaseDN());
-
- entryContainer.sharedLock.lock();
- try
- {
- final List<String> completeList = verifyConfig.getCompleteList();
- final List<String> cleanList = verifyConfig.getCleanList();
-
- boolean cleanMode = false;
- if (completeList.isEmpty() && cleanList.isEmpty())
- {
- verifyDN2ID = true;
- if (rootContainer.getConfiguration().isSubordinateIndexesEnabled())
- {
- verifyID2Children = true;
- verifyID2Subtree = true;
- }
- for (AttributeIndex index : entryContainer.getAttributeIndexes())
- {
- if (index.isTrusted())
- {
- attrIndexList.add(index);
- }
- }
- }
- else
- {
- final List<String> list;
- if (!completeList.isEmpty())
- {
- list = completeList;
- }
- else
- {
- list = cleanList;
- cleanMode = true;
- }
-
- for (String index : list)
- {
- String lowerName = index.toLowerCase();
- if ("dn2id".equals(lowerName))
- {
- verifyDN2ID = true;
- }
- else if ("id2children".equals(lowerName))
- {
- if (rootContainer.getConfiguration().isSubordinateIndexesEnabled())
- {
- verifyID2Children = true;
- }
- else
- {
- LocalizableMessage msg = NOTE_JEB_SUBORDINATE_INDEXES_DISABLED
- .get(rootContainer.getConfiguration().getBackendId());
- throw new JebException(msg);
- }
- }
- else if ("id2subtree".equals(lowerName))
- {
- if (rootContainer.getConfiguration().isSubordinateIndexesEnabled())
- {
- verifyID2Subtree = true;
- }
- else
- {
- LocalizableMessage msg = NOTE_JEB_SUBORDINATE_INDEXES_DISABLED
- .get(rootContainer.getConfiguration().getBackendId());
- throw new JebException(msg);
- }
- }
- else if(lowerName.startsWith("vlv."))
- {
- if(lowerName.length() < 5)
- {
- throw new JebException(ERR_VLV_INDEX_NOT_CONFIGURED.get(lowerName));
- }
-
- VLVIndex vlvIndex =
- entryContainer.getVLVIndex(lowerName.substring(4));
- if(vlvIndex == null)
- {
- throw new JebException(ERR_VLV_INDEX_NOT_CONFIGURED.get(lowerName.substring(4)));
- }
-
- vlvIndexList.add(vlvIndex);
- }
- else
- {
- AttributeType attrType = DirectoryServer.getAttributeTypeOrNull(lowerName);
- if (attrType == null)
- {
- throw new JebException(ERR_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index));
- }
- AttributeIndex attrIndex = entryContainer.getAttributeIndex(attrType);
- if (attrIndex == null)
- {
- throw new JebException(ERR_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index));
- }
- attrIndexList.add(attrIndex);
- }
- }
- }
-
- entryLimitMap = new IdentityHashMap<>(attrIndexList.size());
-
- // We will be updating these files independently of the indexes
- // so we need direct access to them rather than going through
- // the entry entryContainer methods.
- id2entry = entryContainer.getID2Entry();
- dn2id = entryContainer.getDN2ID();
- id2c = entryContainer.getID2Children();
- id2s = entryContainer.getID2Subtree();
-
- // Make a note of the time we started.
- long startTime = System.currentTimeMillis();
-
- // Start a timer for the progress report.
- Timer timer = new Timer();
- // Create a new progressTask based on the index count.
- TimerTask progressTask = new ProgressTask(cleanMode);
- timer.scheduleAtFixedRate(progressTask, progressInterval, progressInterval);
-
- // Iterate through the index keys.
- try
- {
- if (cleanMode)
- {
- iterateIndex();
- }
- else
- {
- iterateID2Entry();
-
- // Make sure the vlv indexes are in correct order.
- for(VLVIndex vlvIndex : vlvIndexList)
- {
- iterateVLVIndex(vlvIndex, false);
- }
- }
- }
- finally
- {
- timer.cancel();
- }
-
- long finishTime = System.currentTimeMillis();
- long totalTime = finishTime - startTime;
-
- float rate = 0;
- if (totalTime > 0)
- {
- rate = 1000f*keyCount / totalTime;
- }
-
- if (cleanMode)
- {
- logger.info(NOTE_VERIFY_CLEAN_FINAL_STATUS, keyCount, errorCount, totalTime / 1000, rate);
-
- if (multiReferenceCount > 0)
- {
- float averageEntryReferences = 0;
- if (keyCount > 0)
- {
- averageEntryReferences = entryReferencesCount/keyCount;
- }
-
- if (logger.isDebugEnabled())
- {
- logger.debug(INFO_VERIFY_MULTIPLE_REFERENCE_COUNT, multiReferenceCount);
- logger.debug(INFO_VERIFY_ENTRY_LIMIT_EXCEEDED_COUNT, entryLimitExceededCount);
- logger.debug(INFO_VERIFY_AVERAGE_REFERENCE_COUNT, averageEntryReferences);
- logger.debug(INFO_VERIFY_MAX_REFERENCE_COUNT, maxEntryPerValue);
- }
- }
- }
- else
- {
- logger.info(NOTE_VERIFY_FINAL_STATUS, keyCount, errorCount, totalTime/1000, rate);
- if (!entryLimitMap.isEmpty())
- {
- logger.debug(INFO_VERIFY_ENTRY_LIMIT_STATS_HEADER);
-
- for (Map.Entry<Index,HashMap<ByteString,Long>> mapEntry :
- entryLimitMap.entrySet())
- {
- Index index = mapEntry.getKey();
- Long[] values = mapEntry.getValue().values().toArray(new Long[0]);
-
- // Calculate the median value for entry limit exceeded.
- Arrays.sort(values);
- long medianValue;
- int x = values.length / 2;
- if (values.length % 2 == 0)
- {
- medianValue = (values[x] + values[x-1]) / 2;
- }
- else
- {
- medianValue = values[x];
- }
-
- logger.debug(INFO_VERIFY_ENTRY_LIMIT_STATS_ROW, index, values.length, values[0],
- values[values.length-1], medianValue);
- }
- }
- }
- }
- finally
- {
- entryContainer.sharedLock.unlock();
- }
- return errorCount;
- }
-
- /**
- * Iterate through the entries in id2entry to perform a check for
- * index completeness. We check that the ID for the entry is indeed
- * present in the indexes for the appropriate values.
- *
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private void iterateID2Entry() throws DatabaseException
- {
- DiskOrderedCursor cursor =
- id2entry.openCursor(new DiskOrderedCursorConfig());
- long storedEntryCount = id2entry.getRecordCount();
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
- while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- EntryID entryID;
- try
- {
- entryID = new EntryID(key);
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Malformed id2entry ID %s.%n",
- StaticUtils.bytesToHex(key.getData()));
- }
- continue;
- }
-
- keyCount++;
-
- Entry entry;
- try
- {
- entry = ID2Entry.entryFromDatabase(
- ByteString.wrap(data.getData()),
- rootContainer.getCompressedSchema());
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Malformed id2entry record for ID %d:%n%s%n",
- entryID.longValue(),
- StaticUtils.bytesToHex(data.getData()));
- }
- continue;
- }
-
- verifyEntry(entryID, entry);
- }
- if (keyCount != storedEntryCount)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("The stored entry count in id2entry (%d) does " +
- "not agree with the actual number of entry " +
- "records found (%d).%n", storedEntryCount, keyCount);
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Iterate through the entries in an index to perform a check for
- * index cleanliness. For each ID in the index we check that the
- * entry it refers to does indeed contain the expected value.
- *
- * @throws JebException If an error occurs in the JE backend.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If an error occurs reading values in the index.
- */
- private void iterateIndex()
- throws JebException, DatabaseException, DirectoryException
- {
- if (verifyDN2ID)
- {
- iterateDN2ID();
- }
- else if (verifyID2Children)
- {
- iterateID2Children();
- }
- else if (verifyID2Subtree)
- {
- iterateID2Subtree();
- }
- else if (!attrIndexList.isEmpty())
- {
- for (Index index : attrIndexList.get(0).getAllIndexes())
- {
- iterateAttrIndex(index);
- }
- }
- else if (!vlvIndexList.isEmpty())
- {
- iterateVLVIndex(vlvIndexList.get(0), true);
- }
- }
-
- /**
- * Iterate through the entries in DN2ID to perform a check for
- * index cleanliness.
- *
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private void iterateDN2ID() throws DatabaseException
- {
- DiskOrderedCursor cursor = dn2id.openCursor(new DiskOrderedCursorConfig());
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- keyCount++;
-
- EntryID entryID;
- try
- {
- entryID = new EntryID(data);
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File dn2id has malformed ID for DN <%s>:%n%s%n",
- new String(key.getData()),
- StaticUtils.bytesToHex(data.getData()));
- }
- continue;
- }
-
- Entry entry;
- try
- {
- entry = id2entry.get(null, entryID, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- errorCount++;
- logger.traceException(e);
- continue;
- }
-
- if (entry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id has DN <%s> referencing unknown " +
- "ID %d%n", new String(key.getData()), entryID.longValue());
- }
- }
- else if (!Arrays.equals(JebFormat.dnToDNKey(
- entry.getName(), verifyConfig.getBaseDN().size()), key.getData()))
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id has DN <%s> referencing entry with wrong DN <%s>%n",
- new String(key.getData()), entry.getName());
- }
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Iterate through the entries in ID2Children to perform a check for
- * index cleanliness.
- *
- * @throws JebException If an error occurs in the JE backend.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private void iterateID2Children() throws JebException, DatabaseException
- {
- DiskOrderedCursor cursor = id2c.openCursor(new DiskOrderedCursorConfig());
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- keyCount++;
-
- EntryID entryID;
- try
- {
- entryID = new EntryID(key);
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File id2children has malformed ID %s%n",
- StaticUtils.bytesToHex(key.getData()));
- }
- continue;
- }
-
- EntryIDSet entryIDList;
-
- try
- {
- entryIDList = new EntryIDSet(key.getData(), data.getData());
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File id2children has malformed ID list " +
- "for ID %s:%n%s%n", entryID,
- StaticUtils.bytesToHex(data.getData()));
- }
- continue;
- }
-
- updateIndexStats(entryIDList);
-
- if (entryIDList.isDefined())
- {
- Entry entry;
- try
- {
- entry = id2entry.get(null, entryID, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- errorCount++;
- continue;
- }
-
- if (entry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2children has unknown ID %d%n",
- entryID.longValue());
- }
- continue;
- }
-
- for (EntryID id : entryIDList)
- {
- Entry childEntry;
- try
- {
- childEntry = id2entry.get(null, id, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- errorCount++;
- continue;
- }
-
- if (childEntry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2children has ID %d referencing " +
- "unknown ID %d%n", entryID.longValue(), id.longValue());
- }
- continue;
- }
-
- if (!childEntry.getName().isDescendantOf(entry.getName()) ||
- childEntry.getName().size() !=
- entry.getName().size() + 1)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2children has ID %d with DN <%s> " +
- "referencing ID %d with non-child DN <%s>%n",
- entryID.longValue(), entry.getName(), id.longValue(), childEntry.getName());
- }
- }
- }
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Iterate through the entries in ID2Subtree to perform a check for
- * index cleanliness.
- *
- * @throws JebException If an error occurs in the JE backend.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private void iterateID2Subtree() throws JebException, DatabaseException
- {
- DiskOrderedCursor cursor = id2s.openCursor(new DiskOrderedCursorConfig());
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- keyCount++;
-
- EntryID entryID;
- try
- {
- entryID = new EntryID(key);
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File id2subtree has malformed ID %s%n",
- StaticUtils.bytesToHex(key.getData()));
- }
- continue;
- }
-
- EntryIDSet entryIDList;
- try
- {
- entryIDList = new EntryIDSet(key.getData(), data.getData());
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File id2subtree has malformed ID list " +
- "for ID %s:%n%s%n", entryID,
- StaticUtils.bytesToHex(data.getData()));
- }
- continue;
- }
-
- updateIndexStats(entryIDList);
-
- if (entryIDList.isDefined())
- {
- Entry entry;
- try
- {
- entry = id2entry.get(null, entryID, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- errorCount++;
- continue;
- }
-
- if (entry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2subtree has unknown ID %d%n",
- entryID.longValue());
- }
- continue;
- }
-
- for (EntryID id : entryIDList)
- {
- Entry subordEntry;
- try
- {
- subordEntry = id2entry.get(null, id, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- errorCount++;
- continue;
- }
-
- if (subordEntry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2subtree has ID %d referencing " +
- "unknown ID %d%n", entryID.longValue(), id.longValue());
- }
- continue;
- }
-
- if (!subordEntry.getName().isDescendantOf(entry.getName()))
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2subtree has ID %d with DN <%s> " +
- "referencing ID %d with non-subordinate DN <%s>%n",
- entryID.longValue(), entry.getName(), id.longValue(), subordEntry.getName());
- }
- }
- }
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Increment the counter for a key that has exceeded the
- * entry limit. The counter gives the number of entries that have
- * referenced the key.
- *
- * @param index The index containing the key.
- * @param key A key that has exceeded the entry limit.
- */
- private void incrEntryLimitStats(Index index, byte[] key)
- {
- HashMap<ByteString,Long> hashMap = entryLimitMap.get(index);
- if (hashMap == null)
- {
- hashMap = new HashMap<>();
- entryLimitMap.put(index, hashMap);
- }
- ByteString octetString = ByteString.wrap(key);
- Long counter = hashMap.get(octetString);
- if (counter != null)
- {
- counter++;
- }
- else
- {
- counter = 1L;
- }
- hashMap.put(octetString, counter);
- }
-
- /**
- * Update the statistical information for an index record.
- *
- * @param entryIDSet The set of entry IDs for the index record.
- */
- private void updateIndexStats(EntryIDSet entryIDSet)
- {
- if (!entryIDSet.isDefined())
- {
- entryLimitExceededCount++;
- multiReferenceCount++;
- }
- else
- {
- if (entryIDSet.size() > 1)
- {
- multiReferenceCount++;
- }
- entryReferencesCount += entryIDSet.size();
- maxEntryPerValue = Math.max(maxEntryPerValue, entryIDSet.size());
- }
- }
-
- /**
- * Iterate through the entries in a VLV index to perform a check for index
- * cleanliness.
- *
- * @param vlvIndex The VLV index to perform the check against.
- * @param verifyID True to verify the IDs against id2entry.
- * @throws JebException If an error occurs in the JE backend.
- * @throws DatabaseException If an error occurs in the JE database.
- * @throws DirectoryException If an error occurs reading values in the index.
- */
- private void iterateVLVIndex(VLVIndex vlvIndex, boolean verifyID)
- throws JebException, DatabaseException, DirectoryException
- {
- if(vlvIndex == null)
- {
- return;
- }
-
- DiskOrderedCursor cursor =
- vlvIndex.openCursor(new DiskOrderedCursorConfig());
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- SortValues lastValues = null;
- while(cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- SortValuesSet sortValuesSet =
- new SortValuesSet(key.getData(), data.getData(), vlvIndex);
- for(int i = 0; i < sortValuesSet.getEntryIDs().length; i++)
- {
- keyCount++;
- SortValues values = sortValuesSet.getSortValues(i);
- if(lastValues != null && lastValues.compareTo(values) >= 1)
- {
- // Make sure the values is larger then the previous one.
- if(logger.isTraceEnabled())
- {
- logger.trace("Values %s and %s are incorrectly ordered",
- lastValues, values, keyDump(vlvIndex,
- sortValuesSet.getKeySortValues()));
- }
- errorCount++;
- }
- if(i == sortValuesSet.getEntryIDs().length - 1 &&
- key.getData().length != 0)
- {
- // If this is the last one in a bounded set, make sure it is the
- // same as the database key.
- byte[] encodedKey = vlvIndex.encodeKey(
- values.getEntryID(), values.getValues(), values.getTypes());
- if(!Arrays.equals(key.getData(), encodedKey))
- {
- if(logger.isTraceEnabled())
- {
- logger.trace("Incorrect key for SortValuesSet in VLV " +
- "index %s. Last values bytes %s, Key bytes %s",
- vlvIndex.getName(), encodedKey, key);
- }
- errorCount++;
- }
- }
- lastValues = values;
-
- if(verifyID)
- {
- Entry entry;
- EntryID id = new EntryID(values.getEntryID());
- try
- {
- entry = id2entry.get(null, id, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- errorCount++;
- continue;
- }
-
- if (entry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("Reference to unknown ID %d%n%s",
- id.longValue(),
- keyDump(vlvIndex,
- sortValuesSet.getKeySortValues()));
- }
- continue;
- }
-
- SortValues entryValues =
- new SortValues(id, entry, vlvIndex.sortOrder);
- if(entryValues.compareTo(values) != 0)
- {
- errorCount++;
- if(logger.isTraceEnabled())
- {
- logger.trace("Reference to entry ID %d " +
- "which does not match the values%n%s",
- id.longValue(),
- keyDump(vlvIndex,
- sortValuesSet.getKeySortValues()));
- }
- }
- }
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Iterate through the entries in an attribute index to perform a check for
- * index cleanliness.
- * @param index The index database to be checked.
- * @throws JebException If an error occurs in the JE backend.
- * @throws DatabaseException If an error occurs in the JE database.
- */
- private void iterateAttrIndex(Index index) throws JebException, DatabaseException
- {
- if (index == null)
- {
- return;
- }
-
- DiskOrderedCursor cursor = index.openCursor(new DiskOrderedCursorConfig());
- try
- {
- DatabaseEntry key = new DatabaseEntry();
- DatabaseEntry data = new DatabaseEntry();
-
- while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
- {
- keyCount++;
-
- EntryIDSet entryIDList;
- try
- {
- entryIDList = new EntryIDSet(key.getData(), data.getData());
- }
- catch (Exception e)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Malformed ID list: %s%n%s",
- StaticUtils.bytesToHex(data.getData()),
- keyDump(index, key.getData()));
- }
- continue;
- }
-
- updateIndexStats(entryIDList);
-
- if (entryIDList.isDefined())
- {
- final ByteString value = ByteString.wrap(key.getData());
- EntryID prevID = null;
-
- for (EntryID id : entryIDList)
- {
- if (prevID != null && id.equals(prevID) && logger.isTraceEnabled())
- {
- logger.trace("Duplicate reference to ID %d%n%s",
- id.longValue(), keyDump(index, key.getData()));
- }
- prevID = id;
-
- Entry entry;
- try
- {
- entry = id2entry.get(null, id, LockMode.DEFAULT);
- }
- catch (Exception e)
- {
- logger.traceException(e);
- errorCount++;
- continue;
- }
-
- if (entry == null)
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("Reference to unknown ID %d%n%s",
- id.longValue(), keyDump(index, key.getData()));
- }
- continue;
- }
-
- // As an optimization avoid passing in a real set and wasting time
- // hashing and comparing a potentially large set of values, as well
- // as using up memory. Instead just intercept the add() method and
- // detect when an equivalent value has been added.
-
- // We need to use an AtomicBoolean here since anonymous classes
- // require referenced external variables to be final.
- final AtomicBoolean foundMatchingKey = new AtomicBoolean(false);
-
- Set<ByteString> dummySet = new AbstractSet<ByteString>()
- {
- @Override
- public Iterator<ByteString> iterator()
- {
- // The set is always empty.
- return Collections.<ByteString> emptySet().iterator();
- }
-
- @Override
- public int size()
- {
- // The set is always empty.
- return 0;
- }
-
- @Override
- public boolean add(ByteString e)
- {
- if (value.equals(e))
- {
- // We could terminate processing at this point by throwing an
- // UnsupportedOperationException, but this optimization is
- // already ugly enough.
- foundMatchingKey.set(true);
- }
- return true;
- }
-
- };
-
- index.indexEntry(entry, dummySet);
-
- if (!foundMatchingKey.get())
- {
- errorCount++;
- if (logger.isTraceEnabled())
- {
- logger.trace("Reference to entry "
- + "<%s> which does not match the value%n%s",
- entry.getName(),
- keyDump(index, value.toByteArray()));
- }
- }
- }
- }
- }
- }
- finally
- {
- cursor.close();
- }
- }
-
- /**
- * Check that an index is complete for a given entry.
- *
- * @param entryID The entry ID.
- * @param entry The entry to be checked.
- */
- private void verifyEntry(EntryID entryID, Entry entry)
- {
- if (verifyDN2ID)
- {
- verifyDN2ID(entryID, entry);
- }
- if (verifyID2Children)
- {
- verifyID2Children(entryID, entry);
- }
- if (verifyID2Subtree)
- {
- verifyID2Subtree(entryID, entry);
- }
- verifyIndex(entryID, entry);
- }
-
- /**
- * Check that the DN2ID index is complete for a given entry.
- *
- * @param entryID The entry ID.
- * @param entry The entry to be checked.
- */
- private void verifyDN2ID(EntryID entryID, Entry entry)
- {
- DN dn = entry.getName();
-
- // Check the ID is in dn2id with the correct DN.
- try
- {
- EntryID id = dn2id.get(null, dn, LockMode.DEFAULT);
- if (id == null)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id is missing key %s.%n", dn);
- }
- errorCount++;
- }
- else if (!id.equals(entryID))
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id has ID %d instead of %d for key %s.%n", id.longValue(), entryID.longValue(), dn);
- }
- errorCount++;
- }
- }
- catch (Exception e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
- logger.trace("File dn2id has error reading key %s: %s.%n", dn, e.getMessage());
- }
- errorCount++;
- }
-
- // Check the parent DN is in dn2id.
- DN parentDN = getParent(dn);
- if (parentDN != null)
- {
- try
- {
- EntryID id = dn2id.get(null, parentDN, LockMode.DEFAULT);
- if (id == null)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id is missing key %s.%n", parentDN);
- }
- errorCount++;
- }
- }
- catch (Exception e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
- logger.trace("File dn2id has error reading key %s: %s.%n", parentDN, e.getMessage());
- }
- errorCount++;
- }
- }
- }
-
- /**
- * Check that the ID2Children index is complete for a given entry.
- *
- * @param entryID The entry ID.
- * @param entry The entry to be checked.
- */
- private void verifyID2Children(EntryID entryID, Entry entry)
- {
- DN dn = entry.getName();
-
- DN parentDN = getParent(dn);
- if (parentDN != null)
- {
- EntryID parentID = null;
- try
- {
- parentID = dn2id.get(null, parentDN, LockMode.DEFAULT);
- if (parentID == null)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id is missing key %s.%n", parentDN);
- }
- errorCount++;
- }
- }
- catch (Exception e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
- logger.trace("File dn2id has error reading key %s: %s.", parentDN, e.getMessage());
- }
- errorCount++;
- }
- if (parentID != null)
- {
- try
- {
- ConditionResult cr = id2c.containsID(null, parentID.getDatabaseEntry(), entryID);
- if (cr == ConditionResult.FALSE)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2children is missing ID %d for key %d.%n",
- entryID.longValue(), parentID.longValue());
- }
- errorCount++;
- }
- else if (cr == ConditionResult.UNDEFINED)
- {
- incrEntryLimitStats(id2c, parentID.getDatabaseEntry().getData());
- }
- }
- catch (DatabaseException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File id2children has error reading key %d: %s.",
- parentID.longValue(), e.getMessage());
- }
- errorCount++;
- }
- }
- }
- }
-
- /**
- * Check that the ID2Subtree index is complete for a given entry.
- *
- * @param entryID The entry ID.
- * @param entry The entry to be checked.
- */
- private void verifyID2Subtree(EntryID entryID, Entry entry)
- {
- for (DN dn = getParent(entry.getName()); dn != null; dn = getParent(dn))
- {
- EntryID id = null;
- try
- {
- id = dn2id.get(null, dn, LockMode.DEFAULT);
- if (id == null)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File dn2id is missing key %s.%n", dn);
- }
- errorCount++;
- }
- }
- catch (Exception e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
- logger.trace("File dn2id has error reading key %s: %s.%n", dn, e.getMessage());
- }
- errorCount++;
- }
- if (id != null)
- {
- try
- {
- ConditionResult cr;
- cr = id2s.containsID(null, id.getDatabaseEntry(), entryID);
- if (cr == ConditionResult.FALSE)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("File id2subtree is missing ID %d " +
- "for key %d.%n",
- entryID.longValue(), id.longValue());
- }
- errorCount++;
- }
- else if (cr == ConditionResult.UNDEFINED)
- {
- incrEntryLimitStats(id2s, id.getDatabaseEntry().getData());
- }
- }
- catch (DatabaseException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("File id2subtree has error reading key %d: %s.%n",
- id.longValue(), e.getMessage());
- }
- errorCount++;
- }
- }
- }
- }
-
- /**
- * Construct a printable string from a raw key value.
- *
- * @param index The index database containing the key value.
- * @param keyBytes The bytes of the key.
- * @return A string that may be logged or printed.
- */
- private String keyDump(Index index, byte[] keyBytes)
- {
- StringBuilder buffer = new StringBuilder(128);
- buffer.append("File: ");
- buffer.append(index);
- buffer.append(ServerConstants.EOL);
- buffer.append("Key:");
- buffer.append(ServerConstants.EOL);
- StaticUtils.byteArrayToHexPlusAscii(buffer, keyBytes, 6);
- return buffer.toString();
- }
-
- /**
- * Construct a printable string from a raw key value.
- *
- * @param vlvIndex The vlvIndex database containing the key value.
- * @param keySortValues THe sort values that is being used as the key.
- * @return A string that may be logged or printed.
- */
- private String keyDump(VLVIndex vlvIndex, SortValues keySortValues)
- {
- StringBuilder buffer = new StringBuilder(128);
- buffer.append("File: ");
- buffer.append(vlvIndex);
- buffer.append(ServerConstants.EOL);
- buffer.append("Key (last sort values):");
- if(keySortValues != null)
- {
- buffer.append(keySortValues);
- }
- else
- {
- buffer.append("UNBOUNDED (0x00)");
- }
- return buffer.toString();
- }
-
- /**
- * Check that an attribute index is complete for a given entry.
- *
- * @param entryID The entry ID.
- * @param entry The entry to be checked.
- */
- private void verifyIndex(EntryID entryID, Entry entry)
- {
- for (AttributeIndex attrIndex : attrIndexList)
- {
- try
- {
- verifyAttribute(attrIndex, entryID, entry);
- }
- catch (DirectoryException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Error normalizing values of attribute %s in " +
- "entry <%s>: %s.%n",
- attrIndex.getAttributeType(), entry.getName(), e.getMessageObject());
- }
- }
- }
-
- for (VLVIndex vlvIndex : vlvIndexList)
- {
- try
- {
- if (vlvIndex.shouldInclude(entry)
- && !vlvIndex.containsValues(null, entryID.longValue(),
- vlvIndex.getSortValues(entry), vlvIndex.getSortTypes()))
- {
- if(logger.isTraceEnabled())
- {
- logger.trace("Missing entry %s in VLV index %s", entry.getName(), vlvIndex.getName());
- }
- errorCount++;
- }
- }
- catch (DirectoryException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
- logger.trace("Error checking entry %s against filter or base DN for VLV index %s: %s",
- entry.getName(), vlvIndex.getName(), e.getMessageObject());
- }
- errorCount++;
- }
- catch (DatabaseException | JebException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
- logger.trace("Error reading VLV index %s for entry %s: %s",
- vlvIndex.getName(), entry.getName(), StaticUtils.getBacktrace(e));
- }
- errorCount++;
- }
- }
- }
-
- /**
- * Check that an attribute index is complete for a given attribute.
- *
- * @param attrIndex The attribute index to be checked.
- * @param entryID The entry ID.
- * @param attrList The attribute to be checked.
- * @throws DirectoryException If a Directory Server error occurs.
- */
- private void verifyAttribute(AttributeIndex attrIndex, EntryID entryID, Entry entry) throws DirectoryException
- {
- for (Index index : attrIndex.getAllIndexes())
- {
- final Set<ByteString> keys = new HashSet<>();
- index.indexEntry(entry, keys);
- for (ByteString key : keys)
- {
- verifyAttributeInIndex(index, null, key, entryID);
- }
- }
- }
-
- private void verifyAttributeInIndex(Index index, Transaction txn, ByteString key, EntryID entryID)
- {
- try
- {
- final ConditionResult cr = index.containsID(txn, new DatabaseEntry(key.toByteArray()), entryID);
- if (cr == ConditionResult.FALSE)
- {
- if (logger.isTraceEnabled())
- {
- logger.trace("Missing ID %d%n%s",
- entryID.longValue(),
- keyDump(index, key.toByteArray()));
- }
- errorCount++;
- }
- else if (cr == ConditionResult.UNDEFINED)
- {
- incrEntryLimitStats(index, key.toByteArray());
- }
- }
- catch (DatabaseException e)
- {
- if (logger.isTraceEnabled())
- {
- logger.traceException(e);
-
- logger.trace("Error reading database: %s%n%s",
- e.getMessage(),
- keyDump(index, key.toByteArray()));
- }
- errorCount++;
- }
- }
-
- private byte[] normalize(MatchingRule matchingRule,
- ByteString value) throws DirectoryException
- {
- try
- {
- return matchingRule.normalizeAttributeValue(value).toByteArray();
- }
- catch (DecodeException e)
- {
- throw new DirectoryException(ResultCode.INVALID_ATTRIBUTE_SYNTAX,
- e.getMessageObject(), e);
- }
- }
-
- /**
- * Get the parent DN of a given DN.
- *
- * @param dn The DN.
- * @return The parent DN or null if the given DN is a base DN.
- */
- private DN getParent(DN dn)
- {
- if (dn.equals(verifyConfig.getBaseDN()))
- {
- return null;
- }
- return dn.getParentDNInSuffix();
- }
-
- /** This class reports progress of the verify job at fixed intervals. */
- private final class ProgressTask extends TimerTask
- {
- /** The total number of records to process. */
- private long totalCount;
-
- /**
- * The number of records that had been processed at the time of the
- * previous progress report.
- */
- private long previousCount;
-
- /** The time in milliseconds of the previous progress report. */
- private long previousTime;
- /** The environment statistics at the time of the previous report. */
- private EnvironmentStats prevEnvStats;
-
- /**
- * The number of bytes in a megabyte.
- * Note that 1024*1024 bytes may eventually become known as a mebibyte(MiB).
- */
- private static final int bytesPerMegabyte = 1024*1024;
-
- /**
- * Create a new verify progress task.
- * @param indexIterator boolean, indicates if the task is iterating
- * through indexes or the entries.
- * @throws DatabaseException An error occurred while accessing the JE
- * database.
- */
- private ProgressTask(boolean indexIterator) throws DatabaseException
- {
- previousTime = System.currentTimeMillis();
- prevEnvStats = rootContainer.getEnvironmentStats(new StatsConfig());
-
- if (indexIterator)
- {
- if (verifyDN2ID)
- {
- totalCount = dn2id.getRecordCount();
- }
- else if (verifyID2Children)
- {
- totalCount = id2c.getRecordCount();
- }
- else if (verifyID2Subtree)
- {
- totalCount = id2s.getRecordCount();
- }
- else if(!attrIndexList.isEmpty())
- {
- AttributeIndex attrIndex = attrIndexList.get(0);
- totalCount = 0;
- for (Index index : attrIndex.getAllIndexes())
- {
- totalCount += getRecordCount(index);
- }
- }
- else if (!vlvIndexList.isEmpty())
- {
- totalCount = vlvIndexList.get(0).getRecordCount();
- }
- }
- else
- {
- totalCount = rootContainer.getEntryContainer(
- verifyConfig.getBaseDN()).getEntryCount();
- }
- }
-
- private long getRecordCount(Index index)
- {
- return index != null ? index.getRecordCount() : 0;
- }
-
- /** The action to be performed by this timer task. */
- @Override
- public void run()
- {
- long latestCount = keyCount;
- long deltaCount = latestCount - previousCount;
- long latestTime = System.currentTimeMillis();
- long deltaTime = latestTime - previousTime;
-
- if (deltaTime == 0)
- {
- return;
- }
-
- float rate = 1000f*deltaCount / deltaTime;
-
- logger.info(NOTE_VERIFY_PROGRESS_REPORT, latestCount, totalCount, errorCount, rate);
-
- try
- {
- Runtime runtime = Runtime.getRuntime();
- long freeMemory = runtime.freeMemory() / bytesPerMegabyte;
-
- EnvironmentStats envStats =
- rootContainer.getEnvironmentStats(new StatsConfig());
- long nCacheMiss =
- envStats.getNCacheMiss() - prevEnvStats.getNCacheMiss();
-
- float cacheMissRate = 0;
- if (deltaCount > 0)
- {
- cacheMissRate = nCacheMiss/(float)deltaCount;
- }
-
- logger.debug(INFO_CACHE_AND_MEMORY_REPORT, freeMemory, cacheMissRate);
-
- prevEnvStats = envStats;
- }
- catch (DatabaseException e)
- {
- logger.traceException(e);
- }
-
-
- previousCount = latestCount;
- previousTime = latestTime;
- }
- }
-}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendCreationHelper.java b/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendCreationHelper.java
index 4da3de3..4d72d8f 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendCreationHelper.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendCreationHelper.java
@@ -26,7 +26,6 @@
package org.opends.server.tools;
import java.io.File;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
@@ -38,23 +37,16 @@
import org.forgerock.opendj.ldap.DN;
import org.forgerock.opendj.server.config.client.BackendCfgClient;
import org.forgerock.opendj.server.config.client.BackendIndexCfgClient;
-import org.forgerock.opendj.server.config.client.LocalDBBackendCfgClient;
-import org.forgerock.opendj.server.config.client.LocalDBIndexCfgClient;
import org.forgerock.opendj.server.config.client.PluggableBackendCfgClient;
import org.forgerock.opendj.server.config.client.RootCfgClient;
import org.forgerock.opendj.server.config.meta.BackendCfgDefn.WritabilityMode;
import org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn;
import org.forgerock.opendj.server.config.meta.BackendIndexCfgDefn.IndexType;
-import org.forgerock.opendj.server.config.meta.LocalDBBackendCfgDefn;
-import org.forgerock.opendj.server.config.meta.LocalDBIndexCfgDefn;
import org.forgerock.opendj.server.config.server.BackendCfg;
import org.opends.guitools.controlpanel.util.Utilities;
import org.opends.quicksetup.Installation;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
-/**
- * Utility class which can be used by tools to create a new backend with default indexes.
- */
+/** Utility class which can be used by tools to create a new backend with default indexes. */
public class BackendCreationHelper
{
/** Describes an attribute index which should be created during installation. */
@@ -162,14 +154,7 @@
backendCfgClient.setWritabilityMode(WritabilityMode.ENABLED);
backendCfgClient.commit();
- if (backendType instanceof LocalDBBackendCfgDefn)
- {
- addJEDefaultIndexes((LocalDBBackendCfgClient) backendCfgClient);
- }
- else
- {
- addBackendDefaultIndexes((PluggableBackendCfgClient) backendCfgClient);
- }
+ addBackendDefaultIndexes((PluggableBackendCfgClient) backendCfgClient);
}
private static void addBackendDefaultIndexes(PluggableBackendCfgClient backendCfgClient) throws Exception
@@ -190,25 +175,4 @@
index.commit();
}
}
-
- @RemoveOnceLocalDBBackendIsPluggable
- private static void addJEDefaultIndexes(final LocalDBBackendCfgClient jeBackendCfgClient) throws Exception
- {
- for (DefaultIndex defaultIndex : DEFAULT_INDEXES)
- {
- final LocalDBIndexCfgClient jeIndex =
- jeBackendCfgClient.createLocalDBIndex(LocalDBIndexCfgDefn.getInstance(), defaultIndex.name, null);
-
- final List<LocalDBIndexCfgDefn.IndexType> indexTypes = new ArrayList<>();
- indexTypes.add(LocalDBIndexCfgDefn.IndexType.EQUALITY);
- if (defaultIndex.shouldCreateSubstringIndex)
- {
- indexTypes.add(LocalDBIndexCfgDefn.IndexType.SUBSTRING);
- }
- jeIndex.setIndexType(indexTypes);
-
- jeIndex.commit();
- }
- }
-
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendTypeHelper.java b/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendTypeHelper.java
index 8dada30..0f8ef46 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendTypeHelper.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/tools/BackendTypeHelper.java
@@ -36,11 +36,9 @@
import org.forgerock.opendj.config.DefinedDefaultBehaviorProvider;
import org.forgerock.opendj.config.ManagedObjectDefinition;
import org.forgerock.opendj.server.config.client.BackendCfgClient;
-import org.forgerock.opendj.server.config.meta.LocalDBBackendCfgDefn;
import org.forgerock.opendj.server.config.meta.PluggableBackendCfgDefn;
import org.forgerock.opendj.server.config.server.BackendCfg;
import org.opends.guitools.controlpanel.util.Utilities;
-import org.opends.server.backends.jeb.RemoveOnceLocalDBBackendIsPluggable;
import org.opends.server.util.RemoveOnceNewConfigFrameworkIsUsed;
/**
@@ -130,10 +128,6 @@
? extends org.opends.server.admin.std.server.BackendCfg> getLegacyConfigurationFrameworkBackend()
{
Utilities.initializeLegacyConfigurationFramework();
- if (isLocalDBBackend())
- {
- return org.opends.server.admin.std.meta.LocalDBBackendCfgDefn.getInstance();
- }
for (org.opends.server.admin.AbstractManagedObjectDefinition<?, ?> oldConfigBackend :
org.opends.server.admin.std.meta.PluggableBackendCfgDefn.getInstance().getAllChildren())
@@ -148,12 +142,6 @@
throw new IllegalArgumentException("Impossible to find the equivalent backend type in old config framework: "
+ getBackend().getName());
}
-
- @RemoveOnceLocalDBBackendIsPluggable
- private boolean isLocalDBBackend()
- {
- return getBackend().getName().equals(LocalDBBackendCfgDefn.getInstance().getName());
- }
}
private final List<ManagedObjectDefinition<? extends BackendCfgClient, ? extends BackendCfg>> backends;
@@ -166,8 +154,6 @@
backends = new LinkedList<>();
- addLocalDBBackendIfSupported();
-
for (AbstractManagedObjectDefinition<?, ?> backendType : PluggableBackendCfgDefn.getInstance().getAllChildren())
{
// Filtering out only the non-abstract backends to avoid users attempt to create abstract ones
@@ -187,13 +173,6 @@
}
}
- @RemoveOnceLocalDBBackendIsPluggable
- private void addLocalDBBackendIfSupported()
- {
- addToBackendListIfClassExists(
- "org.opends.server.backends.jeb.BackendImpl", LocalDBBackendCfgDefn.getInstance());
- }
-
private void addToBackendListIfClassExists(final String backendClassName,
final ManagedObjectDefinition<? extends BackendCfgClient, ? extends BackendCfg> backendToAdd)
{
diff --git a/opendj-server-legacy/src/test/java/org/opends/server/TestCaseUtils.java b/opendj-server-legacy/src/test/java/org/opends/server/TestCaseUtils.java
index 1935ea1..ba89694 100644
--- a/opendj-server-legacy/src/test/java/org/opends/server/TestCaseUtils.java
+++ b/opendj-server-legacy/src/test/java/org/opends/server/TestCaseUtils.java
@@ -57,7 +57,9 @@
import org.opends.server.api.WorkQueue;
import org.opends.server.api.plugin.PluginType;
import org.opends.server.backends.MemoryBackend;
-import org.opends.server.backends.jeb.*;
+import org.opends.server.backends.pluggable.BackendImpl;
+import org.opends.server.backends.pluggable.EntryContainer;
+import org.opends.server.backends.pluggable.RootContainer;
import org.opends.server.core.AddOperation;
import org.opends.server.core.DeleteOperation;
import org.opends.server.core.DirectoryServer;
@@ -865,22 +867,6 @@
for (EntryContainer ec : rootContainer.getEntryContainers())
{
ec.clear();
- assertEquals(ec.getHighestEntryID().longValue(), 0L);
- }
- rootContainer.resetNextEntryID();
- return true;
- }
- }
- else if (b instanceof org.opends.server.backends.pluggable.BackendImpl)
- {
- final org.opends.server.backends.pluggable.BackendImpl backend =
- (org.opends.server.backends.pluggable.BackendImpl) b;
- final org.opends.server.backends.pluggable.RootContainer rootContainer = backend.getRootContainer();
- if (rootContainer != null)
- {
- for (org.opends.server.backends.pluggable.EntryContainer ec : rootContainer.getEntryContainers())
- {
- ec.clear();
// assertEquals(ec.getHighestEntryID().longValue(), 0L);
}
rootContainer.resetNextEntryID();
@@ -891,36 +877,6 @@
}
/**
- * This was used to track down which test was trashing the indexes. We left it
- * here because it might be useful again.
- */
- public static void printUntrustedIndexes()
- {
- try {
- BackendImpl backend = (BackendImpl)DirectoryServer.getBackend("userRoot");
- if (backend == null) {
- return;
- }
- RootContainer rootContainer = backend.getRootContainer();
- for (EntryContainer ec : rootContainer.getEntryContainers())
- {
- List<DatabaseContainer> databases = new ArrayList<>();
- ec.listDatabases(databases);
- for (DatabaseContainer dbContainer: databases) {
- if (dbContainer instanceof Index) {
- Index index = (Index)dbContainer;
- if (!index.isTrusted()) {
- originalSystemErr.println("ERROR: The index " + index + " is no longer trusted.");
- }
- }
- }
- }
- } catch (Exception e) {
- e.printStackTrace(originalSystemErr);
- }
- }
-
- /**
* Create a temporary directory with the specified prefix.
*
* @param prefix
@@ -931,19 +887,14 @@
*/
public static File createTemporaryDirectory(String prefix)
throws IOException {
- File tempDirectory = File.createTempFile(prefix, null);
-
- if (!tempDirectory.delete()) {
- throw new IOException("Unable to delete temporary file: "
- + tempDirectory);
+ File tmpDir = File.createTempFile(prefix, null);
+ if (!tmpDir.delete()) {
+ throw new IOException("Unable to delete temporary file: " + tmpDir);
}
-
- if (!tempDirectory.mkdir()) {
- throw new IOException("Unable to create temporary directory: "
- + tempDirectory);
+ if (!tmpDir.mkdir()) {
+ throw new IOException("Unable to create temporary directory: " + tmpDir);
}
-
- return tempDirectory;
+ return tmpDir;
}
/**
diff --git a/opendj-server-legacy/src/test/java/org/opends/server/monitors/DatabaseEnvironmentMonitorTestCase.java b/opendj-server-legacy/src/test/java/org/opends/server/monitors/BackendMonitorTestCase.java
similarity index 83%
rename from opendj-server-legacy/src/test/java/org/opends/server/monitors/DatabaseEnvironmentMonitorTestCase.java
rename to opendj-server-legacy/src/test/java/org/opends/server/monitors/BackendMonitorTestCase.java
index ac1e262..3fb8b0d 100644
--- a/opendj-server-legacy/src/test/java/org/opends/server/monitors/DatabaseEnvironmentMonitorTestCase.java
+++ b/opendj-server-legacy/src/test/java/org/opends/server/monitors/BackendMonitorTestCase.java
@@ -22,37 +22,27 @@
*
*
* Copyright 2006-2008 Sun Microsystems, Inc.
+ * Portions Copyright 2015 ForgeRock AS
*/
package org.opends.server.monitors;
-
-
import org.opends.server.admin.std.server.MonitorProviderCfg;
import org.opends.server.api.MonitorProvider;
import org.opends.server.core.DirectoryServer;
-
-
-/**
- * This class defines a set of tests for the
- * org.opends.server.monitors.DatabaseEnvironmentMonitor class.
- */
-public class DatabaseEnvironmentMonitorTestCase
- extends GenericMonitorTestCase
+/** This class defines a set of tests for the {@link BackendMonitor} class. */
+public class BackendMonitorTestCase extends GenericMonitorTestCase
{
/**
* Creates a new instance of this test case class.
*
* @throws Exception If an unexpected problem occurred.
*/
- public DatabaseEnvironmentMonitorTestCase()
- throws Exception
+ public BackendMonitorTestCase() throws Exception
{
super(null);
}
-
-
/**
* Retrieves an initialized instance of the associated monitor provider.
*
@@ -60,14 +50,14 @@
*
* @throws Exception If an unexpected problem occurs.
*/
+ @Override
protected MonitorProvider getMonitorInstance()
throws Exception
{
- String monitorName = "userroot database environment";
+ String monitorName = "userroot backend";
MonitorProvider<? extends MonitorProviderCfg> provider =
DirectoryServer.getMonitorProvider(monitorName);
provider.initializeMonitorProvider(null);
return provider;
}
}
-
diff --git a/opendj-server-legacy/src/test/java/org/opends/server/tools/ArgumentParserToolsTestCase.java b/opendj-server-legacy/src/test/java/org/opends/server/tools/ArgumentParserToolsTestCase.java
index 1f14b73..070e909 100644
--- a/opendj-server-legacy/src/test/java/org/opends/server/tools/ArgumentParserToolsTestCase.java
+++ b/opendj-server-legacy/src/test/java/org/opends/server/tools/ArgumentParserToolsTestCase.java
@@ -25,15 +25,14 @@
*/
package org.opends.server.tools;
+import static com.forgerock.opendj.cli.CliMessages.*;
+
import static org.assertj.core.api.Assertions.*;
import static org.forgerock.util.Utils.*;
-import static com.forgerock.opendj.cli.CliMessages.*;
-
import java.io.PrintStream;
import org.forgerock.opendj.ldap.ByteStringBuilder;
-import org.opends.server.backends.jeb.DBTest;
import org.opends.server.tools.dsreplication.ReplicationCliMain;
import org.opends.server.tools.makeldif.MakeLDIF;
import org.opends.server.tools.status.StatusCli;
@@ -110,12 +109,6 @@
}
@Test(dataProvider = "invalidArgs")
- public void testDBTest(final String[] args)
- {
- assertToolFailsWithUsage(DBTest.main(args, false, outStream, errStream));
- }
-
- @Test(dataProvider = "invalidArgs")
public void testDSJavaProperties(final String[] args)
{
assertToolFailsWithUsage(JavaPropertiesTool.mainCLI(args, outStream, errStream, null));
diff --git a/opendj-server-legacy/tests/unit-tests-testng/resource/config-changes.ldif b/opendj-server-legacy/tests/unit-tests-testng/resource/config-changes.ldif
index e3e26f7..73c5ffc 100644
--- a/opendj-server-legacy/tests/unit-tests-testng/resource/config-changes.ldif
+++ b/opendj-server-legacy/tests/unit-tests-testng/resource/config-changes.ldif
@@ -422,9 +422,9 @@
changetype: add
objectClass: top
objectClass: ds-cfg-backend
-objectClass: ds-cfg-local-db-backend
+objectClass: ds-cfg-je-backend
ds-cfg-enabled: false
-ds-cfg-java-class: org.opends.server.backends.jeb.BackendImpl
+ds-cfg-java-class: org.opends.server.backends.jeb.JEBackend
ds-cfg-backend-id: unindexedRoot
ds-cfg-writability-mode: enabled
ds-cfg-base-dn: dc=unindexed,dc=jeb
@@ -440,9 +440,9 @@
changetype: add
objectClass: top
objectClass: ds-cfg-backend
-objectClass: ds-cfg-local-db-backend
+objectClass: ds-cfg-je-backend
ds-cfg-enabled: true
-ds-cfg-java-class: org.opends.server.backends.jeb.BackendImpl
+ds-cfg-java-class: org.opends.server.backends.jeb.JEBackend
ds-cfg-backend-id: userRoot
ds-cfg-writability-mode: enabled
ds-cfg-base-dn: dc=example,dc=com
@@ -481,14 +481,14 @@
dn: ds-cfg-attribute=aci,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: aci
ds-cfg-index-type: presence
dn: ds-cfg-attribute=cn,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: cn
ds-cfg-index-type: equality
ds-cfg-index-type: substring
@@ -496,28 +496,28 @@
dn: ds-cfg-attribute=ds-sync-hist,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-sync-hist
ds-cfg-index-type: ordering
dn: ds-cfg-attribute=ds-sync-conflict,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-sync-conflict
ds-cfg-index-type: equality
dn: ds-cfg-attribute=entryUUID,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: entryUUID
ds-cfg-index-type: equality
dn: ds-cfg-attribute=givenName,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: givenName
ds-cfg-index-type: equality
ds-cfg-index-type: substring
@@ -525,7 +525,7 @@
dn: ds-cfg-attribute=mail,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mail
ds-cfg-index-type: equality
ds-cfg-index-type: substring
@@ -533,21 +533,21 @@
dn: ds-cfg-attribute=member,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: member
ds-cfg-index-type: equality
dn: ds-cfg-attribute=objectClass,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: objectClass
ds-cfg-index-type: equality
dn: ds-cfg-attribute=sn,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: sn
ds-cfg-index-type: equality
ds-cfg-index-type: substring
@@ -555,7 +555,7 @@
dn: ds-cfg-attribute=telephoneNumber,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: telephoneNumber
ds-cfg-index-type: equality
ds-cfg-index-type: substring
@@ -563,14 +563,14 @@
dn: ds-cfg-attribute=uid,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uid
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uniqueMember,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uniqueMember
ds-cfg-index-type: equality
@@ -583,77 +583,77 @@
dn: ds-cfg-attribute=oncRpcNumber,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: oncRpcNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=bootParameter,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: bootParameter
ds-cfg-index-type: equality
dn: ds-cfg-attribute=employeeNumber,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: employeeNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-subject-dn,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-subject-dn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-fingerprint,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-fingerprint
ds-cfg-index-type: equality
dn: ds-cfg-attribute=manager,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: manager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=o,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: o
ds-cfg-index-type: equality
dn: ds-cfg-attribute=seeAlso,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: seeAlso
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mobile,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mobile
ds-cfg-index-type: equality
dn: ds-cfg-attribute=facsimileTelephoneNumber,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: facsimileTelephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=pager,cn=Index,ds-cfg-backend-id=userRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: pager
ds-cfg-index-type: equality
@@ -666,7 +666,7 @@
dn: ds-cfg-attribute=mail,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mail
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -677,119 +677,119 @@
dn: ds-cfg-attribute=oncRpcNumber,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: oncRpcNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=bootParameter,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: bootParameter
ds-cfg-index-type: equality
dn: ds-cfg-attribute=employeeNumber,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: employeeNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-subject-dn,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-subject-dn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-fingerprint,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-fingerprint
ds-cfg-index-type: equality
dn: ds-cfg-attribute=manager,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: manager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=o,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: o
ds-cfg-index-type: equality
dn: ds-cfg-attribute=seeAlso,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: seeAlso
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mobile,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mobile
ds-cfg-index-type: equality
dn: ds-cfg-attribute=facsimileTelephoneNumber,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: facsimileTelephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=pager,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: pager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=cn,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: cn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=sn,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: sn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=member,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: member
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uniuqeMember,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uniqueMember
ds-cfg-index-type: equality
dn: ds-cfg-attribute=telephoneNumber,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: telephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uid,cn=Index,ds-cfg-backend-id=unindexedRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uid
ds-cfg-index-type: equality
@@ -797,9 +797,9 @@
changetype: add
objectClass: top
objectClass: ds-cfg-backend
-objectClass: ds-cfg-local-db-backend
+objectClass: ds-cfg-je-backend
ds-cfg-enabled: false
-ds-cfg-java-class: org.opends.server.backends.jeb.BackendImpl
+ds-cfg-java-class: org.opends.server.backends.jeb.JEBackend
ds-cfg-backend-id: rebuildRoot
ds-cfg-writability-mode: enabled
ds-cfg-base-dn: dc=rebuild, dc=jeb
@@ -820,7 +820,7 @@
dn: ds-cfg-name=testvlvindex,cn=VLV Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-vlv-index
+objectClass: ds-cfg-backend-vlv-index
ds-cfg-name: testvlvindex
ds-cfg-base-dn: dc=rebuild, dc=jeb
ds-cfg-scope: whole-subtree
@@ -836,7 +836,7 @@
dn: ds-cfg-attribute=mail,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mail
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -847,119 +847,119 @@
dn: ds-cfg-attribute=oncRpcNumber,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: oncRpcNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=bootParameter,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: bootParameter
ds-cfg-index-type: equality
dn: ds-cfg-attribute=employeeNumber,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: employeeNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-subject-dn,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-subject-dn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-fingerprint,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-fingerprint
ds-cfg-index-type: equality
dn: ds-cfg-attribute=manager,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: manager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=o,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: o
ds-cfg-index-type: equality
dn: ds-cfg-attribute=seeAlso,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: seeAlso
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mobile,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mobile
ds-cfg-index-type: equality
dn: ds-cfg-attribute=facsimileTelephoneNumber,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: facsimileTelephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=pager,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: pager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uid,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uid
ds-cfg-index-type: equality
dn: ds-cfg-attribute=cn,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: cn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=sn,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: sn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=member,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: member
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uniuqeMember,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uniqueMember
ds-cfg-index-type: equality
dn: ds-cfg-attribute=telephoneNumber,cn=Index,ds-cfg-backend-id=rebuildRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: telephoneNumber
ds-cfg-index-type: equality
@@ -967,9 +967,9 @@
changetype: add
objectClass: top
objectClass: ds-cfg-backend
-objectClass: ds-cfg-local-db-backend
+objectClass: ds-cfg-je-backend
ds-cfg-enabled: false
-ds-cfg-java-class: org.opends.server.backends.jeb.BackendImpl
+ds-cfg-java-class: org.opends.server.backends.jeb.JEBackend
ds-cfg-backend-id: importRoot
ds-cfg-writability-mode: enabled
ds-cfg-base-dn: dc=importtest, dc=com
@@ -991,7 +991,7 @@
dn: ds-cfg-name=testvlvindex,cn=VLV Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-vlv-index
+objectClass: ds-cfg-backend-vlv-index
ds-cfg-name: testvlvindex
ds-cfg-base-dn: dc=com
ds-cfg-scope: whole-subtree
@@ -1007,126 +1007,126 @@
dn: ds-cfg-attribute=oncRpcNumber,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: oncRpcNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=bootParameter,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: bootParameter
ds-cfg-index-type: equality
dn: ds-cfg-attribute=employeeNumber,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: employeeNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mail,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mail
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-subject-dn,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-subject-dn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-fingerprint,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-fingerprint
ds-cfg-index-type: equality
dn: ds-cfg-attribute=manager,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: manager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=o,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: o
ds-cfg-index-type: equality
dn: ds-cfg-attribute=seeAlso,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: seeAlso
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mobile,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mobile
ds-cfg-index-type: equality
dn: ds-cfg-attribute=facsimileTelephoneNumber,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: facsimileTelephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=pager,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: pager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=cn,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: cn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=sn,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: sn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=member,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: member
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uniuqeMember,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uniqueMember
ds-cfg-index-type: equality
dn: ds-cfg-attribute=telephoneNumber,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: telephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uid,cn=Index,ds-cfg-backend-id=importRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uid
ds-cfg-index-type: equality
@@ -1134,9 +1134,9 @@
changetype: add
objectClass: top
objectClass: ds-cfg-backend
-objectClass: ds-cfg-local-db-backend
+objectClass: ds-cfg-je-backend
ds-cfg-enabled: false
-ds-cfg-java-class: org.opends.server.backends.jeb.BackendImpl
+ds-cfg-java-class: org.opends.server.backends.jeb.JEBackend
ds-cfg-backend-id: verifyRoot
ds-cfg-writability-mode: enabled
ds-cfg-base-dn: dc=verify, dc=jeb
@@ -1157,7 +1157,7 @@
dn: ds-cfg-name=testvlvindex,cn=VLV Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-vlv-index
+objectClass: ds-cfg-backend-vlv-index
ds-cfg-name: testvlvindex
ds-cfg-base-dn: dc=verify, dc=jeb
ds-cfg-scope: whole-subtree
@@ -1173,7 +1173,7 @@
dn: ds-cfg-attribute=cn,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: cn
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1182,7 +1182,7 @@
dn: ds-cfg-attribute=givenName,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: givenName
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1191,7 +1191,7 @@
dn: ds-cfg-attribute=mail,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mail
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1201,21 +1201,21 @@
dn: ds-cfg-attribute=member,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: member
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uniqueMember,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uniqueMember
ds-cfg-index-type: equality
dn: ds-cfg-attribute=sn,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: sn
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1224,7 +1224,7 @@
dn: ds-cfg-attribute=telephoneNumber,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: telephoneNumber
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1233,98 +1233,98 @@
dn: ds-cfg-attribute=uid,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uid
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-sync-hist,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-sync-hist
ds-cfg-index-type: ordering
dn: ds-cfg-attribute=entryuuid,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: entryuuid
ds-cfg-index-type: equality
dn: ds-cfg-attribute=oncRpcNumber,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: oncRpcNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=bootParameter,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: bootParameter
ds-cfg-index-type: equality
dn: ds-cfg-attribute=employeeNumber,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: employeeNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-subject-dn,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-subject-dn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-fingerprint,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-fingerprint
ds-cfg-index-type: equality
dn: ds-cfg-attribute=manager,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: manager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=o,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: o
ds-cfg-index-type: equality
dn: ds-cfg-attribute=seeAlso,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: seeAlso
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mobile,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mobile
ds-cfg-index-type: equality
dn: ds-cfg-attribute=facsimileTelephoneNumber,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: facsimileTelephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=pager,cn=Index,ds-cfg-backend-id=verifyRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: pager
ds-cfg-index-type: equality
@@ -1332,9 +1332,9 @@
changetype: add
objectClass: top
objectClass: ds-cfg-backend
-objectClass: ds-cfg-local-db-backend
+objectClass: ds-cfg-je-backend
ds-cfg-enabled: false
-ds-cfg-java-class: org.opends.server.backends.jeb.BackendImpl
+ds-cfg-java-class: org.opends.server.backends.jeb.JEBackend
ds-cfg-backend-id: indexRoot
ds-cfg-writability-mode: enabled
ds-cfg-base-dn: dc=test,dc=com
@@ -1357,13 +1357,12 @@
dn: ds-cfg-name=testvlvindex,cn=VLV Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-vlv-index
+objectClass: ds-cfg-backend-vlv-index
ds-cfg-name: testvlvindex
ds-cfg-base-dn: dc=vlvtest,dc=com
ds-cfg-scope: whole-subtree
ds-cfg-filter: (objectClass=*)
ds-cfg-sort-order: givenname -sn +uid
-ds-cfg-max-block-size: 7
dn: cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
@@ -1374,7 +1373,7 @@
dn: ds-cfg-attribute=name,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: name
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1386,7 +1385,7 @@
dn: ds-cfg-attribute=cn,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: cn
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1397,7 +1396,7 @@
dn: ds-cfg-attribute=employeeNumber,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: employeeNumber
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1407,7 +1406,7 @@
dn: ds-cfg-attribute=title,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: title
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1417,7 +1416,7 @@
dn: ds-cfg-attribute=givenName,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: givenName
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1427,7 +1426,7 @@
dn: ds-cfg-attribute=mail,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mail
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1439,21 +1438,21 @@
dn: ds-cfg-attribute=member,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: member
ds-cfg-index-type: equality
dn: ds-cfg-attribute=uniqueMember,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uniqueMember
ds-cfg-index-type: equality
dn: ds-cfg-attribute=sn,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: sn
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1462,7 +1461,7 @@
dn: ds-cfg-attribute=telephoneNumber,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: telephoneNumber
ds-cfg-index-type: presence
ds-cfg-index-type: equality
@@ -1471,77 +1470,77 @@
dn: ds-cfg-attribute=uid,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: uid
ds-cfg-index-type: equality
dn: ds-cfg-attribute=oncRpcNumber,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: oncRpcNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=bootParameter,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: bootParameter
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-subject-dn,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-subject-dn
ds-cfg-index-type: equality
dn: ds-cfg-attribute=ds-certificate-fingerprint,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: ds-certificate-fingerprint
ds-cfg-index-type: equality
dn: ds-cfg-attribute=manager,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: manager
ds-cfg-index-type: equality
dn: ds-cfg-attribute=o,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: o
ds-cfg-index-type: equality
dn: ds-cfg-attribute=seeAlso,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: seeAlso
ds-cfg-index-type: equality
dn: ds-cfg-attribute=mobile,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: mobile
ds-cfg-index-type: equality
dn: ds-cfg-attribute=facsimileTelephoneNumber,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: facsimileTelephoneNumber
ds-cfg-index-type: equality
dn: ds-cfg-attribute=pager,cn=Index,ds-cfg-backend-id=indexRoot,cn=Backends,cn=config
changetype: add
objectClass: top
-objectClass: ds-cfg-local-db-index
+objectClass: ds-cfg-backend-index
ds-cfg-attribute: pager
ds-cfg-index-type: equality
--
Gitblit v1.10.0