| | |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2009 Sun Microsystems, Inc. |
| | | * Portions copyright 2012-2015 ForgeRock AS. |
| | | * Portions copyright 2012-2016 ForgeRock AS. |
| | | */ |
| | | package org.forgerock.opendj.ldap; |
| | | |
| | |
| | | return sequence.toString(); |
| | | } |
| | | |
| | | private InputStream asInputStream() { |
| | | /** |
| | | * Returns an {@link InputStream} from the current position in the sequence. |
| | | * There is only a single {@link InputStream} for a given ByteSequence, so |
| | | * multiple calls to {@code asInputStream()} will always return the same object. |
| | | * The returned {@code InputStream} does not support {@code mark()}. |
| | | * Calling {@code close()} does nothing. |
| | | * |
| | | * @return an {@link InputStream} from the current position in the sequence |
| | | */ |
| | | public InputStream asInputStream() { |
| | | if (inputStream == null) { |
| | | inputStream = new InputStream() { |
| | | @Override |
| | |
| | | information: "Portions Copyright [year] [name of copyright owner]". |
| | | |
| | | Copyright 2007-2009 Sun Microsystems, Inc. |
| | | Portions copyright 2014 ForgeRock AS. |
| | | Portions copyright 2014-2016 ForgeRock AS. |
| | | ! --> |
| | | <adm:managed-object name="backend-index" plural-name="backend-indexes" |
| | | package="org.forgerock.opendj.server.config" |
| | |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | <adm:property name="confidentiality-enabled"> |
| | | <adm:synopsis> |
| | | Specifies whether contents of the index should be confidential. |
| | | </adm:synopsis> |
| | | <adm:description> |
| | | Setting the flag to true will hash keys for equality type indexes using SHA-1 |
| | | and encrypt the list of entries matching a substring key for substring indexes. |
| | | </adm:description> |
| | | <adm:requires-admin-action> |
| | | <adm:other> |
| | | <adm:synopsis> |
| | | If the index for the attribute must be protected for security purposes and values |
| | | for that attribute already exist in the database, the index must be rebuilt |
| | | before it will be accurate. |
| | | The property cannot be set on a backend for which confidentiality is not enabled. |
| | | </adm:synopsis> |
| | | </adm:other> |
| | | </adm:requires-admin-action> |
| | | <adm:default-behavior> |
| | | <adm:defined> |
| | | <adm:value>false</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:boolean/> |
| | | </adm:syntax> |
| | | <adm:profile name="ldap"> |
| | | <ldap:attribute> |
| | | <ldap:name>ds-cfg-confidentiality-enabled</ldap:name> |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | </adm:managed-object> |
| | |
| | | Header, with the fields enclosed by brackets [] replaced by your own identifying |
| | | information: "Portions Copyright [year] [name of copyright owner]". |
| | | |
| | | Copyright 2014-2015 ForgeRock AS. |
| | | Copyright 2014-2016 ForgeRock AS. |
| | | ! --> |
| | | <adm:managed-object abstract="true" name="pluggable-backend" |
| | | plural-name="pluggable-backends" package="org.forgerock.opendj.server.config" |
| | |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | <adm:property name="confidentiality-enabled"> |
| | | <adm:synopsis> |
| | | Indicates whether the backend should make entries in database files readable only by Directory Server. |
| | | </adm:synopsis> |
| | | <adm:description> |
| | | Confidentiality is achieved by encrypting entries before writing them to the underlying storage. |
| | | Entry encryption will protect data on disk from unauthorised parties reading the files; for complete |
| | | protection, also set confidentiality for sensitive attributes indexes. |
| | | The property cannot be set to false if some of the indexes have confidentiality set to true. |
| | | </adm:description> |
| | | <adm:default-behavior> |
| | | <adm:defined> |
| | | <adm:value>false</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:boolean /> |
| | | </adm:syntax> |
| | | <adm:profile name="ldap"> |
| | | <ldap:attribute> |
| | | <ldap:name>ds-cfg-confidentiality-enabled</ldap:name> |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | <adm:property name="cipher-transformation"> |
| | | <adm:synopsis> |
| | | Specifies the cipher for the directory server. |
| | | The syntax is "algorithm/mode/padding". |
| | | </adm:synopsis> |
| | | <adm:description> |
| | | The full transformation is required: specifying only an algorithm |
| | | and allowing the cipher provider to supply the default mode and |
| | | padding is not supported, because there is no guarantee these |
| | | default values are the same among different implementations. |
| | | Some cipher algorithms, including RC4 and ARCFOUR, do not have a |
| | | mode or padding, and hence must be specified using NONE for the |
| | | mode field and NoPadding for the padding field. For example, |
| | | RC4/NONE/NoPadding. |
| | | </adm:description> |
| | | <adm:requires-admin-action> |
| | | <adm:none> |
| | | <adm:synopsis> |
| | | Changes to this property take effect immediately but |
| | | only affect cryptographic operations performed after the |
| | | change. |
| | | </adm:synopsis> |
| | | </adm:none> |
| | | </adm:requires-admin-action> |
| | | <adm:default-behavior> |
| | | <adm:defined> |
| | | <adm:value>AES/CBC/PKCS5Padding</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:string /> |
| | | </adm:syntax> |
| | | <adm:profile name="ldap"> |
| | | <ldap:attribute> |
| | | <ldap:name>ds-cfg-cipher-transformation</ldap:name> |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | <adm:property name="cipher-key-length"> |
| | | <adm:synopsis> |
| | | Specifies the key length in bits for the preferred cipher. |
| | | </adm:synopsis> |
| | | <adm:requires-admin-action> |
| | | <adm:none> |
| | | <adm:synopsis> |
| | | Changes to this property take effect immediately but |
| | | only affect cryptographic operations performed after the |
| | | change. |
| | | </adm:synopsis> |
| | | </adm:none> |
| | | </adm:requires-admin-action> |
| | | <adm:default-behavior> |
| | | <adm:defined> |
| | | <adm:value>128</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:integer /> |
| | | </adm:syntax> |
| | | <adm:profile name="ldap"> |
| | | <ldap:attribute> |
| | | <ldap:name>ds-cfg-cipher-key-length</ldap:name> |
| | | </ldap:attribute> |
| | | </adm:profile> |
| | | </adm:property> |
| | | </adm:managed-object> |
| | |
| | | SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 |
| | | SINGLE-VALUE |
| | | X-ORIGIN 'OpenDJ Directory Server' ) |
| | | attributeTypes: ( 1.3.6.1.4.1.36733.2.1.1.159 |
| | | attributeTypes: ( 1.3.6.1.4.1.36733.2.1.1.157 |
| | | NAME 'ds-cfg-confidentiality-enabled' |
| | | EQUALITY booleanMatch |
| | | SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 |
| | | SINGLE-VALUE |
| | | X-ORIGIN 'OpenDJ Directory Server' ) |
| | | attributeTypes: ( 1.3.6.1.4.1.36733.2.1.1.158 |
| | | NAME 'ds-task-import-offheap-size' |
| | | EQUALITY integerMatch |
| | | SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 |
| | |
| | | ds-cfg-entries-compressed $ |
| | | ds-cfg-compact-encoding $ |
| | | ds-cfg-index-filter-analyzer-enabled $ |
| | | ds-cfg-confidentiality-enabled $ |
| | | ds-cfg-cipher-transformation $ |
| | | ds-cfg-cipher-key-length $ |
| | | ds-cfg-index-filter-analyzer-max-filters ) |
| | | X-ORIGIN 'OpenDJ Directory Server' ) |
| | | objectClasses: ( 1.3.6.1.4.1.36733.2.1.2.23 |
| | |
| | | ds-cfg-index-type ) |
| | | MAY ( ds-cfg-index-entry-limit $ |
| | | ds-cfg-substring-length $ |
| | | ds-cfg-confidentiality-enabled $ |
| | | ds-cfg-index-extensible-matching-rule ) |
| | | X-ORIGIN 'OpenDJ Directory Server' ) |
| | | objectClasses: ( 1.3.6.1.4.1.36733.2.1.2.25 |
| | |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.forgerock.opendj.ldap.schema.AttributeType; |
| | | import org.opends.server.crypto.CryptoSuite; |
| | | import org.opends.server.types.*; |
| | | import org.opends.server.util.StaticUtils; |
| | | |
| | |
| | | } |
| | | } |
| | | |
| | | static final String PROTECTED_INDEX_ID = ":hash"; |
| | | |
| | | /** This class implements an attribute indexer for matching rules in a Backend. */ |
| | | static final class MatchingRuleIndex extends DefaultIndex |
| | | { |
| | |
| | | private final Indexer indexer; |
| | | |
| | | private MatchingRuleIndex(EntryContainer entryContainer, AttributeType attributeType, State state, Indexer indexer, |
| | | int indexEntryLimit) |
| | | int indexEntryLimit, boolean encryptValues, CryptoSuite cryptoSuite) |
| | | { |
| | | super(getIndexName(entryContainer, attributeType, indexer.getIndexID()), state, indexEntryLimit, entryContainer); |
| | | this.attributeType = attributeType; |
| | | this.indexer = indexer; |
| | | this.encryptValues = encryptValues; |
| | | this.cryptoSuite = cryptoSuite; |
| | | } |
| | | |
| | | Set<ByteString> indexEntry(Entry entry) |
| | |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decorates an Indexer so that we can post process key and change index name for |
| | | * those attributes declared as protected in the configuration. |
| | | */ |
| | | private static class HashedKeyEqualityIndexer implements Indexer { |
| | | |
| | | private final Indexer delegate; |
| | | private CryptoSuite cryptoSuite; |
| | | |
| | | private HashedKeyEqualityIndexer(Indexer delegate, CryptoSuite cryptoSuite) |
| | | { |
| | | this.delegate = delegate; |
| | | this.cryptoSuite = cryptoSuite; |
| | | } |
| | | |
| | | @Override |
| | | public String getIndexID() |
| | | { |
| | | return delegate.getIndexID() + PROTECTED_INDEX_ID; |
| | | } |
| | | |
| | | @Override |
| | | public void createKeys(Schema schema, ByteSequence value, Collection<ByteString> keys) throws DecodeException |
| | | { |
| | | Collection<ByteString> hashKeys = new ArrayList<>(1); |
| | | delegate.createKeys(schema, value, hashKeys); |
| | | for (ByteString key : hashKeys) |
| | | { |
| | | keys.add(cryptoSuite.hash48(key).toByteString()); |
| | | } |
| | | } |
| | | |
| | | @Override |
| | | public String keyToHumanReadableString(ByteSequence key) |
| | | { |
| | | return key.toByteString().toHexString(); |
| | | } |
| | | } |
| | | |
| | | /** The key bytes used for the presence index as a {@link ByteString}. */ |
| | | static final ByteString PRESENCE_KEY = ByteString.valueOfUtf8("+"); |
| | | |
| | |
| | | private Map<String, MatchingRuleIndex> indexIdToIndexes; |
| | | private IndexingOptions indexingOptions; |
| | | private final State state; |
| | | private final CryptoSuite cryptoSuite; |
| | | |
| | | AttributeIndex(BackendIndexCfg config, State state, EntryContainer entryContainer) throws ConfigException |
| | | AttributeIndex(BackendIndexCfg config, State state, EntryContainer entryContainer, CryptoSuite cryptoSuite) |
| | | throws ConfigException |
| | | { |
| | | this.entryContainer = entryContainer; |
| | | this.config = config; |
| | | this.state = state; |
| | | this.cryptoSuite = cryptoSuite; |
| | | this.indexingOptions = new IndexingOptionsImpl(config.getSubstringLength()); |
| | | this.indexIdToIndexes = Collections.unmodifiableMap(buildIndexes(entryContainer, state, config)); |
| | | this.indexIdToIndexes = Collections.unmodifiableMap(buildIndexes(entryContainer, state, config, cryptoSuite)); |
| | | } |
| | | |
| | | private static Map<String, MatchingRuleIndex> buildIndexes(EntryContainer entryContainer, State state, |
| | | BackendIndexCfg config) throws ConfigException |
| | | private Map<String, MatchingRuleIndex> buildIndexes(EntryContainer entryContainer, State state, |
| | | BackendIndexCfg config, CryptoSuite cryptoSuite) throws ConfigException |
| | | { |
| | | final AttributeType attributeType = config.getAttribute(); |
| | | final int indexEntryLimit = config.getIndexEntryLimit(); |
| | | final IndexingOptions indexingOptions = new IndexingOptionsImpl(config.getSubstringLength()); |
| | | |
| | | Collection<Indexer> indexers = new ArrayList<>(); |
| | | Map<Indexer, Boolean> indexers = new HashMap<>(); |
| | | for(IndexType indexType : config.getIndexType()) { |
| | | switch (indexType) |
| | | { |
| | | case PRESENCE: |
| | | indexers.add(PRESENCE_INDEXER); |
| | | indexers.put(PRESENCE_INDEXER, false); |
| | | break; |
| | | case EXTENSIBLE: |
| | | indexers.addAll( |
| | | indexers.putAll( |
| | | getExtensibleIndexers(config.getAttribute(), config.getIndexExtensibleMatchingRule(), indexingOptions)); |
| | | break; |
| | | case APPROXIMATE: |
| | | case EQUALITY: |
| | | case ORDERING: |
| | | indexers.putAll(buildBaseIndexers(config.isConfidentialityEnabled(), false, indexType, attributeType, |
| | | indexingOptions)); |
| | | break; |
| | | case SUBSTRING: |
| | | MatchingRule rule = getMatchingRule(indexType, attributeType); |
| | | throwIfNoMatchingRule(attributeType, indexType, rule); |
| | | indexers.addAll(rule.createIndexers(indexingOptions)); |
| | | indexers.putAll(buildBaseIndexers(false, config.isConfidentialityEnabled(), indexType, attributeType, |
| | | indexingOptions)); |
| | | break; |
| | | case APPROXIMATE: |
| | | case ORDERING: |
| | | indexers.putAll(buildBaseIndexers(false, false, indexType, attributeType, indexingOptions)); |
| | | break; |
| | | default: |
| | | throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attributeType, indexType)); |
| | | } |
| | | } |
| | | return buildIndexesForIndexers(entryContainer, attributeType, state, indexEntryLimit, indexers); |
| | | return buildIndexesForIndexers(entryContainer, attributeType, state, indexEntryLimit, indexers, cryptoSuite); |
| | | } |
| | | |
| | | private Map<Indexer, Boolean> buildBaseIndexers(boolean protectIndexKeys, boolean protectIndexValues, |
| | | IndexType indexType, AttributeType attributeType, IndexingOptions indexingOptions) throws ConfigException |
| | | { |
| | | Map<Indexer, Boolean> indexers = new HashMap<>(); |
| | | MatchingRule rule = getMatchingRule(indexType, attributeType); |
| | | throwIfNoMatchingRule(attributeType, indexType, rule); |
| | | throwIfProtectKeysAndValues(attributeType, protectIndexKeys, protectIndexValues); |
| | | Collection<? extends Indexer> ruleIndexers = rule.createIndexers(indexingOptions); |
| | | for (Indexer indexer: ruleIndexers) |
| | | { |
| | | if (protectIndexKeys) |
| | | { |
| | | indexers.put(new HashedKeyEqualityIndexer(indexer, cryptoSuite), false); |
| | | } |
| | | else |
| | | { |
| | | indexers.put(indexer, protectIndexValues); |
| | | } |
| | | } |
| | | return indexers; |
| | | } |
| | | |
| | | private static void throwIfNoMatchingRule(AttributeType attributeType, IndexType indexType, MatchingRule rule) |
| | |
| | | } |
| | | } |
| | | |
| | | private void throwIfProtectKeysAndValues(AttributeType attributeType, boolean protectKeys, boolean protectValues) |
| | | throws ConfigException |
| | | { |
| | | if (protectKeys && protectValues) |
| | | { |
| | | throw new ConfigException(ERR_CONFIG_INDEX_CANNOT_PROTECT_BOTH.get(attributeType)); |
| | | } |
| | | } |
| | | |
| | | private static Map<String, MatchingRuleIndex> buildIndexesForIndexers(EntryContainer entryContainer, |
| | | AttributeType attributeType, State state, int indexEntryLimit, Collection<? extends Indexer> indexers) |
| | | AttributeType attributeType, State state, int indexEntryLimit, Map<Indexer, Boolean> indexers, |
| | | CryptoSuite cryptoSuite) |
| | | { |
| | | final Map<String, MatchingRuleIndex> indexes = new HashMap<>(); |
| | | for (Indexer indexer : indexers) |
| | | for (Map.Entry<Indexer, Boolean> indexerEntry : indexers.entrySet()) |
| | | { |
| | | final String indexID = indexer.getIndexID(); |
| | | final String indexID = indexerEntry.getKey().getIndexID(); |
| | | if (!indexes.containsKey(indexID)) |
| | | { |
| | | indexes.put(indexID, new MatchingRuleIndex(entryContainer, attributeType, state, indexer, indexEntryLimit)); |
| | | indexes.put(indexID, |
| | | new MatchingRuleIndex(entryContainer, attributeType, state, indexerEntry.getKey(), |
| | | indexEntryLimit, indexerEntry.getValue(), cryptoSuite)); |
| | | } |
| | | } |
| | | return indexes; |
| | | } |
| | | |
| | | private static Collection<Indexer> getExtensibleIndexers(AttributeType attributeType, Set<String> extensibleRules, |
| | | private static Map<Indexer, Boolean> getExtensibleIndexers(AttributeType attributeType, Set<String> extensibleRules, |
| | | IndexingOptions options) throws ConfigException |
| | | { |
| | | IndexType indexType = IndexType.EXTENSIBLE; |
| | |
| | | throw new ConfigException(ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE.get(attributeType, indexType)); |
| | | } |
| | | |
| | | final Collection<Indexer> indexers = new ArrayList<>(); |
| | | final Map<Indexer, Boolean> indexers = new HashMap<>(); |
| | | for (final String ruleName : extensibleRules) |
| | | { |
| | | final MatchingRule rule = DirectoryServer.getMatchingRule(toLowerCase(ruleName)); |
| | | throwIfNoMatchingRule(attributeType, indexType, rule); |
| | | indexers.addAll(rule.createIndexers(options)); |
| | | for (Indexer indexer : rule.createIndexers(options)) |
| | | { |
| | | indexers.put(indexer, false); |
| | | } |
| | | } |
| | | |
| | | return indexers; |
| | |
| | | return config.getAttribute(); |
| | | } |
| | | |
| | | /** |
| | |  * Returns the crypto suite used by this attribute index for confidential (hashed or |
| | |  * encrypted) index trees. |
| | |  * |
| | |  * @return the crypto suite of this attribute index |
| | |  */ |
| | | public CryptoSuite getCryptoSuite() |
| | | { |
| | | return cryptoSuite; |
| | | } |
| | | |
| | | /** |
| | | * Return the indexing options of this AttributeIndex. |
| | | * |
| | |
| | | public synchronized boolean isConfigurationChangeAcceptable( |
| | | BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons) |
| | | { |
| | | return isIndexAcceptable(cfg, IndexType.EQUALITY, unacceptableReasons) |
| | | return isIndexConfidentialityAcceptable(cfg, unacceptableReasons) |
| | | && isIndexAcceptable(cfg, IndexType.EQUALITY, unacceptableReasons) |
| | | && isIndexAcceptable(cfg, IndexType.SUBSTRING, unacceptableReasons) |
| | | && isIndexAcceptable(cfg, IndexType.ORDERING, unacceptableReasons) |
| | | && isIndexAcceptable(cfg, IndexType.APPROXIMATE, unacceptableReasons) |
| | | && isExtensibleIndexAcceptable(cfg, unacceptableReasons); |
| | | } |
| | | |
| | | private boolean isIndexConfidentialityAcceptable(BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons) |
| | | { |
| | | if (!entryContainer.isConfidentialityEnabled() && cfg.isConfidentialityEnabled()) |
| | | { |
| | | unacceptableReasons.add(ERR_CLEARTEXT_BACKEND_FOR_INDEX_CONFIDENTIALITY.get(cfg.getAttribute().getNameOrOID())); |
| | | return false; |
| | | } |
| | | return true; |
| | | } |
| | | |
| | | private boolean isExtensibleIndexAcceptable(BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons) |
| | | { |
| | | IndexType indexType = IndexType.EXTENSIBLE; |
| | |
| | | final IndexingOptions newIndexingOptions = new IndexingOptionsImpl(newConfiguration.getSubstringLength()); |
| | | try |
| | | { |
| | | final Map<String, MatchingRuleIndex> newIndexIdToIndexes = buildIndexes(entryContainer, state, newConfiguration); |
| | | final Map<String, MatchingRuleIndex> newIndexIdToIndexes = buildIndexes(entryContainer, state, newConfiguration, |
| | | cryptoSuite); |
| | | |
| | | final Map<String, MatchingRuleIndex> removedIndexes = new HashMap<>(indexIdToIndexes); |
| | | removedIndexes.keySet().removeAll(newIndexIdToIndexes.keySet()); |
| | |
| | | |
| | | for (Index updatedIndex : updatedIndexes.values()) |
| | | { |
| | | updateIndex(updatedIndex, newConfiguration.getIndexEntryLimit(), ccr); |
| | | updateIndex(updatedIndex, newConfiguration, ccr); |
| | | } |
| | | } |
| | | catch (Exception e) |
| | |
| | | } |
| | | } |
| | | |
| | | private static void updateIndex(Index updatedIndex, int newIndexEntryLimit, ConfigChangeResult ccr) |
| | | private static void updateIndex(Index updatedIndex, BackendIndexCfg newConfig, ConfigChangeResult ccr) |
| | | { |
| | | if (updatedIndex.setIndexEntryLimit(newIndexEntryLimit)) |
| | | // This index could still be used since a new smaller index size limit doesn't impact validity of the results. |
| | | if (updatedIndex.setIndexEntryLimit(newConfig.getIndexEntryLimit())) |
| | | { |
| | | // This index can still be used since index size limit doesn't impact validity of the results. |
| | | ccr.setAdminActionRequired(true); |
| | | ccr.addMessage(NOTE_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD.get(updatedIndex.getName())); |
| | | } |
| | | if (updatedIndex.setProtected(newConfig.isConfidentialityEnabled())) |
| | | { |
| | | ccr.setAdminActionRequired(true); |
| | | ccr.addMessage(NOTE_CONFIG_INDEX_CONFIDENTIALITY_REQUIRES_REBUILD.get(updatedIndex.getName())); |
| | | } |
| | | } |
| | | |
| | | private static void deleteIndex(WriteableTransaction txn, EntryContainer entryContainer, Index index) |
| | |
| | | return true; |
| | | } |
| | | |
| | | /** Indicates whether this attribute index is configured to store confidential data. */ |
| | | boolean isConfidentialityEnabled() |
| | | { |
| | | return config.isConfidentialityEnabled(); |
| | | } |
| | | |
| | | /** |
| | | * Get the tree name prefix for indexes in this attribute index. |
| | | * |
| | |
| | | final String subCommandName = subCommand.getName(); |
| | | try |
| | | { |
| | | DirectoryServer.InitializationBuilder initializationBuilder = |
| | | new DirectoryServer.InitializationBuilder(configFile.getValue()); |
| | | if (subCommandName.equals(DUMP_INDEX) || subCommandName.equals(SHOW_INDEX_STATUS)) |
| | | { |
| | | initializationBuilder.requireCryptoServices(); |
| | | } |
| | | initializationBuilder.initialize(); |
| | | new DirectoryServer.InitializationBuilder(configFile.getValue()) |
| | | .requireCryptoServices() |
| | | .initialize(); |
| | | } |
| | | catch (InitializationException e) |
| | | { |
| | |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2006-2008 Sun Microsystems, Inc. |
| | | * Portions Copyright 2014-2015 ForgeRock AS. |
| | | * Portions Copyright 2014-2016 ForgeRock AS |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import org.forgerock.util.Reject; |
| | | import org.opends.server.api.CompressedSchema; |
| | | import org.opends.server.crypto.CryptoSuite; |
| | | import org.opends.server.types.EntryEncodeConfig; |
| | | |
| | | /** |
| | |
| | | */ |
| | | final class DataConfig |
| | | { |
| | | /** |
| | | * Builder for a DataConfig with all compression/encryption options. |
| | | */ |
| | | static final class Builder |
| | | { |
| | | private boolean compressed; |
| | | private boolean encrypted; |
| | | private boolean compactEncoding; |
| | | private CompressedSchema compressedSchema; |
| | | private CryptoSuite cryptoSuite; |
| | | |
| | | Builder() |
| | | { |
| | | // Nothing to do. |
| | | } |
| | | |
| | | public Builder encode(boolean enabled) |
| | | { |
| | | this.compactEncoding = enabled; |
| | | return this; |
| | | } |
| | | |
| | | public Builder compress(boolean enabled) |
| | | { |
| | | this.compressed = enabled; |
| | | return this; |
| | | } |
| | | |
| | | public Builder encrypt(boolean enabled) |
| | | { |
| | | this.encrypted = enabled; |
| | | return this; |
| | | } |
| | | |
| | | public Builder schema(CompressedSchema schema) |
| | | { |
| | | this.compressedSchema = schema; |
| | | return this; |
| | | } |
| | | |
| | | public Builder cryptoSuite(CryptoSuite cs) |
| | | { |
| | | this.cryptoSuite = cs; |
| | | return this; |
| | | } |
| | | |
| | | public DataConfig build() |
| | | { |
| | | return new DataConfig(this); |
| | | } |
| | | } |
| | | /** Indicates whether data should be compressed before writing to the storage. */ |
| | | private final boolean compressed; |
| | | |
| | | /** The configuration to use when encoding entries in the tree. */ |
| | | private final EntryEncodeConfig encodeConfig; |
| | | |
| | | private final boolean encrypted; |
| | | |
| | | private final CryptoSuite cryptoSuite; |
| | | /** |
| | | * Construct a new DataConfig object with the specified settings. |
| | | * |
| | | * @param compressed true if data should be compressed, false if not. |
| | | * @param compactEncoding true if data should be encoded in compact form, |
| | | * false if not. |
| | | * @param compressedSchema the compressed schema manager to use. It must not |
| | | * be {@code null} if compactEncoding is {@code true}. |
| | | * @param builder the builder with the configuration |
| | | */ |
| | | DataConfig(boolean compressed, boolean compactEncoding, CompressedSchema compressedSchema) |
| | | private DataConfig(Builder builder) |
| | | { |
| | | this.compressed = compressed; |
| | | this.compressed = builder.compressed; |
| | | this.encrypted = builder.encrypted; |
| | | this.cryptoSuite = builder.cryptoSuite; |
| | | |
| | | if (compressedSchema == null) |
| | | if (builder.compressedSchema == null) |
| | | { |
| | | Reject.ifTrue(compactEncoding); |
| | | this.encodeConfig = new EntryEncodeConfig(false, compactEncoding, false); |
| | | Reject.ifTrue(builder.compactEncoding); |
| | | this.encodeConfig = new EntryEncodeConfig(false, builder.compactEncoding, false); |
| | | } |
| | | else |
| | | { |
| | | this.encodeConfig = |
| | | new EntryEncodeConfig(false, compactEncoding, compactEncoding, compressedSchema); |
| | | this.encodeConfig = new EntryEncodeConfig(false, builder.compactEncoding, builder.compactEncoding, |
| | | builder.compressedSchema); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Determine whether data should be compressed before writing to the tree. |
| | | * @return true if data should be compressed, false if not. |
| | | */ |
| | | boolean isCompressed() |
| | | { |
| | | return compressed; |
| | | } |
| | | |
| | | /** |
| | | * Get the EntryEncodeConfig object in use by this configuration. |
| | | * @return the EntryEncodeConfig object in use by this configuration. |
| | | */ |
| | | boolean isEncrypted() |
| | | { |
| | | return encrypted; |
| | | } |
| | | |
| | | EntryEncodeConfig getEntryEncodeConfig() |
| | | { |
| | | return encodeConfig; |
| | | } |
| | | |
| | | /** |
| | | * Get a string representation of this object. |
| | | * @return A string representation of this object. |
| | | */ |
| | | CryptoSuite getCryptoSuite() |
| | | { |
| | | return cryptoSuite; |
| | | } |
| | | |
| | | @Override |
| | | public String toString() |
| | | { |
| | | final StringBuilder builder = new StringBuilder(); |
| | | builder.append("DataConfig(compressed="); |
| | | builder.append(compressed); |
| | | builder.append(", encrypted="); |
| | | builder.append(encrypted); |
| | | builder.append(", "); |
| | | if (encrypted) |
| | | { |
| | | builder.append(cryptoSuite.toString()); |
| | | builder.append(", "); |
| | | } |
| | | encodeConfig.toString(builder); |
| | | builder.append(")"); |
| | | return builder.toString(); |
| | |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2006-2010 Sun Microsystems, Inc. |
| | | * Portions Copyright 2012-2015 ForgeRock AS. |
| | | * Portions Copyright 2012-2016 ForgeRock AS. |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.UpdateFunction; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.crypto.CryptoSuite; |
| | | |
| | | /** |
| | | * Represents an index implemented by a tree in which each key maps to a set of entry IDs. The key |
| | |
| | | { |
| | | private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); |
| | | |
| | | /** The limit on the number of entry IDs that may be indexed by one key. */ |
| | | private final State state; |
| | | private final EntryContainer entryContainer; |
| | | /** The limit on the number of entry IDs that may be indexed by one key. */ |
| | | private int indexEntryLimit; |
| | | |
| | | private EntryIDSetCodec codec; |
| | | protected boolean encryptValues; |
| | | protected CryptoSuite cryptoSuite; |
| | | |
| | | /** |
| | | * A flag to indicate if this index should be trusted to be consistent with the entries tree. |
| | |
| | | { |
| | | final EnumSet<IndexFlag> flags = state.getIndexFlags(txn, getName()); |
| | | codec = flags.contains(COMPACTED) ? CODEC_V2 : CODEC_V1; |
| | | if (encryptValues) |
| | | { |
| | | codec = new EntryIDSet.EntryIDSetCodecV3(codec, cryptoSuite); |
| | | } |
| | | trusted = flags.contains(TRUSTED); |
| | | if (!trusted && entryContainer.getHighestEntryID(txn).longValue() == 0) |
| | | { |
| | |
| | | return codec.encode(entryIDSet); |
| | | } |
| | | |
| | | // Keeps temporary values during import encrypted even in on-disk buffers. |
| | | // Decodes a single entry ID from an import buffer value. When values are encrypted, the |
| | | // buffer holds a full encoded EntryIDSet containing exactly one ID -- TODO confirm the |
| | | // single-ID invariant with the import pipeline; otherwise the value is the raw long. |
| | | long importDecodeValue(ByteString value) |
| | | { |
| | | return encryptValues ? decodeValue(ByteString.empty(), value).iterator().next().longValue() : value.toLong(); |
| | | } |
| | | |
| | | // Encodes a single entry ID for an import buffer, wrapping it in a one-element EntryIDSet |
| | | // so that it passes through the (possibly encrypting) codec when confidentiality is enabled. |
| | | ByteString importToValue(EntryID entryID) |
| | | { |
| | | return encryptValues ? toValue(newDefinedSet(entryID.longValue())) : entryID.toByteString(); |
| | | } |
| | | |
| | | @Override |
| | | public final void update(final WriteableTransaction txn, final ByteString key, final EntryIDSet deletedIDs, |
| | | final EntryIDSet addedIDs) throws StorageRuntimeException |
| | |
| | | } |
| | | |
| | | @Override |
| | | public boolean setProtected(boolean protectIndex) |
| | | { |
| | | final boolean rebuildRequired = this.encryptValues != protectIndex; |
| | | this.encryptValues = protectIndex; |
| | | return rebuildRequired; |
| | | } |
| | | |
| | | @Override |
| | | public final int getIndexEntryLimit() |
| | | { |
| | | return indexEntryLimit; |
| | |
| | | /** Handles the disk representation of LDAP data. */ |
| | | public class DnKeyFormat |
| | | { |
| | | /** The format version used by this class to encode and decode a ByteString. */ |
| | | static final byte FORMAT_VERSION = 0x01; |
| | | |
| | | // The following fields have been copied from the DN class in the SDK |
| | | /** RDN separator for normalized byte string of a DN. */ |
| | |
| | | import org.opends.server.core.ModifyDNOperation; |
| | | import org.opends.server.core.ModifyOperation; |
| | | import org.opends.server.core.SearchOperation; |
| | | import org.opends.server.core.ServerContext; |
| | | import org.opends.server.types.Attribute; |
| | | import org.opends.server.types.Attributes; |
| | | import org.opends.server.types.CanceledOperationException; |
| | | import org.opends.server.types.Control; |
| | | import org.opends.server.crypto.CryptoSuite; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.opends.server.types.Entry; |
| | | import org.opends.server.types.Modification; |
| | |
| | | */ |
| | | private final String treePrefix; |
| | | |
| | | private final ServerContext serverContext; |
| | | |
| | | private CryptoSuite cryptoSuite; |
| | | |
| | | /** |
| | | * This class is responsible for managing the configuration for attribute |
| | | * indexes used within this entry container. |
| | |
| | | { |
| | | try |
| | | { |
| | | new AttributeIndex(cfg, state, EntryContainer.this); |
| | | newAttributeIndex(cfg); |
| | | return true; |
| | | } |
| | | catch(Exception e) |
| | |
| | | final ConfigChangeResult ccr = new ConfigChangeResult(); |
| | | try |
| | | { |
| | | final AttributeIndex index = new AttributeIndex(cfg, state, EntryContainer.this); |
| | | final AttributeIndex index = newAttributeIndex(cfg); |
| | | storage.write(new WriteOperation() |
| | | { |
| | | @Override |
| | |
| | | final Lock sharedLock = lock.readLock(); |
| | | final Lock exclusiveLock = lock.writeLock(); |
| | | |
| | | /** |
| | | * Create a new entry container object. |
| | | * |
| | | * @param baseDN The baseDN this entry container will be responsible for |
| | | * storing on disk. |
| | | * @param backendID ID of the backend that is creating this entry container. |
| | | * It is needed by the Directory Server entry cache methods. |
| | | * @param config The configuration of the backend. |
| | | * @param storage The storage for this entryContainer. |
| | | * @param rootContainer The root container this entry container is in. |
| | | * @throws ConfigException if a configuration related error occurs. |
| | | */ |
| | | EntryContainer(DN baseDN, String backendID, PluggableBackendCfg config, Storage storage, |
| | | RootContainer rootContainer) throws ConfigException |
| | | EntryContainer(DN baseDN, String backendID, PluggableBackendCfg config, Storage storage, RootContainer rootContainer, |
| | | ServerContext serverContext) throws ConfigException |
| | | { |
| | | this.backendID = backendID; |
| | | this.baseDN = baseDN; |
| | | this.config = config; |
| | | this.storage = storage; |
| | | this.rootContainer = rootContainer; |
| | | this.serverContext = serverContext; |
| | | this.treePrefix = baseDN.toNormalizedUrlSafeString(); |
| | | this.id2childrenCount = new ID2ChildrenCount(getIndexName(ID2CHILDREN_COUNT_TREE_NAME)); |
| | | this.dn2id = new DN2ID(getIndexName(DN2ID_TREE_NAME), baseDN); |
| | |
| | | config.addBackendVLVIndexDeleteListener(vlvIndexCfgManager); |
| | | } |
| | | |
/**
 * Creates an attribute index from its configuration, sharing this entry container's
 * index state and crypto suite.
 *
 * @param cfg the configuration of the attribute index
 * @return the new attribute index
 * @throws ConfigException if the index configuration is invalid
 */
private AttributeIndex newAttributeIndex(BackendIndexCfg cfg) throws ConfigException
{
  return new AttributeIndex(cfg, state, this, cryptoSuite);
}
| | | |
| | | private DataConfig newDataConfig(PluggableBackendCfg config) |
| | | { |
| | | return new DataConfig.Builder() |
| | | .compress(config.isEntriesCompressed()) |
| | | .encode(config.isCompactEncoding()) |
| | | .encrypt(config.isConfidentialityEnabled()) |
| | | .cryptoSuite(cryptoSuite) |
| | | .schema(rootContainer.getCompressedSchema()) |
| | | .build(); |
| | | } |
| | | |
| | | private TreeName getIndexName(String indexId) |
| | | { |
| | | return new TreeName(treePrefix, indexId); |
| | |
| | | boolean shouldCreate = accessMode.isWriteable(); |
| | | try |
| | | { |
| | | DataConfig entryDataConfig = new DataConfig( |
| | | config.isEntriesCompressed(), config.isCompactEncoding(), rootContainer.getCompressedSchema()); |
| | | |
| | | id2entry = new ID2Entry(getIndexName(ID2ENTRY_TREE_NAME), entryDataConfig); |
| | | cryptoSuite = serverContext.getCryptoManager().newCryptoSuite(config.getCipherTransformation(), |
| | | config.getCipherKeyLength()); |
| | | id2entry = new ID2Entry(getIndexName(ID2ENTRY_TREE_NAME), newDataConfig(config)); |
| | | id2entry.open(txn, shouldCreate); |
| | | id2childrenCount.open(txn, shouldCreate); |
| | | dn2id.open(txn, shouldCreate); |
| | |
| | | { |
| | | BackendIndexCfg indexCfg = config.getBackendIndex(idx); |
| | | |
| | | final AttributeIndex index = new AttributeIndex(indexCfg, state, this); |
| | | final AttributeIndex index = newAttributeIndex(indexCfg); |
| | | index.open(txn, shouldCreate); |
| | | if(!index.isTrusted()) |
| | | { |
| | |
| | | } |
| | | |
| | | @Override |
| | | public boolean isConfigurationChangeAcceptable( |
| | | PluggableBackendCfg cfg, List<LocalizableMessage> unacceptableReasons) |
| | | public boolean isConfigurationChangeAcceptable(PluggableBackendCfg cfg, List<LocalizableMessage> unacceptableReasons) |
| | | { |
| | | // This is always true because only all config attributes used |
| | | // by the entry container should be validated by the admin framework. |
| | | StringBuilder builder = new StringBuilder(); |
| | | for (AttributeIndex attributeIndex : attrIndexMap.values()) |
| | | { |
| | | if (attributeIndex.isConfidentialityEnabled() && !cfg.isConfidentialityEnabled()) |
| | | { |
| | | if (builder.length() > 0) |
| | | { |
| | | builder.append(", "); |
| | | } |
| | | builder.append(attributeIndex.getAttributeType().getNameOrOID()); |
| | | } |
| | | } |
| | | if (builder.length() > 0) |
| | | { |
| | | unacceptableReasons.add(ERR_BACKEND_CANNOT_CHANGE_CONFIDENTIALITY.get(getBaseDN(), builder.toString())); |
| | | return false; |
| | | } |
| | | return true; |
| | | } |
| | | |
| | |
| | | @Override |
| | | public void run(WriteableTransaction txn) throws Exception |
| | | { |
| | | DataConfig entryDataConfig = new DataConfig(cfg.isEntriesCompressed(), |
| | | cfg.isCompactEncoding(), rootContainer.getCompressedSchema()); |
| | | id2entry.setDataConfig(entryDataConfig); |
| | | cryptoSuite.setCipherTransformation(cfg.getCipherTransformation()); |
| | | cryptoSuite.setCipherKeyLength(cfg.getCipherKeyLength()); |
| | | id2entry.setDataConfig(newDataConfig(cfg)); |
| | | |
| | | EntryContainer.this.config = cfg; |
| | | } |
| | |
| | | return false; |
| | | } |
| | | |
/**
 * Indicates whether the backend configuration has confidentiality enabled
 * for this entry container.
 *
 * @return {@code true} if confidentiality is enabled in the backend configuration
 */
boolean isConfidentialityEnabled()
{
  return config.isConfidentialityEnabled();
}
| | | |
| | | /** |
| | | * Fetch the base Entry of the EntryContainer. |
| | | * @param searchBaseDN the DN for the base entry |
| | |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | | |
| | | import java.security.GeneralSecurityException; |
| | | import java.util.Arrays; |
| | | import java.util.Iterator; |
| | | import java.util.List; |
| | |
| | | import org.forgerock.util.Reject; |
| | | |
| | | import com.forgerock.opendj.util.Iterators; |
| | | import org.opends.server.types.CryptoManagerException; |
| | | import org.opends.server.crypto.CryptoSuite; |
| | | |
| | | /** |
| | | * Represents a set of Entry IDs. It can represent a set where the IDs are not defined, for example when the index entry |
| | |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decorate a V1 or V2 codec with encryption. When writing EntryIDSets to disk, |
| | | * prepend two bytes, {0, 1} to mark them as encrypted. |
| | | * The first is tag zero (unused in other encodings), followed by a byte |
| | | * indicating version 1 of encryption. |
| | | */ |
| | | static class EntryIDSetCodecV3 implements EntryIDSetCodec |
| | | { |
| | | private static final byte CODEC_V3_TAG = 0x00; |
| | | private static final byte CODEC_V3_VERSION = 0x01; |
| | | private final EntryIDSetCodec delegate; |
| | | private final CryptoSuite cryptoSuite; |
| | | EntryIDSetCodecV3(EntryIDSetCodec delegate, CryptoSuite cryptoSuite) |
| | | { |
| | | this.delegate = delegate; |
| | | this.cryptoSuite = cryptoSuite; |
| | | } |
| | | |
| | | @Override |
| | | public ByteString encode(EntryIDSet idSet) |
| | | { |
| | | ByteString encodedValue = delegate.encode(idSet); |
| | | ByteStringBuilder builder = new ByteStringBuilder(encodedValue.length()); |
| | | builder.appendByte(CODEC_V3_TAG); |
| | | builder.appendByte(CODEC_V3_VERSION); |
| | | try |
| | | { |
| | | builder.appendBytes(cryptoSuite.encrypt(encodedValue.toByteArray())); |
| | | return builder.toByteString(); |
| | | } |
| | | catch (GeneralSecurityException | CryptoManagerException e) |
| | | { |
| | | // Only if the underlying crypto provider has serious problems. |
| | | throw new IllegalStateException(); |
| | | } |
| | | } |
| | | |
| | | @Override |
| | | public EntryIDSet decode(ByteSequence key, ByteString value) |
| | | { |
| | | checkNotNull(value, "value must not be null"); |
| | | if (value.byteAt(0) == CODEC_V3_TAG) |
| | | { |
| | | try |
| | | { |
| | | return delegate.decode(key, |
| | | ByteString.wrap(cryptoSuite.decrypt(value.subSequence(2, value.length()).toByteArray()))); |
| | | } |
| | | catch (GeneralSecurityException | CryptoManagerException e) |
| | | { |
| | | // Only if data is completely corrupted. |
| | | throw new IllegalStateException(); |
| | | } |
| | | } |
| | | return delegate.decode(key, value); |
| | | } |
| | | } |
| | | |
/**
 * Wraps the provided codec with the encrypting V3 codec.
 *
 * @param codec the codec producing the clear-text encoding
 * @param cs the crypto suite used to encrypt and decrypt the encoded sets
 * @return a codec which encrypts on encode and decrypts on decode
 */
static EntryIDSetCodec newEntryIDSetCodecV3(EntryIDSetCodec codec, CryptoSuite cs)
{
  return new EntryIDSetCodecV3(codec, cs);
}
| | | |
| | | static EntryIDSet newUndefinedSet() |
| | | { |
| | | return newUndefinedSetWithKey(NO_KEY); |
| | |
| | | private void exportContainer(ReadableTransaction txn, EntryContainer entryContainer) |
| | | throws StorageRuntimeException, IOException, LDIFException |
| | | { |
| | | Cursor<ByteString, ByteString> cursor = txn.openCursor(entryContainer.getID2Entry().getName()); |
| | | try |
| | | ID2Entry id2entry = entryContainer.getID2Entry(); |
| | | try (final Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName())) |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | |
| | | Entry entry = null; |
| | | try |
| | | { |
| | | entry = ID2Entry.entryFromDatabase(value, |
| | | entryContainer.getRootContainer().getCompressedSchema()); |
| | | entry = id2entry.entryFromDatabase(value, entryContainer.getRootContainer().getCompressedSchema()); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | |
| | | } |
| | | } |
| | | } |
| | | finally |
| | | { |
| | | cursor.close(); |
| | | } |
| | | } |
| | | |
| | | /** This class reports progress of the export job at fixed intervals. */ |
| | |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2006-2010 Sun Microsystems, Inc. |
| | | * Portions Copyright 2012-2015 ForgeRock AS. |
| | | * Portions Copyright 2012-2016 ForgeRock AS. |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | | import static org.forgerock.opendj.ldap.ResultCode.UNWILLING_TO_PERFORM; |
| | | import static org.forgerock.util.Reject.*; |
| | | import static org.forgerock.util.Utils.*; |
| | | import static org.opends.messages.BackendMessages.*; |
| | |
| | | import static org.opends.server.core.DirectoryServer.*; |
| | | |
| | | import java.io.IOException; |
| | | import java.io.InputStream; |
| | | import java.io.OutputStream; |
| | | import java.util.zip.DataFormatException; |
| | | import java.util.zip.DeflaterOutputStream; |
| | | import java.util.zip.InflaterInputStream; |
| | | import java.util.zip.InflaterOutputStream; |
| | | |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.forgerock.opendj.io.ASN1; |
| | | import org.forgerock.opendj.io.ASN1Reader; |
| | | import org.forgerock.opendj.io.ASN1Writer; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteSequenceReader; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.ByteStringBuilder; |
| | | import org.forgerock.opendj.ldap.DecodeException; |
| | |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.backends.pluggable.spi.WriteableTransaction; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.types.CryptoManagerException; |
| | | import org.opends.server.types.DirectoryException; |
| | | import org.opends.server.types.Entry; |
| | | import org.opends.server.types.LDAPException; |
| | |
| | | /** A cached set of ByteStringBuilder buffers and ASN1Writer used to encode entries. */ |
| | | private static final class EntryCodec |
| | | { |
| | | /** |
     * The format version used to encode and decode entries in previous versions.
| | | * Not used anymore, kept for compatibility during upgrade. |
| | | */ |
| | | static final byte FORMAT_VERSION = 0x01; |
| | | |
| | | /** The ASN1 tag for the ByteString type. */ |
| | | private static final byte TAG_TREE_ENTRY = 0x60; |
| | | private static final int BUFFER_INIT_SIZE = 512; |
| | | private static final byte PLAIN_ENTRY = 0x00; |
| | | private static final byte COMPRESS_ENTRY = 0x01; |
| | | private static final byte ENCRYPT_ENTRY = 0x02; |
| | | |
| | | /** The format version for entry encoding. */ |
| | | static final byte FORMAT_VERSION_V2 = 0x02; |
| | | |
| | | private final ByteStringBuilder encodedBuffer = new ByteStringBuilder(); |
| | | private final ByteStringBuilder entryBuffer = new ByteStringBuilder(); |
| | | private final ByteStringBuilder compressedEntryBuffer = new ByteStringBuilder(); |
| | | private final ASN1Writer writer; |
| | | private final int maxBufferSize; |
| | | |
private EntryCodec()
{
  // Cap the reusable buffers at the server-wide internal buffer size so a pooled
  // codec does not permanently retain oversized arrays after encoding a large entry.
  this.maxBufferSize = getMaxInternalBufferSize();
  this.writer = ASN1.getWriter(encodedBuffer, maxBufferSize);
}
| | | |
/**
 * Resets this codec so it can be reused.
 * Presumably clearAndTruncate() empties each buffer and shrinks its backing array
 * back towards BUFFER_INIT_SIZE when it grew past maxBufferSize — confirm against
 * ByteStringBuilder.clearAndTruncate().
 */
private void release()
{
  closeSilently(writer);
  encodedBuffer.clearAndTruncate(maxBufferSize, BUFFER_INIT_SIZE);
  entryBuffer.clearAndTruncate(maxBufferSize, BUFFER_INIT_SIZE);
  compressedEntryBuffer.clearAndTruncate(maxBufferSize, BUFFER_INIT_SIZE);
}
| | | |
| | | private Entry decode(ByteString bytes, CompressedSchema compressedSchema) |
| | | throws DirectoryException, DecodeException, IOException |
| | | { |
| | | // Get the format version. |
| | | byte formatVersion = bytes.byteAt(0); |
| | | if(formatVersion != DnKeyFormat.FORMAT_VERSION) |
| | | final byte formatVersion = bytes.byteAt(0); |
| | | switch(formatVersion) |
| | | { |
| | | case FORMAT_VERSION: |
| | | return decodeV1(bytes, compressedSchema); |
| | | case FORMAT_VERSION_V2: |
| | | return decodeV2(bytes, compressedSchema); |
| | | default: |
| | | throw DecodeException.error(ERR_INCOMPATIBLE_ENTRY_VERSION.get(formatVersion)); |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decodes an entry from the old format. |
| | | * <p> |
| | | * An entry on disk is ASN1 encoded in this format: |
| | | * |
| | | * <pre> |
| | | * ByteString ::= [APPLICATION 0] IMPLICIT SEQUENCE { |
| | | * uncompressedSize INTEGER, -- A zero value means not compressed. |
| | | * dataBytes OCTET STRING -- Optionally compressed encoding of |
| | | * the data bytes. |
| | | * } |
| | | * |
| | | * ID2EntryValue ::= ByteString |
| | | * -- Where dataBytes contains an encoding of DirectoryServerEntry. |
| | | * |
| | | * DirectoryServerEntry ::= [APPLICATION 1] IMPLICIT SEQUENCE { |
| | | * dn LDAPDN, |
| | | * objectClasses SET OF LDAPString, |
| | | * userAttributes AttributeList, |
| | | * operationalAttributes AttributeList |
| | | * } |
| | | * </pre> |
| | | * |
| | | * @param bytes A byte array containing the encoded tree value. |
| | | * @param compressedSchema The compressed schema manager to use when decoding. |
| | | * @return The decoded entry. |
| | | * @throws DecodeException If the data is not in the expected ASN.1 encoding |
| | | * format. |
| | | * @throws DirectoryException If a Directory Server error occurs. |
| | | * @throws IOException if an error occurs while reading the ASN1 sequence. |
| | | */ |
| | | private Entry decodeV1(ByteString bytes, CompressedSchema compressedSchema) |
| | | throws DirectoryException, DecodeException, IOException |
| | | { |
| | | // Read the ASN1 sequence. |
| | | ASN1Reader reader = ASN1.getReader(bytes.subSequence(1, bytes.length())); |
| | | reader.readStartSequence(); |
| | |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * Decodes an entry in the new extensible format. |
| | | * Enties are encoded according to the sequence |
| | | * {VERSION_BYTE, FLAG_BYTE, COMPACT_INTEGER_LENGTH, ID2ENTRY_VALUE} |
| | | * where |
| | | * |
| | | * ID2ENTRY_VALUE = encoding of Entry as in decodeV1() |
| | | * VERSION_BYTE = 0x2 |
| | | * FLAG_BYTE = bit field of OR'ed values indicating post-encoding processing. |
| | | * possible meaningful flags are COMPRESS_ENTRY and ENCRYPT_ENTRY. |
| | | * COMPACT_INTEGER_LENGTH = length of ID2ENTRY_VALUE |
| | | * |
| | | * @param bytes A byte array containing the encoded tree value. |
| | | * @param compressedSchema The compressed schema manager to use when decoding. |
| | | * @return The decoded entry. |
| | | * @throws DecodeException If the data is not in the expected ASN.1 encoding |
| | | * format or a decryption error occurs. |
| | | * @throws DirectoryException If a Directory Server error occurs. |
| | | * @throws IOException if an error occurs while reading the ASN1 sequence. |
| | | */ |
| | | private Entry decodeV2(ByteString bytes, CompressedSchema compressedSchema) |
| | | throws DirectoryException, DecodeException, IOException |
| | | { |
| | | ByteSequenceReader reader = bytes.asReader(); |
| | | // skip version byte |
| | | reader.position(1); |
| | | int format = reader.readByte(); |
| | | int encodedEntryLen = reader.readCompactUnsignedInt(); |
| | | try |
| | | { |
| | | if (format == PLAIN_ENTRY) |
| | | { |
| | | return Entry.decode(reader, compressedSchema); |
| | | } |
| | | InputStream is = reader.asInputStream(); |
| | | if ((format & ENCRYPT_ENTRY) == ENCRYPT_ENTRY) |
| | | { |
| | | is = getCryptoManager().getCipherInputStream(is); |
| | | } |
| | | if ((format & COMPRESS_ENTRY) == COMPRESS_ENTRY) |
| | | { |
| | | is = new InflaterInputStream(is); |
| | | } |
| | | byte[] data = new byte[encodedEntryLen]; |
| | | int readBytes; |
| | | int position = 0; |
| | | int leftToRead = encodedEntryLen; |
| | | // CipherInputStream does not read more than block size... |
| | | do |
| | | { |
| | | if ((readBytes = is.read(data, position, leftToRead)) == -1 ) |
| | | { |
| | | throw DecodeException.error(ERR_CANNOT_DECODE_ENTRY.get()); |
| | | } |
| | | position += readBytes; |
| | | leftToRead -= readBytes; |
| | | } while (leftToRead > 0 && readBytes > 0); |
| | | return Entry.decode(ByteString.wrap(data).asReader(), compressedSchema); |
| | | } |
| | | catch (CryptoManagerException cme) |
| | | { |
| | | throw DecodeException.error(cme.getMessageObject()); |
| | | } |
| | | } |
| | | |
| | | private ByteString encode(Entry entry, DataConfig dataConfig) throws DirectoryException |
| | | { |
| | | encodeVolatile(entry, dataConfig); |
| | |
| | | |
| | | private void encodeVolatile(Entry entry, DataConfig dataConfig) throws DirectoryException |
| | | { |
| | | // Encode the entry for later use. |
| | | entry.encode(entryBuffer, dataConfig.getEntryEncodeConfig()); |
| | | |
| | | // First write the DB format version byte. |
| | | encodedBuffer.appendByte(DnKeyFormat.FORMAT_VERSION); |
| | | |
| | | OutputStream os = encodedBuffer.asOutputStream(); |
| | | try |
| | | { |
| | | // Then start the ASN1 sequence. |
| | | writer.writeStartSequence(TAG_TREE_ENTRY); |
| | | |
| | | byte[] formatFlags = { FORMAT_VERSION_V2, 0}; |
| | | os.write(formatFlags); |
| | | encodedBuffer.appendCompactUnsigned(entryBuffer.length()); |
| | | if (dataConfig.isCompressed()) |
| | | { |
| | | OutputStream compressor = null; |
| | | try { |
| | | compressor = new DeflaterOutputStream(compressedEntryBuffer.asOutputStream()); |
| | | entryBuffer.copyTo(compressor); |
| | | } |
| | | finally { |
| | | closeSilently(compressor); |
| | | } |
| | | |
| | | // Compression needed and successful. |
| | | writer.writeInteger(entryBuffer.length()); |
| | | writer.writeOctetString(compressedEntryBuffer); |
| | | os = new DeflaterOutputStream(os); |
| | | formatFlags[1] = COMPRESS_ENTRY; |
| | | } |
| | | else |
| | | if (dataConfig.isEncrypted()) |
| | | { |
| | | writer.writeInteger(0); |
| | | writer.writeOctetString(entryBuffer); |
| | | os = dataConfig.getCryptoSuite().getCipherOutputStream(os); |
| | | formatFlags[1] |= ENCRYPT_ENTRY; |
| | | } |
| | | encodedBuffer.setByte(1, formatFlags[1]); |
| | | |
| | | writer.writeEndSequence(); |
| | | entryBuffer.copyTo(os); |
| | | os.flush(); |
| | | } |
| | | catch(IOException ioe) |
| | | catch(CryptoManagerException | IOException e) |
| | | { |
| | | // TODO: This should never happen with byte buffer. |
| | | logger.traceException(ioe); |
| | | logger.traceException(e); |
| | | throw new DirectoryException(UNWILLING_TO_PERFORM, ERR_CANNOT_ENCODE_ENTRY.get(e.getLocalizedMessage())); |
| | | } |
| | | finally |
| | | { |
| | | try |
| | | { |
| | | os.close(); |
| | | } |
| | | catch (IOException ioe) |
| | | { |
| | | throw new DirectoryException(UNWILLING_TO_PERFORM, ERR_CANNOT_ENCODE_ENTRY.get(ioe.getLocalizedMessage())); |
| | | } |
| | | } |
| | | } |
| | | } |
| | |
| | | |
| | | /** |
| | | * Decodes an entry from its tree representation. |
| | | * <p> |
| | | * An entry on disk is ASN1 encoded in this format: |
| | | * |
| | | * <pre> |
| | | * ByteString ::= [APPLICATION 0] IMPLICIT SEQUENCE { |
| | | * uncompressedSize INTEGER, -- A zero value means not compressed. |
| | | * dataBytes OCTET STRING -- Optionally compressed encoding of |
| | | * the data bytes. |
| | | * } |
| | | * |
| | | * ID2EntryValue ::= ByteString |
| | | * -- Where dataBytes contains an encoding of DirectoryServerEntry. |
| | | * |
| | | * DirectoryServerEntry ::= [APPLICATION 1] IMPLICIT SEQUENCE { |
| | | * dn LDAPDN, |
| | | * objectClasses SET OF LDAPString, |
| | | * userAttributes AttributeList, |
| | | * operationalAttributes AttributeList |
| | | * } |
| | | * </pre> |
| | | * |
| | | * @param bytes A byte array containing the encoded tree value. |
| | | * @param compressedSchema The compressed schema manager to use when decoding. |
| | |
| | | * @throws DirectoryException If a Directory Server error occurs. |
| | | * @throws IOException if an error occurs while reading the ASN1 sequence. |
| | | */ |
| | | static Entry entryFromDatabase(ByteString bytes, |
| | | Entry entryFromDatabase(ByteString bytes, |
| | | CompressedSchema compressedSchema) throws DirectoryException, |
| | | DecodeException, LDAPException, DataFormatException, IOException |
| | | { |
| | |
| | | * @throws DirectoryException If a problem occurs while attempting to encode |
| | | * the entry. |
| | | */ |
| | | static ByteString entryToDatabase(Entry entry, DataConfig dataConfig) throws DirectoryException |
| | | ByteString entryToDatabase(Entry entry, DataConfig dataConfig) throws DirectoryException |
| | | { |
| | | EntryCodec codec = acquireEntryCodec(); |
| | | try |
| | |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2006-2010 Sun Microsystems, Inc. |
| | | * Portions Copyright 2012-2015 ForgeRock AS. |
| | | * Portions Copyright 2012-2016 ForgeRock AS. |
| | | */ |
| | | package org.opends.server.backends.pluggable; |
| | | |
| | |
| | | |
| | | boolean setIndexEntryLimit(int indexEntryLimit); |
| | | |
| | | boolean setProtected(boolean protectIndex); |
| | | |
| | | void setTrusted(WriteableTransaction txn, boolean trusted); |
| | | |
| | | void update(WriteableTransaction txn, ByteString key, EntryIDSet deletedIDs, EntryIDSet addedIDs); |
| | |
| | | |
| | | void writeTrustState(WriteableTransaction txn) throws StorageRuntimeException; |
| | | |
| | | void put(Index index, ByteString key, EntryID entryID); |
| | | void put(DefaultIndex index, ByteString key, EntryID entryID); |
| | | |
| | | void put(VLVIndex index, ByteString sortKey); |
| | | |
| | |
| | | } |
| | | |
| | | @Override |
| | | public void put(Index index, ByteString key, EntryID entryID) |
| | | public void put(DefaultIndex index, ByteString key, EntryID entryID) |
| | | { |
| | | createOrGetBufferedIndexValues(index, key).addEntryID(entryID); |
| | | } |
| | |
| | | { |
| | | private final WriteableTransaction txn; |
| | | private final EntryID expectedEntryID; |
| | | private final ByteString encodedEntryID; |
| | | |
ImportIndexBuffer(WriteableTransaction txn, EntryID expectedEntryID)
{
  this.txn = txn;
  this.expectedEntryID = expectedEntryID;
  // Pre-encode the entryID once: every put() on this buffer uses the same ID.
  this.encodedEntryID = ByteString.valueOfLong(expectedEntryID.longValue());
}
| | | |
| | | @Override |
| | | public void put(Index index, ByteString key, EntryID entryID) |
| | | public void put(DefaultIndex index, ByteString key, EntryID entryID) |
| | | { |
| | | Reject.ifFalse(this.expectedEntryID.equals(entryID), "Unexpected entryID"); |
| | | txn.put(index.getName(), key, encodedEntryID); |
| | | txn.put(index.getName(), key, index.importToValue(entryID)); |
| | | } |
| | | |
| | | @Override |
| | |
| | | impl.writeTrustState(txn); |
| | | } |
| | | |
| | | void put(Index index, ByteString key, EntryID entryID) |
| | | void put(DefaultIndex index, ByteString key, EntryID entryID) |
| | | { |
| | | impl.put(index, key, entryID); |
| | | } |
| | |
| | | import java.util.ArrayList; |
| | | import java.util.Collection; |
| | | |
| | | import org.forgerock.i18n.LocalizableMessage; |
| | | import org.forgerock.i18n.LocalizableMessageBuilder; |
| | | import org.forgerock.i18n.slf4j.LocalizedLogger; |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.DecodeException; |
| | | import org.forgerock.opendj.ldap.schema.AttributeType; |
| | | import org.forgerock.opendj.ldap.spi.IndexQueryFactory; |
| | | import org.forgerock.opendj.ldap.spi.IndexingOptions; |
| | |
| | | { |
| | | return new IndexQuery() |
| | | { |
| | | |
| | | @Override |
| | | public EntryIDSet evaluate(LocalizableMessageBuilder debugMessage, StringBuilder indexNameOut) |
| | | { |
| | | // Read the tree and get Record for the key. |
| | | // Select the right index to be used. |
| | | final Index index = attributeIndex.getNameToIndexes().get(indexID); |
| | | Index index = attributeIndex.getNameToIndexes().get(indexID); |
| | | ByteSequence indexKey = key; |
| | | if (index == null) |
| | | { |
| | | appendDisabledIndexType(debugMessage, indexID, attributeIndex.getAttributeType()); |
| | | return createMatchAllQuery().evaluate(debugMessage, indexNameOut); |
| | | index = attributeIndex.getNameToIndexes().get(indexID + AttributeIndex.PROTECTED_INDEX_ID); |
| | | if (index == null) |
| | | { |
| | | appendDisabledIndexType(debugMessage, indexID, attributeIndex.getAttributeType()); |
| | | return createMatchAllQuery().evaluate(debugMessage, indexNameOut); |
| | | } |
| | | try |
| | | { |
| | | indexKey = attributeIndex.getCryptoSuite().hash48(key); |
| | | } |
| | | catch (DecodeException de) |
| | | { |
| | | appendExceptionError(debugMessage, de.getMessageObject()); |
| | | return createMatchAllQuery().evaluate(debugMessage, indexNameOut); |
| | | } |
| | | } |
| | | |
| | | final EntryIDSet entrySet = index.get(txn, key); |
| | | final EntryIDSet entrySet = index.get(txn, indexKey); |
| | | updateStatsForUndefinedResults(debugMessage, entrySet, index); |
| | | return entrySet; |
| | | } |
| | |
| | | }; |
| | | } |
| | | |
| | | private static void appendExceptionError(LocalizableMessageBuilder debugMessage, LocalizableMessage msg) |
| | | { |
| | | if (debugMessage != null) |
| | | { |
| | | debugMessage.append(msg); |
| | | } |
| | | } |
| | | |
| | | private static void appendDisabledIndexType(LocalizableMessageBuilder debugMessage, String indexID, |
| | | AttributeType attrType) |
| | | { |
| | |
| | | Executors.newSingleThreadScheduledExecutor(newThreadFactory(null, PHASE1_REPORTER_THREAD_NAME, true)); |
| | | scheduler.scheduleAtFixedRate(new PhaseOneProgressReporter(), 10, 10, TimeUnit.SECONDS); |
| | | final PromiseImpl<Void, Exception> promise = PromiseImpl.create(); |
| | | try (final SequentialCursor<ByteString, ByteString> cursor = |
| | | importer.openCursor(entryContainer.getID2Entry().getName())) |
| | | final ID2Entry id2Entry = entryContainer.getID2Entry(); |
| | | try (final SequentialCursor<ByteString, ByteString> cursor = importer.openCursor(id2Entry.getName())) |
| | | { |
| | | while (cursor.next()) |
| | | { |
| | |
| | | try |
| | | { |
| | | entryProcessor.processEntry(entryContainer, |
| | | new EntryID(key), ID2Entry.entryFromDatabase(value, schema)); |
| | | new EntryID(key), id2Entry.entryFromDatabase(value, schema)); |
| | | nbEntriesProcessed.incrementAndGet(); |
| | | } |
| | | catch (Exception e) |
| | |
| | | { |
| | | if (resultContainer.size() < indexLimit) |
| | | { |
| | | resultContainer.add(value.toLong()); |
| | | resultContainer.add(index.importDecodeValue(value)); |
| | | } |
| | | /* |
| | | * else EntryIDSet is above index entry limits, discard additional values |
| | |
| | | EntryContainer openEntryContainer(DN baseDN, WriteableTransaction txn, AccessMode accessMode) |
| | | throws StorageRuntimeException, ConfigException |
| | | { |
| | | EntryContainer ec = new EntryContainer(baseDN, backendId, config, storage, this); |
| | | EntryContainer ec = new EntryContainer(baseDN, backendId, config, storage, this, serverContext); |
| | | ec.open(txn, accessMode); |
| | | return ec; |
| | | } |
| | |
| | | Entry entry; |
| | | try |
| | | { |
| | | entry = ID2Entry.entryFromDatabase(value, rootContainer.getCompressedSchema()); |
| | | entry = id2entry.entryFromDatabase(value, rootContainer.getCompressedSchema()); |
| | | } |
| | | catch (Exception e) |
| | | { |
| | |
| | | import org.opends.server.types.AcceptRejectWarn; |
| | | import org.opends.server.types.BackupConfig; |
| | | import org.opends.server.types.Control; |
| | | import org.opends.server.types.CryptoManager; |
| | | import org.opends.server.types.DITContentRule; |
| | | import org.opends.server.types.DITStructureRule; |
| | | import org.opends.server.types.DirectoryEnvironmentConfig; |
| | |
| | | { |
| | | return directoryServer.loggerConfigManager; |
| | | } |
| | | |
/** Returns the Crypto Manager for this directory server instance. */
@Override
public CryptoManager getCryptoManager()
{
  return directoryServer.cryptoManager;
}
| | | } |
| | | |
| | | /** |
| | |
| | | import org.forgerock.opendj.server.config.server.RootCfg; |
| | | import org.opends.server.extensions.DiskSpaceMonitor; |
| | | import org.opends.server.loggers.CommonAudit; |
| | | import org.opends.server.types.CryptoManager; |
| | | import org.opends.server.types.DirectoryEnvironmentConfig; |
| | | import org.opends.server.types.Schema; |
| | | |
| | |
| | | * @return the logger config manager |
| | | */ |
| | | LoggerConfigManager getLoggerConfigManager(); |
| | | |
| | | /** |
| | | * Returns the Crypto Manager for the instance. |
| | | * |
| | | * @return the Crypto Manager for the instance |
| | | */ |
| | | CryptoManager getCryptoManager(); |
| | | } |
| | |
| | | { |
| | | return sslCipherSuites; |
| | | } |
| | | |
/**
 * Creates a new crypto suite backed by this crypto manager.
 *
 * @param cipherTransformation the initial cipher transformation
 * @param cipherKeyLength the initial key length for the cipher
 * @return a new {@code CryptoSuite} using the given cipher settings
 */
@Override
public CryptoSuite newCryptoSuite(String cipherTransformation, int cipherKeyLength)
{
  return new CryptoSuite(this, cipherTransformation, cipherKeyLength);
}
| | | } |
| New file |
| | |
| | | /* |
| | | * The contents of this file are subject to the terms of the Common Development and |
| | | * Distribution License (the License). You may not use this file except in compliance with the |
| | | * License. |
| | | * |
| | | * You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the |
| | | * specific language governing permission and limitations under the License. |
| | | * |
| | | * When distributing Covered Software, include this CDDL Header Notice in each file and include |
| | | * the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL |
| | | * Header, with the fields enclosed by brackets [] replaced by your own identifying |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2016 ForgeRock AS. |
| | | */ |
| | | package org.opends.server.crypto; |
| | | |
| | | import org.forgerock.opendj.ldap.ByteSequence; |
| | | import org.forgerock.opendj.ldap.ByteString; |
| | | import org.forgerock.opendj.ldap.DecodeException; |
| | | import org.opends.server.types.CryptoManager; |
| | | import org.opends.server.types.CryptoManagerException; |
| | | |
| | | import javax.crypto.CipherInputStream; |
| | | import javax.crypto.CipherOutputStream; |
| | | import java.io.InputStream; |
| | | import java.io.OutputStream; |
| | | import java.security.GeneralSecurityException; |
| | | import java.security.NoSuchAlgorithmException; |
| | | |
| | | import static org.opends.messages.CoreMessages.*; |
| | | |
| | | /** Defines cipher transformation and hash algorithm for cryptographic related operations. */ |
| | | public class CryptoSuite |
| | | { |
| | | private String cipherTransformation; |
| | | private int cipherKeyLength; |
| | | private final CryptoManager cryptoManager; |
| | | |
| | | /** |
| | | * Declares a new CryptoSuite with provided parameters. |
| | | * @param cryptoManager the CryptoManager to use for cryptographic operations |
| | | * @param cipherTransformation the initial cipher transformation |
| | | * @param cipherKeyLength the initial key length for the cipher |
| | | */ |
| | | public CryptoSuite(CryptoManager cryptoManager, String cipherTransformation, int cipherKeyLength) |
| | | { |
| | | this.cryptoManager = cryptoManager; |
| | | this.cipherTransformation = cipherTransformation; |
| | | this.cipherKeyLength = cipherKeyLength; |
| | | } |
| | | |
| | | /** |
| | | * Returns the cipher transformation to use. |
| | | * |
| | | * @return the cipher transformation to use |
| | | */ |
| | | public String getCipherTransformation() |
| | | { |
| | | return cipherTransformation; |
| | | } |
| | | |
| | | /** |
| | | * Returns the cipher key length to use. |
| | | * |
| | | * @return the cipher key length to use |
| | | */ |
| | | public int getCipherKeyLength() |
| | | { |
| | | return cipherKeyLength; |
| | | } |
| | | |
| | | /** |
| | | * Sets the cipher transformation for the CryptoSuite. |
| | | * |
| | | * @param cipherTransformation the new cipher transformation |
| | | */ |
| | | public void setCipherTransformation(String cipherTransformation) |
| | | { |
| | | this.cipherTransformation = cipherTransformation; |
| | | } |
| | | |
| | | /** |
| | | * Sets the key length for the CryptoSuite. |
| | | * |
| | | * @param cipherKeyLength the new key length |
| | | */ |
| | | public void setCipherKeyLength(int cipherKeyLength) |
| | | { |
| | | this.cipherKeyLength = cipherKeyLength; |
| | | } |
| | | |
| | | /** |
| | | * Decrypts data using the key specified in the prologue. |
| | | * |
| | | * @param data the cipher-text to be decrypted (contains prologue) |
| | | * @return a byte array with the clear-text |
| | | * @throws GeneralSecurityException if a problem occurs while decrypting the data |
| | | * @throws CryptoManagerException if a problem occurs during cipher initialization |
| | | */ |
| | | public byte[] decrypt(byte[] data) throws GeneralSecurityException, CryptoManagerException |
| | | { |
| | | return cryptoManager.decrypt(data); |
| | | } |
| | | |
| | | /** |
| | | * Encrypts data with the configured cipher transformation and key length. |
| | | * |
| | | * @param data the clear-text data to encrypt |
| | | * @return a byte array with a prologue containing the key identifier followed by cipher-text |
| | | * @throws GeneralSecurityException if a problem occurs while encrypting the data |
| | | * @throws CryptoManagerException if a problem occurs during cipher initialization |
| | | */ |
| | | public byte[] encrypt(byte[] data) throws GeneralSecurityException, CryptoManagerException |
| | | { |
| | | return cryptoManager.encrypt(cipherTransformation, cipherKeyLength, data); |
| | | } |
| | | |
| | | /** |
| | | * Returns a {@link CipherOutputStream} for encrypting through a sequence of |
| | | * OutputStreams. |
| | | * |
| | | * @param os the up-link OutputStream |
| | | * @return a {@link CipherOutputStream} for encrypting through a sequence of |
| | | * OutputStreams |
| | | * @throws CryptoManagerException if a problem occurs during cipher initialization |
| | | */ |
| | | public CipherOutputStream getCipherOutputStream(OutputStream os) throws CryptoManagerException |
| | | { |
| | | return cryptoManager.getCipherOutputStream(cipherTransformation, cipherKeyLength, os); |
| | | } |
| | | |
| | | /** |
| | | * Returns a {@link CipherInputStream} for decrypting through a sequence of InputStreams. |
| | | * |
| | | * @param is the up-link InputStream |
| | | * @return a {@link CipherInputStream} for decrypting through a sequence of InputStreams. |
| | | * @throws CryptoManagerException if a problem occurs during cipher initialization |
| | | */ |
| | | public CipherInputStream getCipherInputStream(InputStream is) throws CryptoManagerException |
| | | { |
| | | return cryptoManager.getCipherInputStream(is); |
| | | } |
| | | |
| | | /** |
| | | * Returns a ByteString of 6 bytes hash of the data. |
| | | * |
| | | * @param data a ByteSequence containing the input data to be hashed |
| | | * @return a ByteString of 6 bytes hash of the data. |
| | | * @throws DecodeException if digest of the data cannot be computed |
| | | */ |
| | | public ByteString hash48(ByteSequence data) throws DecodeException |
| | | { |
| | | try |
| | | { |
| | | byte[] hash = cryptoManager.digest("SHA-1", data.toByteArray()); |
| | | return ByteString.valueOfBytes(hash, 0, 6); |
| | | } |
| | | catch (NoSuchAlgorithmException e) |
| | | { |
| | | throw DecodeException.error(ERR_CANNOT_HASH_DATA.get()); |
| | | } |
| | | } |
| | | |
| | | @Override |
| | | public String toString() |
| | | { |
| | | StringBuilder builder = new StringBuilder(); |
| | | builder.append("CryptoSuite(cipherTransformation="); |
| | | builder.append(cipherTransformation); |
| | | builder.append(", keyLength="); |
| | | builder.append(cipherKeyLength); |
| | | builder.append(")"); |
| | | return builder.toString(); |
| | | } |
| | | } |
| | |
| | | * information: "Portions Copyright [year] [name of copyright owner]". |
| | | * |
| | | * Copyright 2006-2009 Sun Microsystems, Inc. |
| | | * Portions Copyright 2014-2015 ForgeRock AS. |
| | | * Portions Copyright 2014-2016 ForgeRock AS. |
| | | */ |
| | | package org.opends.server.types; |
| | | |
| | | import org.forgerock.opendj.config.server.ConfigException; |
| | | import org.opends.server.crypto.CryptoSuite; |
| | | |
| | | import javax.crypto.Mac; |
| | | import javax.crypto.CipherOutputStream; |
| | |
| | | * @return The set of enabled SSL cipher suites. |
| | | */ |
| | | SortedSet<String> getSslCipherSuites(); |
| | | |
| | | /** |
| | | * Return a new {@link CryptoSuite} for the cipher and key. |
| | | * |
| | | * @return a new {@link CryptoSuite} for the cipher and key |
| | | * @param cipherTransformation cipher transformation string specification |
| | | * @param cipherKeyLength length of key in bits |
| | | */ |
| | | CryptoSuite newCryptoSuite(String cipherTransformation, int cipherKeyLength); |
| | | } |
| | |
| | | ERR_SCHEMA_PARSE_LINE_600=Ignoring schema definition '%s' because the following error occurred while \ |
| | | it was being parsed: %s |
ERR_SCHEMA_COULD_NOT_PARSE_DEFINITION_601=Schema definition could not be parsed as a valid attribute value
| | | ERR_CLEARTEXT_BACKEND_FOR_INDEX_CONFIDENTIALITY_602=Attribute %s is set as confidential on a backend \ |
| | | whose entries are still cleartext. Enable confidentiality on the backend first |
| | | ERR_CONFIG_INDEX_CANNOT_PROTECT_BOTH_603=The attribute '%s' cannot enable confidentiality for keys and values \ |
| | | at the same time |
| | | ERR_CANNOT_ENCODE_ENTRY_604=Cannot encode entry for writing on storage: %s |
| | | ERR_CANNOT_DECODE_ENTRY_605=Input stream ended unexpectedly while decoding entry |
| | | ERR_BACKEND_CANNOT_CHANGE_CONFIDENTIALITY_606=Confidentiality cannot be disabled on suffix '%s' because the \ |
| | | following indexes have confidentiality still enabled: %s |
| | | NOTE_CONFIG_INDEX_CONFIDENTIALITY_REQUIRES_REBUILD_607=Changing confidentiality for index '%s' requires the index \ |
| | | to be rebuilt before it can be used again |
| | |
| | | Write operations to the backend, replication updates included, will fail until the free space rises above the threshold |
| | | NOTE_DISK_SPACE_RESTORED_751=The free space (%d bytes) on the disk containing directory %s is now above the \ |
| | | threshold |
ERR_CANNOT_HASH_DATA_752=Cannot compute a SHA-1 digest with the configured Java provider. Verify that java.security is properly configured
| | |
| | | import org.forgerock.opendj.ldap.DN; |
| | | import org.opends.server.DirectoryServerTestCase; |
| | | import org.opends.server.TestCaseUtils; |
| | | import org.opends.server.backends.pluggable.spi.TreeName; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.forgerock.opendj.ldap.schema.AttributeType; |
| | | import org.opends.server.types.*; |
| | |
| | | try (final LDIFReader reader = new LDIFReader(new LDIFImportConfig(new ByteArrayInputStream(originalLDIFBytes)))) |
| | | { |
| | | Entry entryBefore, entryAfter; |
| | | DataConfig dataConfig = new DataConfig.Builder().compress(false).encode(false).build(); |
| | | ID2Entry id2entry = new ID2Entry(new TreeName("o=test", "id2entry"), dataConfig); |
| | | while ((entryBefore = reader.readEntry(false)) != null) { |
| | | ByteString bytes = ID2Entry.entryToDatabase(entryBefore, |
| | | new DataConfig(false, false, null)); |
| | | ByteString bytes = id2entry.entryToDatabase(entryBefore, dataConfig); |
| | | |
| | | entryAfter = ID2Entry.entryFromDatabase(bytes, |
| | | DirectoryServer.getDefaultCompressedSchema()); |
| | | entryAfter = id2entry.entryFromDatabase(bytes, DirectoryServer.getDefaultCompressedSchema()); |
| | | |
| | | // check DN and number of attributes |
| | | assertEquals(entryBefore.getAttributes().size(), entryAfter |