| | |
| | | import org.opends.server.admin.std.server.LocalDBIndexCfg; |
| | | import org.opends.server.api.DiskSpaceMonitorHandler; |
| | | import org.opends.server.backends.jeb.*; |
| | | import org.opends.server.backends.jeb.RebuildConfig.RebuildMode; |
| | | import org.opends.server.config.ConfigException; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.extensions.DiskSpaceMonitor; |
| | |
| | | import com.sleepycat.je.*; |
| | | import com.sleepycat.util.PackedInteger; |
| | | |
| | | |
| | | /** |
| | | * This class provides the engine that performs both importing of LDIF files and |
| | | * the rebuilding of indexes. |
| | |
| | | //Defaults for LDIF reader buffers, min memory required to import and default |
| | | //size for byte buffers. |
| | | private static final int READER_WRITER_BUFFER_SIZE = 8 * KB; |
| | | private static final int MIN_DB_CACHE_MEMORY = MAX_DB_CACHE_SIZE + |
| | | MAX_DB_LOG_SIZE; |
| | | private static final int MIN_DB_CACHE_MEMORY = MAX_DB_CACHE_SIZE |
| | | + MAX_DB_LOG_SIZE; |
| | | private static final int BYTE_BUFFER_CAPACITY = 128; |
| | | |
| | | //Min and MAX sizes of phase one buffer. |
| | |
| | | |
| | | //Map of index keys to index buffers. Used to allocate sorted |
| | | //index buffers to a index writer thread. |
| | | private final |
| | | Map<IndexKey, BlockingQueue<IndexOutputBuffer>> indexKeyQueMap = |
| | | private final Map<IndexKey, BlockingQueue<IndexOutputBuffer>> indexKeyQueMap = |
| | | new ConcurrentHashMap<IndexKey, BlockingQueue<IndexOutputBuffer>>(); |
| | | |
| | | //Map of DB containers to index managers. Used to start phase 2. |
| | |
| | | //the index file writer tasks when the LDIF file has been done. |
| | | private final List<ScratchFileWriterTask> scratchFileWriterList; |
| | | |
| | | |
| | | //Map of DNs to Suffix objects. |
| | | private final Map<DN, Suffix> dnSuffixMap = new LinkedHashMap<DN, Suffix>(); |
| | | |
| | |
| | | //Number of phase one buffers |
| | | private int phaseOneBufferCount; |
| | | |
| | | |
| | | static |
| | | { |
| | | if ((dnType = DirectoryServer.getAttributeType("dn")) == null) |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Create a new import job with the specified rebuild index config. |
| | | * |
| | |
| | | this.threadCount = 1; |
| | | this.rebuildManager = new RebuildIndexManager(rebuildConfig, cfg); |
| | | this.indexCount = rebuildManager.getIndexCount(); |
| | | this.scratchFileWriterList = new ArrayList<ScratchFileWriterTask>( |
| | | indexCount); |
| | | this.scratchFileWriterList = |
| | | new ArrayList<ScratchFileWriterTask>(indexCount); |
| | | this.scratchFileWriterFutures = new CopyOnWriteArrayList<Future<?>>(); |
| | | |
| | | File parentDir; |
| | |
| | | recursiveDelete(tempDir); |
| | | if (!tempDir.exists() && !tempDir.mkdirs()) |
| | | { |
| | | Message message = ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String |
| | | .valueOf(tempDir)); |
| | | Message message = |
| | | ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String.valueOf(tempDir)); |
| | | throw new InitializationException(message); |
| | | } |
| | | this.skipDNValidation = true; |
| | | initializeDBEnv(envConfig); |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Create a new import job with the specified ldif import config. |
| | | * |
| | |
| | | this.clearedBackend = true; |
| | | } |
| | | } |
| | | this.scratchFileWriterList = new ArrayList<ScratchFileWriterTask>( |
| | | indexCount); |
| | | this.scratchFileWriterList = |
| | | new ArrayList<ScratchFileWriterTask>(indexCount); |
| | | this.scratchFileWriterFutures = new CopyOnWriteArrayList<Future<?>>(); |
| | | File parentDir; |
| | | if (importConfiguration.getTmpDirectory() == null) |
| | |
| | | recursiveDelete(tempDir); |
| | | if (!tempDir.exists() && !tempDir.mkdirs()) |
| | | { |
| | | Message message = ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String |
| | | .valueOf(tempDir)); |
| | | Message message = |
| | | ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String.valueOf(tempDir)); |
| | | throw new InitializationException(message); |
| | | } |
| | | skipDNValidation = importConfiguration.getSkipDNValidation(); |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | private int getTotalIndexCount(LocalDBBackendCfg localDBBackendCfg) |
| | | throws ConfigException |
| | | { |
| | |
| | | SortedSet<IndexType> types = index.getIndexType(); |
| | | if (types.contains(IndexType.EXTENSIBLE)) |
| | | { |
| | | indexes += types.size() - 1 |
| | | + index.getIndexExtensibleMatchingRule().size(); |
| | | indexes += |
| | | types.size() - 1 + index.getIndexExtensibleMatchingRule().size(); |
| | | } |
| | | else |
| | | { |
| | |
| | | return indexes; |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Return the suffix instance in the specified map that matches the specified |
| | | * DN. |
| | | * |
| | | * @param dn The DN to search for. |
| | | * @param map The map to search. |
| | | * @param dn |
| | | * The DN to search for. |
| | | * @param map |
| | | * The map to search. |
| | | * @return The suffix instance that matches the DN, or null if no match is |
| | | * found. |
| | | */ |
| | |
| | | Suffix suffix = null; |
| | | DN nodeDN = dn; |
| | | |
| | | while (suffix == null && nodeDN != null) { |
| | | while (suffix == null && nodeDN != null) |
| | | { |
| | | suffix = map.get(nodeDN); |
| | | if (suffix == null) |
| | | { |
| | |
| | | return suffix; |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Calculate buffer sizes and initialize JEB properties based on memory. |
| | | * |
| | |
| | | // scratch writers (1 per index). |
| | | calculateAvailableMemory(); |
| | | |
| | | final long usableMemory = availableMemory |
| | | - (indexCount * READER_WRITER_BUFFER_SIZE); |
| | | final long usableMemory = |
| | | availableMemory - (indexCount * READER_WRITER_BUFFER_SIZE); |
| | | |
| | | // We need caching when doing DN validation or rebuilding indexes. |
| | | if (!skipDNValidation || (rebuildManager != null)) |
| | |
| | | } |
| | | } |
| | | |
| | | final long phaseOneBufferMemory = usableMemory - dbCacheSize |
| | | - tmpEnvCacheSize; |
| | | final long phaseOneBufferMemory = |
| | | usableMemory - dbCacheSize - tmpEnvCacheSize; |
| | | final int oldThreadCount = threadCount; |
| | | while (true) |
| | | { |
| | | phaseOneBufferCount = 2 * indexCount * threadCount; |
| | | |
| | | // Scratch writers allocate 4 buffers per index as well. |
| | | final int totalPhaseOneBufferCount = phaseOneBufferCount |
| | | + (4 * indexCount); |
| | | final int totalPhaseOneBufferCount = |
| | | phaseOneBufferCount + (4 * indexCount); |
| | | bufferSize = (int) (phaseOneBufferMemory / totalPhaseOneBufferCount); |
| | | |
| | | if (bufferSize > MAX_BUFFER_SIZE) |
| | |
| | | // temp DB. |
| | | bufferSize = MAX_BUFFER_SIZE; |
| | | |
| | | final long extraMemory = phaseOneBufferMemory |
| | | - (totalPhaseOneBufferCount * bufferSize); |
| | | final long extraMemory = |
| | | phaseOneBufferMemory - (totalPhaseOneBufferCount * bufferSize); |
| | | if (!clearedBackend) |
| | | { |
| | | dbCacheSize += extraMemory / 2; |
| | |
| | | else |
| | | { |
| | | // Not enough memory. |
| | | final long minimumPhaseOneBufferMemory = totalPhaseOneBufferCount |
| | | * MIN_BUFFER_SIZE; |
| | | Message message = ERR_IMPORT_LDIF_LACK_MEM.get(usableMemory, |
| | | final long minimumPhaseOneBufferMemory = |
| | | totalPhaseOneBufferCount * MIN_BUFFER_SIZE; |
| | | Message message = |
| | | ERR_IMPORT_LDIF_LACK_MEM.get(usableMemory, |
| | | minimumPhaseOneBufferMemory + dbCacheSize + tmpEnvCacheSize); |
| | | throw new InitializationException(message); |
| | | } |
| | |
| | | |
| | | if (oldThreadCount != threadCount) |
| | | { |
| | | Message message = NOTE_JEB_IMPORT_ADJUST_THREAD_COUNT.get(oldThreadCount, |
| | | threadCount); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_ADJUST_THREAD_COUNT.get(oldThreadCount, threadCount); |
| | | logError(message); |
| | | } |
| | | |
| | | Message message = NOTE_JEB_IMPORT_LDIF_TOT_MEM_BUF.get( |
| | | availableMemory, phaseOneBufferCount); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_LDIF_TOT_MEM_BUF.get(availableMemory, |
| | | phaseOneBufferCount); |
| | | logError(message); |
| | | if (tmpEnvCacheSize > 0) |
| | | { |
| | |
| | | logError(message); |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Calculates the amount of available memory which can be used by this import, |
| | | * taking into account whether or not the import is running offline or online |
| | |
| | | } |
| | | else |
| | | { |
| | | configuredMemory = backendConfiguration.getDBCachePercent() |
| | | configuredMemory = |
| | | backendConfiguration.getDBCachePercent() |
| | | * Runtime.getRuntime().maxMemory() / 100; |
| | | } |
| | | |
| | | // Round up to minimum of 16MB (e.g. unit tests only use 2% cache). |
| | | totalAvailableMemory = Math.max(Math.min(usableMemory, configuredMemory), |
| | | 16 * MB); |
| | | totalAvailableMemory = |
| | | Math.max(Math.min(usableMemory, configuredMemory), 16 * MB); |
| | | } |
| | | else |
| | | { |
| | |
| | | availableMemory = (totalAvailableMemory * importMemPct / 100); |
| | | } |
| | | |
| | | |
| | | private void initializeIndexBuffers() |
| | | { |
| | | for(int i = 0; i < phaseOneBufferCount; i++) |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private void initializeSuffixes() throws DatabaseException, |
| | | ConfigException, InitializationException |
| | | private void initializeSuffixes() throws DatabaseException, ConfigException, |
| | | InitializationException |
| | | { |
| | | for(EntryContainer ec : rootContainer.getEntryContainers()) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | //Mainly used to support multiple suffixes. Each index in each suffix gets |
| | | //an unique ID to identify which DB it needs to go to in phase two processing. |
| | | private void generateIndexID(Suffix suffix) |
| | | { |
| | | for(Map.Entry<AttributeType, AttributeIndex> mapEntry : |
| | | suffix.getAttrIndexMap().entrySet()) { |
| | | for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix |
| | | .getAttrIndexMap().entrySet()) |
| | | { |
| | | AttributeIndex attributeIndex = mapEntry.getValue(); |
| | | DatabaseContainer container; |
| | | if((container=attributeIndex.getEqualityIndex()) != null) { |
| | | if ((container = attributeIndex.getEqualityIndex()) != null) |
| | | { |
| | | int id = System.identityHashCode(container); |
| | | idContainerMap.putIfAbsent(id, container); |
| | | } |
| | | if((container=attributeIndex.getPresenceIndex()) != null) { |
| | | if ((container = attributeIndex.getPresenceIndex()) != null) |
| | | { |
| | | int id = System.identityHashCode(container); |
| | | idContainerMap.putIfAbsent(id, container); |
| | | } |
| | | if((container=attributeIndex.getSubstringIndex()) != null) { |
| | | if ((container = attributeIndex.getSubstringIndex()) != null) |
| | | { |
| | | int id = System.identityHashCode(container); |
| | | idContainerMap.putIfAbsent(id, container); |
| | | } |
| | | if((container=attributeIndex.getOrderingIndex()) != null) { |
| | | if ((container = attributeIndex.getOrderingIndex()) != null) |
| | | { |
| | | int id = System.identityHashCode(container); |
| | | idContainerMap.putIfAbsent(id, container); |
| | | } |
| | | if((container=attributeIndex.getApproximateIndex()) != null) { |
| | | if ((container = attributeIndex.getApproximateIndex()) != null) |
| | | { |
| | | int id = System.identityHashCode(container); |
| | | idContainerMap.putIfAbsent(id, container); |
| | | } |
| | | Map<String,Collection<Index>> extensibleMap = |
| | | attributeIndex.getExtensibleIndexes(); |
| | | if(!extensibleMap.isEmpty()) { |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | Collection<Index> subIndexes = |
| | | attributeIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | if(subIndexes != null) { |
| | | for(DatabaseContainer subIndex : subIndexes) { |
| | | if (subIndexes != null) |
| | | { |
| | | for (DatabaseContainer subIndex : subIndexes) |
| | | { |
| | | int id = System.identityHashCode(subIndex); |
| | | idContainerMap.putIfAbsent(id, subIndex); |
| | | } |
| | |
| | | Collection<Index> sharedIndexes = |
| | | attributeIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SHARED); |
| | | if(sharedIndexes !=null) { |
| | | for(DatabaseContainer sharedIndex : sharedIndexes) { |
| | | if (sharedIndexes != null) |
| | | { |
| | | for (DatabaseContainer sharedIndex : sharedIndexes) |
| | | { |
| | | int id = System.identityHashCode(sharedIndex); |
| | | idContainerMap.putIfAbsent(id, sharedIndex); |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private Suffix getSuffix(EntryContainer entryContainer) |
| | | throws ConfigException, InitializationException { |
| | | throws ConfigException, InitializationException |
| | | { |
| | | DN baseDN = entryContainer.getBaseDN(); |
| | | EntryContainer sourceEntryContainer = null; |
| | | List<DN> includeBranches = new ArrayList<DN>(); |
| | | List<DN> excludeBranches = new ArrayList<DN>(); |
| | | |
| | | if(!importConfiguration.appendToExistingData() && |
| | | !importConfiguration.clearBackend()) |
| | | if (!importConfiguration.appendToExistingData() |
| | | && !importConfiguration.clearBackend()) |
| | | { |
| | | for(DN dn : importConfiguration.getExcludeBranches()) |
| | | { |
| | |
| | | if(includeBranches.isEmpty()) |
| | | { |
| | | /* |
| | | There are no branches in the explicitly defined include list under |
| | | this base DN. Skip this base DN all together. |
| | | * There are no branches in the explicitly defined include list under |
| | | * this base DN. Skip this base DN all together. |
| | | */ |
| | | |
| | | return null; |
| | |
| | | } |
| | | } |
| | | |
| | | if((includeBranches.size() == 1) && |
| | | excludeBranches.isEmpty() && |
| | | includeBranches.get(0).equals(baseDN)) |
| | | if ((includeBranches.size() == 1) && excludeBranches.isEmpty() |
| | | && includeBranches.get(0).equals(baseDN)) |
| | | { |
| | | // This entire base DN is explicitly included in the import with |
| | | // no exclude branches that we need to migrate. Just clear the entry |
| | |
| | | // Create a temp entry container |
| | | sourceEntryContainer = entryContainer; |
| | | entryContainer = |
| | | rootContainer.openEntryContainer(baseDN, |
| | | baseDN.toNormalizedString() + |
| | | "_importTmp"); |
| | | rootContainer.openEntryContainer(baseDN, baseDN |
| | | .toNormalizedString() |
| | | + "_importTmp"); |
| | | } |
| | | } |
| | | } |
| | |
| | | includeBranches, excludeBranches); |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Rebuild the indexes using the specified rootcontainer. |
| | | * |
| | | * @param rootContainer The rootcontainer to rebuild indexes in. |
| | | * |
| | | * @throws ConfigException If a configuration error occurred. |
| | | * @throws InitializationException If an initialization error occurred. |
| | | * @throws JebException If the JEB database had an error. |
| | | * @throws InterruptedException If an interrupted error occurred. |
| | | * @throws ExecutionException If an execution error occurred. |
| | | * @param rootContainer |
| | | * The rootcontainer to rebuild indexes in. |
| | | * @throws ConfigException |
| | | * If a configuration error occurred. |
| | | * @throws InitializationException |
| | | * If an initialization error occurred. |
| | | * @throws JebException |
| | | * If the JEB database had an error. |
| | | * @throws InterruptedException |
| | | * If an interrupted error occurred. |
| | | * @throws ExecutionException |
| | | * If an execution error occurred. |
| | | */ |
| | | public void |
| | | rebuildIndexes(RootContainer rootContainer) throws ConfigException, |
| | | InitializationException, JebException, |
| | | public void rebuildIndexes(RootContainer rootContainer) |
| | | throws ConfigException, InitializationException, JebException, |
| | | InterruptedException, ExecutionException |
| | | { |
| | | this.rootContainer = rootContainer; |
| | | long startTime = System.currentTimeMillis(); |
| | | |
| | | DiskSpaceMonitor tmpMonitor = new DiskSpaceMonitor( |
| | | backendConfiguration.getBackendId() + |
| | | " backend index rebuild tmp directory", |
| | | tempDir, backendConfiguration.getDiskLowThreshold(), |
| | | backendConfiguration.getDiskFullThreshold(), 5, |
| | | TimeUnit.SECONDS, this); |
| | | DiskSpaceMonitor tmpMonitor = |
| | | new DiskSpaceMonitor(backendConfiguration.getBackendId() |
| | | + " backend index rebuild tmp directory", tempDir, |
| | | backendConfiguration.getDiskLowThreshold(), backendConfiguration |
| | | .getDiskFullThreshold(), 5, TimeUnit.SECONDS, this); |
| | | tmpMonitor.initializeMonitorProvider(null); |
| | | DirectoryServer.registerMonitorProvider(tmpMonitor); |
| | | File parentDirectory = |
| | | getFileForPath(backendConfiguration.getDBDirectory()); |
| | | File backendDirectory = |
| | | new File(parentDirectory, backendConfiguration.getBackendId()); |
| | | DiskSpaceMonitor dbMonitor = new DiskSpaceMonitor( |
| | | backendConfiguration.getBackendId() + |
| | | " backend index rebuild DB directory", |
| | | backendDirectory, backendConfiguration.getDiskLowThreshold(), |
| | | backendConfiguration.getDiskFullThreshold(), 5, |
| | | TimeUnit.SECONDS, this); |
| | | DiskSpaceMonitor dbMonitor = |
| | | new DiskSpaceMonitor(backendConfiguration.getBackendId() |
| | | + " backend index rebuild DB directory", backendDirectory, |
| | | backendConfiguration.getDiskLowThreshold(), backendConfiguration |
| | | .getDiskFullThreshold(), 5, TimeUnit.SECONDS, this); |
| | | dbMonitor.initializeMonitorProvider(null); |
| | | DirectoryServer.registerMonitorProvider(dbMonitor); |
| | | |
| | |
| | | } |
| | | finally |
| | | { |
| | | DirectoryServer.deregisterMonitorProvider( |
| | | tmpMonitor.getMonitorInstanceName()); |
| | | DirectoryServer.deregisterMonitorProvider( |
| | | dbMonitor.getMonitorInstanceName()); |
| | | DirectoryServer.deregisterMonitorProvider(tmpMonitor |
| | | .getMonitorInstanceName()); |
| | | DirectoryServer.deregisterMonitorProvider(dbMonitor |
| | | .getMonitorInstanceName()); |
| | | tmpMonitor.finalizeMonitorProvider(); |
| | | dbMonitor.finalizeMonitorProvider(); |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Import a LDIF using the specified root container. |
| | | * |
| | | * @param rootContainer The root container to use during the import. |
| | | * |
| | | * @param rootContainer |
| | | * The root container to use during the import. |
| | | * @return A LDIF result. |
| | | * @throws ConfigException If the import failed because of an configuration |
| | | * error. |
| | | * @throws InitializationException If the import failed because of an |
| | | * initialization error. |
| | | * @throws JebException If the import failed due to a database error. |
| | | * @throws InterruptedException If the import failed due to an interrupted |
| | | * error. |
| | | * @throws ExecutionException If the import failed due to an execution error. |
| | | * @throws ConfigException |
| | | * If the import failed because of an configuration error. |
| | | * @throws InitializationException |
| | | * If the import failed because of an initialization error. |
| | | * @throws JebException |
| | | * If the import failed due to a database error. |
| | | * @throws InterruptedException |
| | | * If the import failed due to an interrupted error. |
| | | * @throws ExecutionException |
| | | * If the import failed due to an execution error. |
| | | */ |
| | | public LDIFImportResult |
| | | processImport(RootContainer rootContainer) throws ConfigException, |
| | | InitializationException, JebException, |
| | | public LDIFImportResult processImport(RootContainer rootContainer) |
| | | throws ConfigException, InitializationException, JebException, |
| | | InterruptedException, ExecutionException |
| | | { |
| | | this.rootContainer = rootContainer; |
| | | try |
| | | { |
| | | reader = new LDIFReader(importConfiguration, rootContainer, |
| | | reader = |
| | | new LDIFReader(importConfiguration, rootContainer, |
| | | READER_WRITER_BUFFER_SIZE); |
| | | } |
| | | catch (IOException ioe) |
| | |
| | | throw new InitializationException(message, ioe); |
| | | } |
| | | |
| | | DiskSpaceMonitor tmpMonitor = new DiskSpaceMonitor( |
| | | backendConfiguration.getBackendId() + " backend import tmp directory", |
| | | tempDir, backendConfiguration.getDiskLowThreshold(), |
| | | backendConfiguration.getDiskFullThreshold(), 5, |
| | | TimeUnit.SECONDS, this); |
| | | DiskSpaceMonitor tmpMonitor = |
| | | new DiskSpaceMonitor(backendConfiguration.getBackendId() |
| | | + " backend import tmp directory", tempDir, backendConfiguration |
| | | .getDiskLowThreshold(), |
| | | backendConfiguration.getDiskFullThreshold(), 5, TimeUnit.SECONDS, |
| | | this); |
| | | tmpMonitor.initializeMonitorProvider(null); |
| | | DirectoryServer.registerMonitorProvider(tmpMonitor); |
| | | File parentDirectory = |
| | | getFileForPath(backendConfiguration.getDBDirectory()); |
| | | File backendDirectory = |
| | | new File(parentDirectory, backendConfiguration.getBackendId()); |
| | | DiskSpaceMonitor dbMonitor = new DiskSpaceMonitor( |
| | | backendConfiguration.getBackendId() + " backend import DB directory", |
| | | backendDirectory, backendConfiguration.getDiskLowThreshold(), |
| | | backendConfiguration.getDiskFullThreshold(), 5, |
| | | TimeUnit.SECONDS, this); |
| | | DiskSpaceMonitor dbMonitor = |
| | | new DiskSpaceMonitor(backendConfiguration.getBackendId() |
| | | + " backend import DB directory", backendDirectory, |
| | | backendConfiguration.getDiskLowThreshold(), backendConfiguration |
| | | .getDiskFullThreshold(), 5, TimeUnit.SECONDS, this); |
| | | dbMonitor.initializeMonitorProvider(null); |
| | | DirectoryServer.registerMonitorProvider(dbMonitor); |
| | | |
| | | try |
| | | { |
| | | Message message = NOTE_JEB_IMPORT_STARTING.get( |
| | | DirectoryServer.getVersionString(), BUILD_ID, REVISION_NUMBER); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_STARTING.get(DirectoryServer.getVersionString(), |
| | | BUILD_ID, REVISION_NUMBER); |
| | | logError(message); |
| | | message = NOTE_JEB_IMPORT_THREAD_COUNT.get(threadCount); |
| | | logError(message); |
| | |
| | | long finishTime = System.currentTimeMillis(); |
| | | long importTime = (finishTime - startTime); |
| | | float rate = 0; |
| | | message = NOTE_JEB_IMPORT_PHASE_STATS.get(importTime / 1000, |
| | | message = |
| | | NOTE_JEB_IMPORT_PHASE_STATS.get(importTime / 1000, |
| | | (phaseOneFinishTime - startTime) / 1000, |
| | | (phaseTwoFinishTime - phaseTwoTime) / 1000); |
| | | logError(message); |
| | | if (importTime > 0) rate = 1000f * reader.getEntriesRead() / importTime; |
| | | message = NOTE_JEB_IMPORT_FINAL_STATUS.get(reader.getEntriesRead(), |
| | | importCount.get(), reader.getEntriesIgnored(), |
| | | reader.getEntriesRejected(), migratedCount, importTime / 1000, rate); |
| | | if (importTime > 0) |
| | | rate = 1000f * reader.getEntriesRead() / importTime; |
| | | message = |
| | | NOTE_JEB_IMPORT_FINAL_STATUS.get(reader.getEntriesRead(), importCount |
| | | .get(), reader.getEntriesIgnored(), reader.getEntriesRejected(), |
| | | migratedCount, importTime / 1000, rate); |
| | | logError(message); |
| | | } |
| | | finally |
| | | { |
| | | reader.close(); |
| | | DirectoryServer.deregisterMonitorProvider( |
| | | tmpMonitor.getMonitorInstanceName()); |
| | | DirectoryServer.deregisterMonitorProvider( |
| | | dbMonitor.getMonitorInstanceName()); |
| | | DirectoryServer.deregisterMonitorProvider(tmpMonitor |
| | | .getMonitorInstanceName()); |
| | | DirectoryServer.deregisterMonitorProvider(dbMonitor |
| | | .getMonitorInstanceName()); |
| | | tmpMonitor.finalizeMonitorProvider(); |
| | | dbMonitor.finalizeMonitorProvider(); |
| | | } |
| | | return new LDIFImportResult(reader.getEntriesRead(), |
| | | reader.getEntriesRejected(), reader.getEntriesIgnored()); |
| | | return new LDIFImportResult(reader.getEntriesRead(), reader |
| | | .getEntriesRejected(), reader.getEntriesIgnored()); |
| | | } |
| | | |
| | | |
| | | private void recursiveDelete(File dir) |
| | | { |
| | | if(dir.listFiles() != null) |
| | |
| | | dir.delete(); |
| | | } |
| | | |
| | | |
| | | private void switchContainers() |
| | | throws DatabaseException, JebException, InitializationException |
| | | private void switchContainers() throws DatabaseException, JebException, |
| | | InitializationException |
| | | { |
| | | |
| | | for(Suffix suffix : dnSuffixMap.values()) { |
| | | for (Suffix suffix : dnSuffixMap.values()) |
| | | { |
| | | DN baseDN = suffix.getBaseDN(); |
| | | EntryContainer entryContainer = |
| | | suffix.getSrcEntryContainer(); |
| | | if(entryContainer != null) { |
| | | EntryContainer entryContainer = suffix.getSrcEntryContainer(); |
| | | if (entryContainer != null) |
| | | { |
| | | EntryContainer needRegisterContainer = |
| | | rootContainer.unregisterEntryContainer(baseDN); |
| | | |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | private void setIndexesTrusted(boolean trusted) throws JebException |
| | | { |
| | | try { |
| | | for(Suffix s : dnSuffixMap.values()) { |
| | | try |
| | | { |
| | | for (Suffix s : dnSuffixMap.values()) |
| | | { |
| | | s.setIndexesTrusted(trusted); |
| | | } |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | private void phaseOne() throws InterruptedException, ExecutionException |
| | | { |
| | | initializeIndexBuffers(); |
| | | FirstPhaseProgressTask progressTask = new FirstPhaseProgressTask(); |
| | | ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor( |
| | | 1); |
| | | ScheduledThreadPoolExecutor timerService = |
| | | new ScheduledThreadPoolExecutor(1); |
| | | timerService.scheduleAtFixedRate(progressTask, TIMER_INTERVAL, |
| | | TIMER_INTERVAL, TimeUnit.MILLISECONDS); |
| | | scratchFileWriterService = Executors.newFixedThreadPool(2 * indexCount); |
| | |
| | | freeBufferQueue.clear(); |
| | | } |
| | | |
| | | |
| | | |
| | | private void phaseTwo() throws InterruptedException, ExecutionException |
| | | { |
| | | SecondPhaseProgressTask progress2Task = new SecondPhaseProgressTask( |
| | | reader.getEntriesRead()); |
| | | ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor( |
| | | 1); |
| | | SecondPhaseProgressTask progress2Task = |
| | | new SecondPhaseProgressTask(reader.getEntriesRead()); |
| | | ScheduledThreadPoolExecutor timerService = |
| | | new ScheduledThreadPoolExecutor(1); |
| | | timerService.scheduleAtFixedRate(progress2Task, TIMER_INTERVAL, |
| | | TIMER_INTERVAL, TimeUnit.MILLISECONDS); |
| | | try |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | private void processIndexFiles() |
| | | throws InterruptedException, ExecutionException |
| | | private void processIndexFiles() throws InterruptedException, |
| | | ExecutionException |
| | | { |
| | | if(bufferCount.get() == 0) |
| | | { |
| | |
| | | int buffers; |
| | | while (true) |
| | | { |
| | | final List<IndexManager> totList = new ArrayList<IndexManager>( |
| | | DNIndexMgrList); |
| | | final List<IndexManager> totList = |
| | | new ArrayList<IndexManager>(DNIndexMgrList); |
| | | totList.addAll(indexMgrList); |
| | | Collections.sort(totList, Collections.reverseOrder()); |
| | | |
| | |
| | | // processing of smaller indexes. |
| | | dbThreads = Math.max(2, dbThreads); |
| | | |
| | | Message message = NOTE_JEB_IMPORT_LDIF_PHASE_TWO_MEM_REPORT.get( |
| | | availableMemory, readAheadSize, buffers); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_LDIF_PHASE_TWO_MEM_REPORT.get(availableMemory, |
| | | readAheadSize, buffers); |
| | | logError(message); |
| | | |
| | | // Start indexing tasks. |
| | |
| | | dbService.shutdown(); |
| | | } |
| | | |
| | | |
| | | |
| | | private void stopScratchFileWriters() |
| | | { |
| | | IndexOutputBuffer indexBuffer = new IndexOutputBuffer(0); |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Task used to migrate excluded branch. |
| | | */ |
| | |
| | | @Override |
| | | public Void call() throws Exception |
| | | { |
| | | for(Suffix suffix : dnSuffixMap.values()) { |
| | | for (Suffix suffix : dnSuffixMap.values()) |
| | | { |
| | | EntryContainer entryContainer = suffix.getSrcEntryContainer(); |
| | | if(entryContainer != null && |
| | | !suffix.getExcludeBranches().isEmpty()) { |
| | | if (entryContainer != null && !suffix.getExcludeBranches().isEmpty()) |
| | | { |
| | | DatabaseEntry key = new DatabaseEntry(); |
| | | DatabaseEntry data = new DatabaseEntry(); |
| | | LockMode lockMode = LockMode.DEFAULT; |
| | | OperationStatus status; |
| | | Message message = NOTE_JEB_IMPORT_MIGRATION_START.get( |
| | | "excluded", String.valueOf(suffix.getBaseDN())); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_MIGRATION_START.get("excluded", String |
| | | .valueOf(suffix.getBaseDN())); |
| | | logError(message); |
| | | Cursor cursor = |
| | | entryContainer.getDN2ID().openCursor(null, |
| | | CursorConfig.READ_COMMITTED); |
| | | Comparator<byte[]> comparator = |
| | | entryContainer.getDN2ID().getComparator(); |
| | | try { |
| | | for(DN excludedDN : suffix.getExcludeBranches()) { |
| | | byte[] bytes = JebFormat.dnToDNKey( |
| | | excludedDN, suffix.getBaseDN().getNumComponents()); |
| | | try |
| | | { |
| | | for (DN excludedDN : suffix.getExcludeBranches()) |
| | | { |
| | | byte[] bytes = |
| | | JebFormat.dnToDNKey(excludedDN, suffix.getBaseDN() |
| | | .getNumComponents()); |
| | | key.setData(bytes); |
| | | status = cursor.getSearchKeyRange(key, data, lockMode); |
| | | if(status == OperationStatus.SUCCESS && |
| | | Arrays.equals(key.getData(), bytes)) { |
| | | if (status == OperationStatus.SUCCESS |
| | | && Arrays.equals(key.getData(), bytes)) |
| | | { |
| | | // This is the base entry for a branch that was excluded in the |
| | | // import so we must migrate all entries in this branch over to |
| | | // the new entry container. |
| | | byte[] end = Arrays.copyOf(bytes, bytes.length+1); |
| | | end[end.length-1] = 0x01; |
| | | |
| | | while(status == OperationStatus.SUCCESS && |
| | | comparator.compare(key.getData(), end) < 0 && |
| | | !importConfiguration.isCancelled() && |
| | | !isCanceled) { |
| | | while (status == OperationStatus.SUCCESS |
| | | && comparator.compare(key.getData(), end) < 0 |
| | | && !importConfiguration.isCancelled() && !isCanceled) |
| | | { |
| | | EntryID id = new EntryID(data); |
| | | Entry entry = entryContainer.getID2Entry().get(null, |
| | | id, LockMode.DEFAULT); |
| | | processEntry(entry, rootContainer.getNextEntryID(), |
| | | suffix); |
| | | Entry entry = |
| | | entryContainer.getID2Entry().get(null, id, |
| | | LockMode.DEFAULT); |
| | | processEntry(entry, rootContainer.getNextEntryID(), suffix); |
| | | migratedCount++; |
| | | status = cursor.getNext(key, data, lockMode); |
| | | } |
| | |
| | | catch (Exception e) |
| | | { |
| | | message = |
| | | ERR_JEB_IMPORT_LDIF_MIGRATE_EXCLUDED_TASK_ERR.get(e.getMessage()); |
| | | ERR_JEB_IMPORT_LDIF_MIGRATE_EXCLUDED_TASK_ERR.get(e |
| | | .getMessage()); |
| | | logError(message); |
| | | isCanceled =true; |
| | | throw e; |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Task to migrate existing entries. |
| | | */ |
| | |
| | | @Override |
| | | public Void call() throws Exception |
| | | { |
| | | for(Suffix suffix : dnSuffixMap.values()) { |
| | | for (Suffix suffix : dnSuffixMap.values()) |
| | | { |
| | | List<byte[]> includeBranches = |
| | | new ArrayList<byte[]>(suffix.getIncludeBranches().size()); |
| | | for(DN includeBranch : suffix.getIncludeBranches()) |
| | | { |
| | | if(includeBranch.isDescendantOf(suffix.getBaseDN())) |
| | | { |
| | | includeBranches.add(JebFormat.dnToDNKey( |
| | | includeBranch, suffix.getBaseDN().getNumComponents())); |
| | | includeBranches.add(JebFormat.dnToDNKey(includeBranch, suffix |
| | | .getBaseDN().getNumComponents())); |
| | | } |
| | | } |
| | | |
| | | EntryContainer entryContainer = suffix.getSrcEntryContainer(); |
| | | if(entryContainer != null && |
| | | !suffix.getIncludeBranches().isEmpty()) { |
| | | if (entryContainer != null && !suffix.getIncludeBranches().isEmpty()) |
| | | { |
| | | DatabaseEntry key = new DatabaseEntry(); |
| | | DatabaseEntry data = new DatabaseEntry(); |
| | | LockMode lockMode = LockMode.DEFAULT; |
| | | OperationStatus status; |
| | | Message message = NOTE_JEB_IMPORT_MIGRATION_START.get( |
| | | "existing", String.valueOf(suffix.getBaseDN())); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_MIGRATION_START.get("existing", String |
| | | .valueOf(suffix.getBaseDN())); |
| | | logError(message); |
| | | Cursor cursor = |
| | | entryContainer.getDN2ID().openCursor(null, |
| | | null); |
| | | try { |
| | | Cursor cursor = entryContainer.getDN2ID().openCursor(null, null); |
| | | try |
| | | { |
| | | status = cursor.getFirst(key, data, lockMode); |
| | | while(status == OperationStatus.SUCCESS && |
| | | !importConfiguration.isCancelled() && !isCanceled) { |
| | | while (status == OperationStatus.SUCCESS |
| | | && !importConfiguration.isCancelled() && !isCanceled) |
| | | { |
| | | |
| | | boolean found = false; |
| | | for(byte[] includeBranch : includeBranches) |
| | |
| | | break; |
| | | } |
| | | } |
| | | if(!found) { |
| | | if (!found) |
| | | { |
| | | EntryID id = new EntryID(data); |
| | | Entry entry = |
| | | entryContainer.getID2Entry().get(null, |
| | | id, LockMode.DEFAULT); |
| | | entryContainer.getID2Entry() |
| | | .get(null, id, LockMode.DEFAULT); |
| | | processEntry(entry, rootContainer.getNextEntryID(),suffix); |
| | | migratedCount++; |
| | | status = cursor.getNext(key, data, lockMode); |
| | | } else { |
| | | } |
| | | else |
| | | { |
| | | // This is the base entry for a branch that will be included |
| | | // in the import so we don't want to copy the branch to the |
| | | // new entry container. |
| | | |
| | | /** |
| | | * Advance the cursor to next entry at the same level in the |
| | | * DIT |
| | | * skipping all the entries in this branch. |
| | | * Set the next starting value to a value of equal length but |
| | | * slightly greater than the previous DN. Since keys are |
| | | * compared in reverse order we must set the first byte |
| | | * (the comma). |
| | | * No possibility of overflow here. |
| | | * Advance the cursor to next entry at the same level in the DIT |
| | | * skipping all the entries in this branch. Set the next |
| | | * starting value to a value of equal length but slightly |
| | | * greater than the previous DN. Since keys are compared in |
| | | * reverse order we must set the first byte (the comma). No |
| | | * possibility of overflow here. |
| | | */ |
| | | byte[] begin = Arrays.copyOf(key.getData(), key.getSize()+1); |
| | | begin[begin.length-1] = 0x01; |
| | |
| | | catch(Exception e) |
| | | { |
| | | message = |
| | | ERR_JEB_IMPORT_LDIF_MIGRATE_EXISTING_TASK_ERR.get(e.getMessage()); |
| | | ERR_JEB_IMPORT_LDIF_MIGRATE_EXISTING_TASK_ERR.get(e |
| | | .getMessage()); |
| | | logError(message); |
| | | isCanceled =true; |
| | | throw e; |
| | |
| | | private Entry oldEntry; |
| | | private EntryID entryID; |
| | | |
| | | |
| | | /** |
| | | * {@inheritDoc} |
| | | */ |
| | |
| | | return null; |
| | | } |
| | | |
| | | |
| | | void processEntry(Entry entry, Suffix suffix) |
| | | throws DatabaseException, DirectoryException, |
| | | JebException, InterruptedException |
| | | void processEntry(Entry entry, Suffix suffix) throws DatabaseException, |
| | | DirectoryException, JebException, InterruptedException |
| | | |
| | | { |
| | | DN entryDN = entry.getDN(); |
| | |
| | | importCount.getAndIncrement(); |
| | | } |
| | | |
| | | |
| | | void |
| | | processAllIndexes(Suffix suffix, Entry entry, EntryID entryID) throws |
| | | DatabaseException, DirectoryException, JebException, |
| | | void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID) |
| | | throws DatabaseException, DirectoryException, JebException, |
| | | InterruptedException |
| | | { |
| | | |
| | | for(Map.Entry<AttributeType, AttributeIndex> mapEntry : |
| | | suffix.getAttrIndexMap().entrySet()) { |
| | | for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix |
| | | .getAttrIndexMap().entrySet()) |
| | | { |
| | | AttributeType attributeType = mapEntry.getKey(); |
| | | AttributeIndex attributeIndex = mapEntry.getValue(); |
| | | Index index; |
| | | if((index=attributeIndex.getEqualityIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.EQUALITY, |
| | | index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getPresenceIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.PRESENCE, |
| | | index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getSubstringIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.SUBSTRING, |
| | | index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getOrderingIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.ORDERING, |
| | | index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getApproximateIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.APPROXIMATE, |
| | | index.getIndexEntryLimit())); |
| | | } |
| | | for(VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) { |
| | | Transaction transaction = null; |
| | | vlvIdx.addEntry(transaction, entryID, entry); |
| | | } |
| | | Map<String,Collection<Index>> extensibleMap = |
| | | attributeIndex.getExtensibleIndexes(); |
| | | if(!extensibleMap.isEmpty()) { |
| | | Collection<Index> subIndexes = |
| | | attributeIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | if(subIndexes != null) { |
| | | for(Index subIndex: subIndexes) { |
| | | processAttribute(subIndex, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.EX_SUBSTRING, |
| | | subIndex.getIndexEntryLimit())); |
| | | fillIndexKey(suffix, mapEntry, entry, attributeType, entryID); |
| | | } |
| | | } |
| | | Collection<Index> sharedIndexes = |
| | | attributeIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SHARED); |
| | | if(sharedIndexes !=null) { |
| | | for(Index sharedIndex:sharedIndexes) { |
| | | processAttribute(sharedIndex, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.EX_SHARED, |
| | | sharedIndex.getIndexEntryLimit())); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | |
| | | @Override |
| | | void processAttribute(Index index, Entry entry, EntryID entryID, |
| | | IndexKey indexKey) throws DatabaseException, |
| | | InterruptedException |
| | | IndexKey indexKey) throws DatabaseException, InterruptedException |
| | | { |
| | | if(oldEntry != null) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * This task performs phase reading and processing of the entries read from |
| | | * the LDIF file(s). This task is used if the append flag wasn't specified. |
| | |
| | | private DatabaseEntry keyEntry = new DatabaseEntry(), |
| | | valEntry = new DatabaseEntry(); |
| | | |
| | | |
| | | /** |
| | | * {@inheritDoc} |
| | | */ |
| | |
| | | return null; |
| | | } |
| | | |
| | | |
| | | void processEntry(Entry entry, EntryID entryID, Suffix suffix) |
| | | throws DatabaseException, DirectoryException, |
| | | JebException, InterruptedException |
| | | throws DatabaseException, DirectoryException, JebException, |
| | | InterruptedException |
| | | |
| | | { |
| | | DN entryDN = entry.getDN(); |
| | |
| | | { |
| | | //Perform parent checking. |
| | | DN parentDN = suffix.getEntryContainer().getParentWithinBase(entryDN); |
| | | if (parentDN != null) { |
| | | if (!suffix.isParentProcessed(parentDN, tmpEnv, clearedBackend)) { |
| | | if (parentDN != null) |
| | | { |
| | | if (!suffix.isParentProcessed(parentDN, tmpEnv, clearedBackend)) |
| | | { |
| | | Message message = |
| | | ERR_JEB_IMPORT_PARENT_NOT_FOUND.get(parentDN.toString()); |
| | | reader.rejectEntry(entry, message); |
| | |
| | | return true; |
| | | } |
| | | |
| | | |
| | | void |
| | | processIndexes(Suffix suffix, Entry entry, EntryID entryID) throws |
| | | DatabaseException, DirectoryException, JebException, |
| | | void processIndexes(Suffix suffix, Entry entry, EntryID entryID) |
| | | throws DatabaseException, DirectoryException, JebException, |
| | | InterruptedException |
| | | { |
| | | for(Map.Entry<AttributeType, AttributeIndex> mapEntry : |
| | | suffix.getAttrIndexMap().entrySet()) { |
| | | for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix |
| | | .getAttrIndexMap().entrySet()) |
| | | { |
| | | AttributeType attributeType = mapEntry.getKey(); |
| | | if(entry.hasAttribute(attributeType)) { |
| | | if (entry.hasAttribute(attributeType)) |
| | | { |
| | | fillIndexKey(suffix, mapEntry, entry, attributeType, entryID); |
| | | } |
| | | } |
| | | } |
| | | |
| | | void fillIndexKey(Suffix suffix, |
| | | Map.Entry<AttributeType, AttributeIndex> mapEntry, Entry entry, |
| | | AttributeType attributeType, EntryID entryID) throws DatabaseException, |
| | | InterruptedException, DirectoryException, JebException |
| | | { |
| | | AttributeIndex attributeIndex = mapEntry.getValue(); |
| | | Index index; |
| | | if((index=attributeIndex.getEqualityIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.EQUALITY, |
| | | index.getIndexEntryLimit())); |
| | | if ((index = attributeIndex.getEqualityIndex()) != null) |
| | | { |
| | | processAttribute(index, entry, entryID, new IndexKey(attributeType, |
| | | ImportIndexType.EQUALITY, index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getPresenceIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.PRESENCE, |
| | | index.getIndexEntryLimit())); |
| | | if ((index = attributeIndex.getPresenceIndex()) != null) |
| | | { |
| | | processAttribute(index, entry, entryID, new IndexKey(attributeType, |
| | | ImportIndexType.PRESENCE, index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getSubstringIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.SUBSTRING, |
| | | index.getIndexEntryLimit())); |
| | | if ((index = attributeIndex.getSubstringIndex()) != null) |
| | | { |
| | | processAttribute(index, entry, entryID, new IndexKey(attributeType, |
| | | ImportIndexType.SUBSTRING, index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getOrderingIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.ORDERING, |
| | | index.getIndexEntryLimit())); |
| | | if ((index = attributeIndex.getOrderingIndex()) != null) |
| | | { |
| | | processAttribute(index, entry, entryID, new IndexKey(attributeType, |
| | | ImportIndexType.ORDERING, index.getIndexEntryLimit())); |
| | | } |
| | | if((index=attributeIndex.getApproximateIndex()) != null) { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.APPROXIMATE, |
| | | index.getIndexEntryLimit())); |
| | | if ((index = attributeIndex.getApproximateIndex()) != null) |
| | | { |
| | | processAttribute(index, entry, entryID, new IndexKey(attributeType, |
| | | ImportIndexType.APPROXIMATE, index.getIndexEntryLimit())); |
| | | } |
| | | for(VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) { |
| | | for (VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) |
| | | { |
| | | Transaction transaction = null; |
| | | vlvIdx.addEntry(transaction, entryID, entry); |
| | | } |
| | | Map<String,Collection<Index>> extensibleMap = |
| | | attributeIndex.getExtensibleIndexes(); |
| | | if(!extensibleMap.isEmpty()) { |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | Collection<Index> subIndexes = |
| | | attributeIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | if(subIndexes != null) { |
| | | for(Index subIndex: subIndexes) { |
| | | processAttribute(subIndex, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.EX_SUBSTRING, |
| | | subIndex.getIndexEntryLimit())); |
| | | if (subIndexes != null) |
| | | { |
| | | for (Index subIndex : subIndexes) |
| | | { |
| | | processAttribute(subIndex, entry, entryID, new IndexKey( |
| | | attributeType, ImportIndexType.EX_SUBSTRING, subIndex |
| | | .getIndexEntryLimit())); |
| | | } |
| | | } |
| | | Collection<Index> sharedIndexes = |
| | | attributeIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SHARED); |
| | | if(sharedIndexes !=null) { |
| | | for(Index sharedIndex:sharedIndexes) { |
| | | processAttribute(sharedIndex, entry, entryID, |
| | | new IndexKey(attributeType, ImportIndexType.EX_SHARED, |
| | | sharedIndex.getIndexEntryLimit())); |
| | | if (sharedIndexes != null) |
| | | { |
| | | for (Index sharedIndex : sharedIndexes) |
| | | { |
| | | processAttribute(sharedIndex, entry, entryID, new IndexKey( |
| | | attributeType, ImportIndexType.EX_SHARED, sharedIndex |
| | | .getIndexEntryLimit())); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | void processAttribute(Index index, Entry entry, EntryID entryID, |
| | | IndexKey indexKey) throws DatabaseException, |
| | | InterruptedException |
| | | IndexKey indexKey) throws DatabaseException, InterruptedException |
| | | { |
| | | insertKeySet.clear(); |
| | | index.indexer.indexEntry(entry, insertKeySet); |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | void flushIndexBuffers() throws InterruptedException, |
| | | ExecutionException |
| | | void flushIndexBuffers() throws InterruptedException, ExecutionException |
| | | { |
| | | Set<Map.Entry<IndexKey, IndexOutputBuffer>> set = |
| | | indexBufferMap.entrySet(); |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | int |
| | | processKey(DatabaseContainer container, byte[] key, EntryID entryID, |
| | | int processKey(DatabaseContainer container, byte[] key, EntryID entryID, |
| | | IndexOutputBuffer.ComparatorBuffer<byte[]> comparator, |
| | | IndexKey indexKey, boolean insert) |
| | | throws InterruptedException |
| | | IndexKey indexKey, boolean insert) throws InterruptedException |
| | | { |
| | | IndexOutputBuffer indexBuffer = indexBufferMap.get(indexKey); |
| | | if (indexBuffer == null) |
| | |
| | | return id; |
| | | } |
| | | |
| | | |
| | | IndexOutputBuffer getNewIndexBuffer() throws InterruptedException |
| | | { |
| | | IndexOutputBuffer indexBuffer = freeBufferQueue.take(); |
| | | if(indexBuffer == null) |
| | | { |
| | | Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | Message message = |
| | | Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | "Index buffer processing error."); |
| | | throw new InterruptedException(message.toString()); |
| | | } |
| | | if(indexBuffer.isPoison()) |
| | | { |
| | | Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | Message message = |
| | | Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | "Cancel processing received."); |
| | | throw new InterruptedException(message.toString()); |
| | | } |
| | | return indexBuffer; |
| | | } |
| | | |
| | | |
| | | void processDN2ID(Suffix suffix, DN dn, EntryID entryID) |
| | | throws InterruptedException |
| | | { |
| | | DN2ID dn2id = suffix.getDN2ID(); |
| | | byte[] dnBytes = |
| | | JebFormat.dnToDNKey(dn, suffix.getBaseDN().getNumComponents()); |
| | | int id = processKey(dn2id, dnBytes, entryID, indexComparator, |
| | | new IndexKey(dnType, ImportIndexType.DN, 1), true); |
| | | int id = |
| | | processKey(dn2id, dnBytes, entryID, indexComparator, new IndexKey( |
| | | dnType, ImportIndexType.DN, 1), true); |
| | | idECMap.putIfAbsent(id, suffix.getEntryContainer()); |
| | | } |
| | | |
| | | |
| | | void processDN2URI(Suffix suffix, Entry oldEntry, Entry newEntry) |
| | | throws DatabaseException |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * This task reads sorted records from the temporary index scratch files, |
| | | * processes the records and writes the results to the index database. The |
| | | * DN index is treated differently then non-DN indexes. |
| | | * processes the records and writes the results to the index database. The DN |
| | | * index is treated differently then non-DN indexes. |
| | | */ |
| | | private final class IndexDBWriteTask implements Callable<Void> |
| | | { |
| | |
| | | private int ownedPermits; |
| | | private volatile boolean isRunning = false; |
| | | |
| | | |
| | | /** |
| | | * Creates a new index DB writer. |
| | | * |
| | |
| | | this.dbValue = new DatabaseEntry(); |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Initializes this task. |
| | | * |
| | |
| | | public void beginWriteTask() throws IOException |
| | | { |
| | | bufferFile = new RandomAccessFile(indexMgr.getBufferFile(), "r"); |
| | | bufferIndexFile = new DataInputStream(new BufferedInputStream( |
| | | new FileInputStream(indexMgr.getBufferIndexFile()))); |
| | | bufferIndexFile = |
| | | new DataInputStream(new BufferedInputStream(new FileInputStream( |
| | | indexMgr.getBufferIndexFile()))); |
| | | |
| | | remainingBuffers = indexMgr.getNumberOfBuffers(); |
| | | totalBatches = (remainingBuffers / maxPermits) + 1; |
| | |
| | | nextBufferID = 0; |
| | | ownedPermits = 0; |
| | | |
| | | Message message = NOTE_JEB_IMPORT_LDIF_INDEX_STARTED.get( |
| | | indexMgr.getBufferFileName(), remainingBuffers, totalBatches); |
| | | Message message = |
| | | NOTE_JEB_IMPORT_LDIF_INDEX_STARTED.get(indexMgr.getBufferFileName(), |
| | | remainingBuffers, totalBatches); |
| | | logError(message); |
| | | |
| | | indexMgr.setIndexDBWriteTask(this); |
| | | isRunning = true; |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Returns the next batch of buffers to be processed, blocking until enough |
| | | * buffer permits are available. |
| | |
| | | { |
| | | final long bufferBegin = bufferIndexFile.readLong(); |
| | | final long bufferEnd = bufferIndexFile.readLong(); |
| | | final IndexInputBuffer b = new IndexInputBuffer(indexMgr, |
| | | bufferFile.getChannel(), bufferBegin, bufferEnd, nextBufferID++, |
| | | cacheSize); |
| | | final IndexInputBuffer b = |
| | | new IndexInputBuffer(indexMgr, bufferFile.getChannel(), |
| | | bufferBegin, bufferEnd, nextBufferID++, cacheSize); |
| | | buffers.add(b); |
| | | } |
| | | |
| | | return buffers; |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Finishes this task. |
| | | */ |
| | |
| | | } |
| | | if(!isCanceled) |
| | | { |
| | | Message msg = NOTE_JEB_IMPORT_LDIF_DN_CLOSE |
| | | .get(indexMgr.getDNCount()); |
| | | Message msg = |
| | | NOTE_JEB_IMPORT_LDIF_DN_CLOSE.get(indexMgr.getDNCount()); |
| | | logError(msg); |
| | | } |
| | | } |
| | |
| | | } |
| | | if(!isCanceled) |
| | | { |
| | | Message message = NOTE_JEB_IMPORT_LDIF_INDEX_CLOSE.get(indexMgr |
| | | Message message = |
| | | NOTE_JEB_IMPORT_LDIF_INDEX_CLOSE.get(indexMgr |
| | | .getBufferFileName()); |
| | | logError(message); |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Print out progress stats. |
| | | * |
| | |
| | | final int currentBatch = batchNumber.get(); |
| | | |
| | | final long bytesReadInterval = tmpBytesRead - lastBytesRead; |
| | | final int bytesReadPercent = Math.round((100f * tmpBytesRead) |
| | | / bufferFileSize); |
| | | final int bytesReadPercent = |
| | | Math.round((100f * tmpBytesRead) / bufferFileSize); |
| | | |
| | | // Kilo and milli approximately cancel out. |
| | | final long kiloBytesRate = bytesReadInterval / deltaTime; |
| | | final long kiloBytesRemaining = (bufferFileSize - tmpBytesRead) / 1024; |
| | | |
| | | Message message = NOTE_JEB_IMPORT_LDIF_PHASE_TWO_REPORT.get( |
| | | indexMgr.getBufferFileName(), bytesReadPercent, kiloBytesRemaining, |
| | | Message message = |
| | | NOTE_JEB_IMPORT_LDIF_PHASE_TWO_REPORT.get(indexMgr |
| | | .getBufferFileName(), bytesReadPercent, kiloBytesRemaining, |
| | | kiloBytesRate, currentBatch, totalBatches); |
| | | logError(message); |
| | | |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * {@inheritDoc} |
| | | */ |
| | |
| | | } |
| | | catch (Exception e) |
| | | { |
| | | Message message = ERR_JEB_IMPORT_LDIF_INDEX_WRITE_DB_ERR.get( |
| | | indexMgr.getBufferFileName(), e.getMessage()); |
| | | Message message = |
| | | ERR_JEB_IMPORT_LDIF_INDEX_WRITE_DB_ERR.get(indexMgr |
| | | .getBufferFileName(), e.getMessage()); |
| | | logError(message); |
| | | throw e; |
| | | } |
| | |
| | | return null; |
| | | } |
| | | |
| | | |
| | | private void addToDB(ImportIDSet insertSet, ImportIDSet deleteSet, |
| | | int indexID) |
| | | { |
| | |
| | | Index index; |
| | | if((deleteSet.size() > 0) || (!deleteSet.isDefined())) |
| | | { |
| | | dbKey.setData(deleteSet.getKey().array(), 0, |
| | | deleteSet.getKey().limit()); |
| | | dbKey.setData(deleteSet.getKey().array(), 0, deleteSet.getKey() |
| | | .limit()); |
| | | index = (Index)idContainerMap.get(indexID); |
| | | index.delete(dbKey, deleteSet, dbValue); |
| | | if(!indexMap.containsKey(indexID)) |
| | |
| | | } |
| | | if((insertSet.size() > 0) || (!insertSet.isDefined())) |
| | | { |
| | | dbKey.setData(insertSet.getKey().array(), 0, |
| | | insertSet.getKey().limit()); |
| | | dbKey.setData(insertSet.getKey().array(), 0, insertSet.getKey() |
| | | .limit()); |
| | | index = (Index)idContainerMap.get(indexID); |
| | | index.insert(dbKey, insertSet, dbValue); |
| | | if(!indexMap.containsKey(indexID)) |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private void addDN2ID(ImportIDSet record, Integer indexID) |
| | | { |
| | | DNState dnState; |
| | |
| | | dnState.writeToDB(); |
| | | } |
| | | |
| | | |
| | | |
    /**
     * Adds the specified number of bytes to this task's running byte count.
     * The total is read by the progress reporting code to compute the
     * phase-two completion percentage and read rate.
     *
     * @param bytesRead The number of bytes just read from the scratch file.
     */
    private void addBytesRead(int bytesRead)
    {
      this.bytesRead.addAndGet(bytesRead);
    }
| | | |
| | | |
| | | |
| | | /** |
| | | * This class is used to by a index DB merge thread performing DN processing |
| | | * to keep track of the state of individual DN2ID index processing. |
| | |
| | | private final int childLimit, subTreeLimit; |
| | | private final boolean childDoCount, subTreeDoCount; |
| | | |
| | | |
| | | DNState(EntryContainer entryContainer) |
| | | { |
| | | this.entryContainer = entryContainer; |
| | |
| | | lastDN = ByteBuffer.allocate(BYTE_BUFFER_CAPACITY); |
| | | } |
| | | |
| | | |
| | | private ByteBuffer getParent(ByteBuffer buffer) |
| | | { |
| | | int parentIndex = |
| | |
| | | |
| | | private ByteBuffer deepCopy(ByteBuffer srcBuffer, ByteBuffer destBuffer) |
| | | { |
| | | if(destBuffer == null || |
| | | destBuffer.clear().remaining() < srcBuffer.limit()) |
| | | if (destBuffer == null |
| | | || destBuffer.clear().remaining() < srcBuffer.limit()) |
| | | { |
| | | byte[] bytes = new byte[srcBuffer.limit()]; |
| | | System.arraycopy(srcBuffer.array(), 0, bytes, 0, |
| | | srcBuffer.limit()); |
| | | System.arraycopy(srcBuffer.array(), 0, bytes, 0, srcBuffer.limit()); |
| | | return ByteBuffer.wrap(bytes); |
| | | } |
| | | else |
| | |
| | | |
| | | //Bypass the cache for append data, lookup the parent in DN2ID and |
| | | //return. |
| | | if(importConfiguration != null && |
| | | importConfiguration.appendToExistingData()) |
| | | if (importConfiguration != null |
| | | && importConfiguration.appendToExistingData()) |
| | | { |
| | | //If null is returned than this is a suffix DN. |
| | | if(parentDN != null) |
| | |
| | | { |
| | | EntryID newParentID = parentIDMap.get(parentDN); |
| | | ByteBuffer key = parentIDMap.lastKey(); |
| | | while(!parentDN.equals(key)) { |
| | | while (!parentDN.equals(key)) |
| | | { |
| | | parentIDMap.remove(key); |
| | | key = parentIDMap.lastKey(); |
| | | } |
| | |
| | | return true; |
| | | } |
| | | |
| | | |
| | | private void id2child(EntryID childID) |
| | | { |
| | | ImportIDSet idSet; |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private EntryID getParentID(ByteBuffer dn) throws DatabaseException |
| | | { |
| | | EntryID nodeID; |
| | | //Bypass the cache for append data, lookup the parent DN in the DN2ID |
| | | //db. |
| | | if (importConfiguration != null && |
| | | importConfiguration.appendToExistingData()) |
| | | if (importConfiguration != null |
| | | && importConfiguration.appendToExistingData()) |
| | | { |
| | | DatabaseEntry key = new DatabaseEntry(dn.array(), 0, dn.limit()); |
| | | DatabaseEntry value = new DatabaseEntry(); |
| | | OperationStatus status; |
| | | status = |
| | | entryContainer.getDN2ID().read(null, key, value, |
| | | LockMode.DEFAULT); |
| | | entryContainer.getDN2ID() |
| | | .read(null, key, value, LockMode.DEFAULT); |
| | | if(status == OperationStatus.SUCCESS) |
| | | { |
| | | nodeID = new EntryID(value); |
| | |
| | | return nodeID; |
| | | } |
| | | |
| | | |
| | | private void id2SubTree(EntryID childID) |
| | | { |
| | | ImportIDSet idSet; |
| | |
| | | idSet.addEntryID(childID); |
| | | // TODO: |
| | | // Instead of doing this, we can just walk to parent cache if available |
| | | for (ByteBuffer dn = getParent(parentDN); dn != null; |
| | | dn = getParent(dn)) |
| | | for (ByteBuffer dn = getParent(parentDN); dn != null; dn = |
| | | getParent(dn)) |
| | | { |
| | | EntryID nodeID = getParentID(dn); |
| | | if(nodeID == null) |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | public void writeToDB() |
| | | { |
| | | entryContainer.getDN2ID().put(null, dnKey, dnValue); |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private void flushMapToDB(Map<byte[], ImportIDSet> map, Index index, |
| | | boolean clearMap) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | public void flush() |
| | | { |
| | | flushMapToDB(id2childTree, entryContainer.getID2Children(), false); |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * This task writes the temporary scratch index files using the sorted |
| | | * buffers read from a blocking queue private to each index. |
| | | * This task writes the temporary scratch index files using the sorted buffers |
| | | * read from a blocking queue private to each index. |
| | | */ |
| | | private final class ScratchFileWriterTask implements Callable<Void> |
| | | { |
| | |
| | | private final SortedSet<IndexOutputBuffer> indexSortedSet; |
| | | private boolean poisonSeen = false; |
| | | |
| | | |
| | | public ScratchFileWriterTask(BlockingQueue<IndexOutputBuffer> queue, |
| | | IndexManager indexMgr) throws FileNotFoundException |
| | | { |
| | | this.queue = queue; |
| | | this.indexMgr = indexMgr; |
| | | this.bufferStream = new DataOutputStream(new BufferedOutputStream( |
| | | new FileOutputStream(indexMgr.getBufferFile()), |
| | | READER_WRITER_BUFFER_SIZE)); |
| | | this.bufferIndexStream = new DataOutputStream(new BufferedOutputStream( |
| | | new FileOutputStream(indexMgr.getBufferIndexFile()), |
| | | READER_WRITER_BUFFER_SIZE)); |
| | | this.bufferStream = |
| | | new DataOutputStream(new BufferedOutputStream(new FileOutputStream( |
| | | indexMgr.getBufferFile()), READER_WRITER_BUFFER_SIZE)); |
| | | this.bufferIndexStream = |
| | | new DataOutputStream(new BufferedOutputStream(new FileOutputStream( |
| | | indexMgr.getBufferIndexFile()), READER_WRITER_BUFFER_SIZE)); |
| | | this.indexSortedSet = new TreeSet<IndexOutputBuffer>(); |
| | | } |
| | | |
| | | |
| | | /** |
| | | * {@inheritDoc} |
| | | */ |
| | |
| | | { |
| | | long offset = 0; |
| | | List<IndexOutputBuffer> l = new LinkedList<IndexOutputBuffer>(); |
| | | try { |
| | | try |
| | | { |
| | | while(true) |
| | | { |
| | | final IndexOutputBuffer indexBuffer = queue.take(); |
| | |
| | | } |
| | | catch (IOException e) |
| | | { |
| | | Message message = ERR_JEB_IMPORT_LDIF_INDEX_FILEWRITER_ERR.get(indexMgr |
| | | Message message = |
| | | ERR_JEB_IMPORT_LDIF_INDEX_FILEWRITER_ERR.get(indexMgr |
| | | .getBufferFile().getAbsolutePath(), e.getMessage()); |
| | | logError(message); |
| | | isCanceled = true; |
| | |
| | | return null; |
| | | } |
| | | |
| | | |
| | | private long writeIndexBuffer(IndexOutputBuffer indexBuffer) |
| | | throws IOException |
| | | { |
| | | int numberKeys = indexBuffer.getNumberKeys(); |
| | | indexBuffer.setPosition(-1); |
| | | long bufferLen = 0; |
| | | insertByteStream.reset(); insertKeyCount = 0; |
| | | deleteByteStream.reset(); deleteKeyCount = 0; |
| | | insertByteStream.reset(); |
| | | insertKeyCount = 0; |
| | | deleteByteStream.reset(); |
| | | deleteKeyCount = 0; |
| | | for(int i = 0; i < numberKeys; i++) |
| | | { |
| | | if(indexBuffer.getPosition() == -1) |
| | |
| | | { |
| | | bufferLen += writeRecord(indexBuffer); |
| | | indexBuffer.setPosition(i); |
| | | insertByteStream.reset();insertKeyCount = 0; |
| | | deleteByteStream.reset();deleteKeyCount = 0; |
| | | insertByteStream.reset(); |
| | | insertKeyCount = 0; |
| | | deleteByteStream.reset(); |
| | | deleteKeyCount = 0; |
| | | } |
| | | if(indexBuffer.isInsert(i)) |
| | | { |
| | |
| | | return bufferLen; |
| | | } |
| | | |
| | | |
| | | private long writeIndexBuffers(List<IndexOutputBuffer> buffers) |
| | | throws IOException |
| | | { |
| | | long id = 0; |
| | | long bufferLen = 0; |
| | | insertByteStream.reset(); insertKeyCount = 0; |
| | | deleteByteStream.reset(); deleteKeyCount = 0; |
| | | insertByteStream.reset(); |
| | | insertKeyCount = 0; |
| | | deleteByteStream.reset(); |
| | | deleteKeyCount = 0; |
| | | for(IndexOutputBuffer b : buffers) |
| | | { |
| | | if(b.isPoison()) |
| | |
| | | return bufferLen; |
| | | } |
| | | |
| | | |
| | | private int writeByteStreams() throws IOException |
| | | { |
| | | if(insertKeyCount > indexMgr.getLimit()) |
| | |
| | | return insertSize + deleteSize; |
| | | } |
| | | |
| | | |
| | | private int writeHeader(int indexID, int keySize) throws IOException |
| | | { |
| | | bufferStream.writeInt(indexID); |
| | |
| | | return packedSize; |
| | | } |
| | | |
| | | |
| | | private int writeRecord(IndexOutputBuffer b) throws IOException |
| | | { |
| | | int keySize = b.getKeySize(); |
| | | int packedSize = writeHeader(b.getIndexID(), keySize); |
| | | b.writeKey(bufferStream); |
| | | packedSize += writeByteStreams(); |
| | | return (packedSize + keySize + insertByteStream.size() + |
| | | deleteByteStream.size() + 4); |
| | | return (packedSize + keySize + insertByteStream.size() |
| | | + deleteByteStream.size() + 4); |
| | | } |
| | | |
| | | |
| | | private int writeRecord(byte[] k, int indexID) throws IOException |
| | | { |
| | | int packedSize = writeHeader(indexID, k.length); |
| | | bufferStream.write(k); |
| | | packedSize += writeByteStreams(); |
| | | return (packedSize + k.length + insertByteStream.size() + |
| | | deleteByteStream.size() + 4); |
| | | return (packedSize + k.length + insertByteStream.size() |
| | | + deleteByteStream.size() + 4); |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * This task main function is to sort the index buffers given to it from |
| | | * the import tasks reading the LDIF file. It will also create a index |
| | | * file writer task and corresponding queue if needed. The sorted index |
| | | * buffers are put on the index file writer queues for writing to a temporary |
| | | * file. |
| | | * This task main function is to sort the index buffers given to it from the |
| | | * import tasks reading the LDIF file. It will also create a index file writer |
| | | * task and corresponding queue if needed. The sorted index buffers are put on |
| | | * the index file writer queues for writing to a temporary file. |
| | | */ |
| | | private final class SortTask implements Callable<Void> |
| | | { |
| | |
| | | indexBuffer.sort(); |
| | | if (indexKeyQueMap.containsKey(indexBuffer.getIndexKey())) |
| | | { |
| | | BlockingQueue<IndexOutputBuffer> q = indexKeyQueMap.get(indexBuffer |
| | | .getIndexKey()); |
| | | BlockingQueue<IndexOutputBuffer> q = |
| | | indexKeyQueMap.get(indexBuffer.getIndexKey()); |
| | | q.add(indexBuffer); |
| | | } |
| | | else |
| | | { |
| | | createIndexWriterTask(indexBuffer.getIndexKey()); |
| | | BlockingQueue<IndexOutputBuffer> q = indexKeyQueMap.get(indexBuffer |
| | | .getIndexKey()); |
| | | BlockingQueue<IndexOutputBuffer> q = |
| | | indexKeyQueMap.get(indexBuffer.getIndexKey()); |
| | | q.add(indexBuffer); |
| | | } |
| | | return null; |
| | |
| | | { |
| | | isDN = true; |
| | | } |
| | | IndexManager indexMgr = new IndexManager(indexKey.getName(), isDN, |
| | | IndexManager indexMgr = |
| | | new IndexManager(indexKey.getName(), isDN, |
| | | indexKey.getEntryLimit()); |
| | | if(isDN) |
| | | { |
| | |
| | | ScratchFileWriterTask indexWriter = |
| | | new ScratchFileWriterTask(newQue, indexMgr); |
| | | scratchFileWriterList.add(indexWriter); |
| | | scratchFileWriterFutures.add( |
| | | scratchFileWriterService.submit(indexWriter)); |
| | | scratchFileWriterFutures.add(scratchFileWriterService |
| | | .submit(indexWriter)); |
| | | indexKeyQueMap.put(indexKey, newQue); |
| | | } |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * The index manager class has several functions: |
| | | * |
   * 1. It is used to carry information about index processing created in
   * phase one to phase two.
   *
   * 2. It collects statistics about phase two processing for each index.
   *
   * 3. It manages opening and closing the scratch index files.
| | | */ |
| | | final class IndexManager implements Comparable<IndexManager> |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
    /**
     * Associates the phase two index DB writer task that will consume this
     * manager's scratch file.
     *
     * @param writer
     *          The index DB writer task.
     */
    private void setIndexDBWriteTask(IndexDBWriteTask writer)
    {
      this.writer = writer;
    }
| | | |
| | | |
    /**
     * Returns the scratch buffer file used by this index manager.
     *
     * @return The scratch buffer file.
     */
    private File getBufferFile()
    {
      return bufferFile;
    }
| | | |
| | | |
| | | |
    /**
     * Returns the size in bytes of the scratch buffer file.
     *
     * @return The scratch buffer file size.
     */
    private long getBufferFileSize()
    {
      return bufferFileSize;
    }
| | | |
| | | |
| | | |
    /**
     * Returns the index file that describes the buffers inside the scratch
     * buffer file.
     *
     * @return The buffer index file.
     */
    private File getBufferIndexFile()
    {
      return bufferIndexFile;
    }
| | | |
| | | |
    /**
     * Records the number of phase one buffers written and the resulting
     * scratch buffer file size.
     *
     * @param numberOfBuffers
     *          The number of buffers written to the scratch file.
     * @param bufferFileSize
     *          The size in bytes of the scratch buffer file.
     */
    private void setBufferInfo(int numberOfBuffers, long bufferFileSize)
    {
      this.numberOfBuffers = numberOfBuffers;
      this.bufferFileSize = bufferFileSize;
    }
| | | |
| | | |
| | | |
| | | /** |
| | | * Updates the bytes read counter. |
| | | * |
| | |
| | | } |
| | | } |
| | | |
| | | |
    /**
     * Adds the given delta to the running total of DNs processed by this
     * manager.
     *
     * @param delta
     *          The number of DNs to add to the total.
     */
    private void addTotDNCount(int delta)
    {
      totalDNS += delta;
    }
| | | |
| | | |
    /**
     * Returns the total number of DNs processed by this manager.
     *
     * @return The total DN count.
     */
    private long getDNCount()
    {
      return totalDNS;
    }
| | | |
| | | |
    /**
     * Indicates whether this manager was created for a DN index (the flag is
     * supplied at construction time).
     *
     * @return {@code true} if this manager handles a DN index.
     */
    private boolean isDN2ID()
    {
      return isDN;
    }
| | | |
| | | |
| | | private void printStats(long deltaTime) |
| | | { |
| | | if (writer != null) |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Returns the file name associated with this index manager. |
| | | * |
| | |
| | | return bufferFileName; |
| | | } |
| | | |
| | | |
    /**
     * Returns the entry limit this manager was constructed with (taken from
     * the index key's entry limit).
     *
     * @return The entry limit.
     */
    private int getLimit()
    {
      return limit;
    }
| | | |
| | | |
| | | /** |
| | | * {@inheritDoc} |
| | | */ |
| | |
| | | return numberOfBuffers - mgr.numberOfBuffers; |
| | | } |
| | | |
| | | |
| | | |
    /**
     * Returns the number of phase one buffers written to the scratch file.
     *
     * @return The number of buffers.
     */
    private int getNumberOfBuffers()
    {
      return numberOfBuffers;
    }
| | | } |
| | | |
| | | |
| | | /** |
| | | * The rebuild index manager handles all rebuild index related processing. |
| | | */ |
| | | private class RebuildIndexManager extends ImportTask |
| | | implements DiskSpaceMonitorHandler |
| | | private class RebuildIndexManager extends ImportTask implements |
| | | DiskSpaceMonitorHandler |
| | | { |
| | | |
| | | //Rebuild index configuration. |
| | |
| | | //The entry container. |
| | | private EntryContainer entryContainer; |
| | | |
| | | |
| | | /** |
| | | * Create an instance of the rebuild index manager using the specified |
| | | * parameters. |
| | | * |
     * @param rebuildConfig
     *          The rebuild configuration to use.
     * @param cfg
     *          The local DB configuration to use.
| | | */ |
| | | public RebuildIndexManager(RebuildConfig rebuildConfig, |
| | | LocalDBBackendCfg cfg) |
| | |
| | | this.cfg = cfg; |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Initialize a rebuild index manager. |
| | | * |
     * @throws ConfigException
     *           If a configuration error occurred.
     * @throws InitializationException
     *           If an initialization error occurred.
| | | */ |
| | | public void initialize() throws ConfigException, InitializationException |
| | | { |
| | |
| | | suffix = Suffix.createSuffixContext(entryContainer, null, null, null); |
| | | if(suffix == null) |
| | | { |
| | | Message msg = ERR_JEB_REBUILD_SUFFIX_ERROR.get(rebuildConfig. |
| | | getBaseDN().toString()); |
| | | Message msg = |
| | | ERR_JEB_REBUILD_SUFFIX_ERROR.get(rebuildConfig.getBaseDN() |
| | | .toString()); |
| | | throw new InitializationException(msg); |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Print start message. |
| | | * |
     * @throws DatabaseException
     *           If a database error occurred.
| | | */ |
| | | public void printStartMessage() throws DatabaseException |
| | | { |
| | |
| | | totalEntries = suffix.getID2Entry().getRecordCount(); |
| | | |
| | | Message message; |
| | | switch (rebuildConfig.getRebuildMode()) { |
| | | switch (rebuildConfig.getRebuildMode()) |
| | | { |
| | | case ALL: |
| | | message = NOTE_JEB_REBUILD_ALL_START.get(totalEntries); |
| | | break; |
| | |
| | | logError(message); |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Print stop message. |
| | | * |
     * @param startTime
     *          The time the rebuild started.
| | | */ |
| | | public void printStopMessage(long startTime) |
| | | { |
| | |
| | | logError(message); |
| | | } |
| | | |
| | | |
| | | /** |
| | | * {@inheritDoc} |
| | | */ |
| | |
| | | id2entry.openCursor(DiskOrderedCursorConfig.DEFAULT); |
| | | DatabaseEntry key = new DatabaseEntry(); |
| | | DatabaseEntry data = new DatabaseEntry(); |
| | | try { |
| | | try |
| | | { |
| | | while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS) |
| | | { |
| | | if(isCanceled) |
| | |
| | | return null; |
| | | } |
| | | EntryID entryID = new EntryID(key); |
| | | Entry entry = ID2Entry.entryFromDatabase( |
| | | ByteString.wrap(data.getData()), |
| | | Entry entry = |
| | | ID2Entry.entryFromDatabase(ByteString.wrap(data.getData()), |
| | | entryContainer.getRootContainer().getCompressedSchema()); |
| | | processEntry(entry, entryID); |
| | | entriesProcessed.getAndIncrement(); |
| | |
| | | return null; |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Perform rebuild index processing. |
| | | * |
| | |
| | | * @throws InterruptedException |
| | | * If an interrupted error occurred. |
| | | * @throws ExecutionException |
     *           If an execution error occurred.
| | | * @throws JebException |
| | | * If an JEB error occurred. |
| | | */ |
| | | public void rebuildIndexes() throws |
| | | DatabaseException, InterruptedException, ExecutionException, |
| | | JebException |
| | | public void rebuildIndexes() throws DatabaseException, |
| | | InterruptedException, ExecutionException, JebException |
| | | { |
| | | switch (rebuildConfig.getRebuildMode()) |
| | | { |
| | | case ALL: |
| | | setAllIndexesTrusted(false); |
| | | break; |
| | | case DEGRADED: |
| | | // Nothing to do: degraded indexes are already untrusted. |
| | | break; |
| | | default: |
| | | setRebuildListIndexesTrusted(false); |
| | | break; |
| | | } |
| | | // Sets only the needed indexes. |
| | | setIndexesListsToBeRebuilt(); |
| | | |
| | | if (!rebuildConfig.isClearDegradedState()) |
| | | { |
| | | // If not in a 'clear degraded state' operation, |
| | | // need to rebuild the indexes. |
| | | setRebuildListIndexesTrusted(false); |
| | | clearIndexes(true); |
| | | phaseOne(); |
| | | if (isCanceled) |
| | | { |
| | | throw new InterruptedException("Rebuild Index canceled."); |
| | | } |
| | | phaseTwo(); |
| | | } |
| | | |
| | | switch (rebuildConfig.getRebuildMode()) |
| | | setRebuildListIndexesTrusted(true); |
| | | } |
| | | |
    /**
     * Populates the lists of indexes to rebuild according to the configured
     * rebuild mode (ALL, DEGRADED or USER_DEFINED).
     *
     * @throws JebException
     *           If updating an index trusted state fails.
     */
    @SuppressWarnings("fallthrough")
    private void setIndexesListsToBeRebuilt() throws JebException
    {
      // Depends on rebuild mode, (re)building indexes' lists.
      final RebuildMode mode = rebuildConfig.getRebuildMode();
      switch (mode)
      {
      case ALL:
        // Collect every attribute index, then fall through to also pick up
        // the system indexes selected for DEGRADED mode.
        rebuildIndexMap(false);
        // falls through
      case DEGRADED:
        // NOTE(review): marking every index trusted at the start of a
        // rebuild-selection pass looks suspicious -- confirm intentional.
        setAllIndexesTrusted(true);
        if ((mode == RebuildMode.ALL)
            || (!entryContainer.getID2Children().isTrusted() || !entryContainer
                .getID2Subtree().isTrusted()))
        {
          dn2id = entryContainer.getDN2ID();
        }
        // NOTE(review): the null check on DN2URI as a trigger to (re)select
        // it reads oddly -- verify against the repository history.
        if ((mode == RebuildMode.ALL) || entryContainer.getDN2URI() == null)
        {
          dn2uri = entryContainer.getDN2URI();
        }
        if ((mode == RebuildMode.DEGRADED)
            || entryContainer.getAttributeIndexes().isEmpty())
        {
          rebuildIndexMap(true); // only degraded.
        }
        if ((mode == RebuildMode.ALL) || vlvIndexes.isEmpty())
        {
          vlvIndexes.addAll(new LinkedList<VLVIndex>(entryContainer
              .getVLVIndexes()));
        }
        break;

      case USER_DEFINED:
        // false may be required if the user wants to rebuild specific index.
        rebuildIndexMap(false);
        break;
      default:
        setRebuildListIndexesTrusted(true);
        break;
      }
    }
| | | |
| | | private void rebuildIndexMap(final boolean onlyDegraded) |
| | | { |
| | | // rebuildList contains the user-selected index(in USER_DEFINED mode). |
| | | final List<String> rebuildList = rebuildConfig.getRebuildList(); |
| | | for (final Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix |
| | | .getAttrIndexMap().entrySet()) |
| | | { |
| | | final AttributeType attributeType = mapEntry.getKey(); |
| | | final AttributeIndex attributeIndex = mapEntry.getValue(); |
| | | if (rebuildConfig.getRebuildMode() == RebuildMode.ALL |
| | | || rebuildConfig.getRebuildMode() == RebuildMode.DEGRADED) |
| | | { |
| | | // Get all existing indexes for all && degraded mode. |
| | | rebuildAttributeIndexes(attributeIndex, attributeType, onlyDegraded); |
| | | } |
| | | else |
| | | { |
| | | // Get indexes for user defined index. |
| | | if (!rebuildList.isEmpty()) |
| | | { |
| | | for (final String index : rebuildList) |
| | | { |
| | | if (attributeType.getNameOrOID().toLowerCase().equals( |
| | | index.toLowerCase())) |
| | | { |
| | | rebuildAttributeIndexes(attributeIndex, attributeType, |
| | | onlyDegraded); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | private void rebuildAttributeIndexes(final AttributeIndex attrIndex, |
| | | final AttributeType attrType, final boolean onlyDegraded) |
| | | throws DatabaseException |
| | | { |
| | | if (attrIndex.getSubstringIndex() != null) |
| | | { |
| | | fillIndexMap(attrType, attrIndex.getSubstringIndex(), |
| | | ImportIndexType.SUBSTRING, onlyDegraded); |
| | | } |
| | | if (attrIndex.getOrderingIndex() != null) |
| | | { |
| | | fillIndexMap(attrType, attrIndex.getOrderingIndex(), |
| | | ImportIndexType.ORDERING, onlyDegraded); |
| | | } |
| | | if (attrIndex.getEqualityIndex() != null) |
| | | { |
| | | fillIndexMap(attrType, attrIndex.getEqualityIndex(), |
| | | ImportIndexType.EQUALITY, onlyDegraded); |
| | | } |
| | | if (attrIndex.getPresenceIndex() != null) |
| | | { |
| | | fillIndexMap(attrType, attrIndex.getPresenceIndex(), |
| | | ImportIndexType.PRESENCE, onlyDegraded); |
| | | } |
| | | if (attrIndex.getApproximateIndex() != null) |
| | | { |
| | | fillIndexMap(attrType, attrIndex.getApproximateIndex(), |
| | | ImportIndexType.APPROXIMATE, onlyDegraded); |
| | | } |
| | | final Map<String, Collection<Index>> extensibleMap = |
| | | attrIndex.getExtensibleIndexes(); |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | final Collection<Index> subIndexes = |
| | | attrIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | if (subIndexes != null && !subIndexes.isEmpty()) |
| | | { |
| | | final List<Index> mutableCopy = new LinkedList<Index>(subIndexes); |
| | | final Iterator<Index> i = mutableCopy.iterator(); |
| | | while (i.hasNext()) |
| | | { |
| | | final Index subIndex = i.next(); |
| | | if (!onlyDegraded || !subIndex.isTrusted()) |
| | | { |
| | | if ((rebuildConfig.isClearDegradedState() && subIndex |
| | | .getRecordCount() == 0) |
| | | || !rebuildConfig.isClearDegradedState()) |
| | | { |
| | | int id = System.identityHashCode(subIndex); |
| | | idContainerMap.putIfAbsent(id, subIndex); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | // This index is not a candidate for rebuilding. |
| | | i.remove(); |
| | | } |
| | | } |
| | | if (!mutableCopy.isEmpty()) |
| | | { |
| | | extensibleIndexMap.put(new IndexKey(attrType, |
| | | ImportIndexType.EX_SUBSTRING, 0), mutableCopy); |
| | | } |
| | | } |
| | | final Collection<Index> sharedIndexes = |
| | | attrIndex.getExtensibleIndexes().get(EXTENSIBLE_INDEXER_ID_SHARED); |
| | | if (sharedIndexes != null && !sharedIndexes.isEmpty()) |
| | | { |
| | | final List<Index> mutableCopy = new LinkedList<Index>(sharedIndexes); |
| | | final Iterator<Index> i = mutableCopy.iterator(); |
| | | while (i.hasNext()) |
| | | { |
| | | final Index sharedIndex = i.next(); |
| | | if (!onlyDegraded || !sharedIndex.isTrusted()) |
| | | { |
| | | if ((rebuildConfig.isClearDegradedState() && sharedIndex |
| | | .getRecordCount() == 0) |
| | | || !rebuildConfig.isClearDegradedState()) |
| | | { |
| | | int id = System.identityHashCode(sharedIndex); |
| | | idContainerMap.putIfAbsent(id, sharedIndex); |
| | | } |
| | | } |
| | | else |
| | | { |
| | | // This index is not a candidate for rebuilding. |
| | | i.remove(); |
| | | } |
| | | } |
| | | if (!mutableCopy.isEmpty()) |
| | | { |
| | | extensibleIndexMap.put(new IndexKey(attrType, |
| | | ImportIndexType.EX_SHARED, 0), mutableCopy); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | private void fillIndexMap(final AttributeType attrType, |
| | | final Index partialAttrIndex, final ImportIndexType importIndexType, |
| | | final boolean onlyDegraded) |
| | | { |
| | | if ((!onlyDegraded || !partialAttrIndex.isTrusted())) |
| | | { |
| | | if ((rebuildConfig.isClearDegradedState() && partialAttrIndex |
| | | .getRecordCount() == 0) |
| | | || !rebuildConfig.isClearDegradedState()) |
| | | { |
| | | final int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | final IndexKey indexKey = |
| | | new IndexKey(attrType, importIndexType, partialAttrIndex |
| | | .getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | } |
| | | } |
| | | |
| | | private void clearIndexes(boolean onlyDegraded) throws DatabaseException |
| | | { |
| | | // Clears all the entry's container databases |
| | | // which are containing the indexes. |
| | | |
| | | if (!onlyDegraded) |
| | | { |
| | | // dn2uri does not have a trusted status. |
| | | entryContainer.clearDatabase(entryContainer.getDN2URI()); |
| | | } |
| | | |
| | | if (!onlyDegraded || !entryContainer.getID2Children().isTrusted() |
| | | || !entryContainer.getID2Subtree().isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(entryContainer.getDN2ID()); |
| | | entryContainer.clearDatabase(entryContainer.getID2Children()); |
| | | entryContainer.clearDatabase(entryContainer.getID2Subtree()); |
| | | } |
| | | |
| | | if (!indexMap.isEmpty()) |
| | | { |
| | | for (final Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet()) |
| | | { |
| | | if (!onlyDegraded || !mapEntry.getValue().isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(mapEntry.getValue()); |
| | | } |
| | | } |
| | | } |
| | | |
| | | if (!extensibleIndexMap.isEmpty()) |
| | | { |
| | | for (final Collection<Index> subIndexes : extensibleIndexMap.values()) |
| | | { |
| | | if (subIndexes != null) |
| | | { |
| | | for (final Index subIndex : subIndexes) |
| | | { |
| | | entryContainer.clearDatabase(subIndex); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | for (final VLVIndex vlvIndex : entryContainer.getVLVIndexes()) |
| | | { |
| | | if (!onlyDegraded || !vlvIndex.isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(vlvIndex); |
| | | } |
| | | } |
| | | } |
| | | |
| | | private void setRebuildListIndexesTrusted(boolean trusted) |
| | | throws JebException |
| | |
| | | } |
| | | if(!indexMap.isEmpty()) |
| | | { |
| | | for(Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet()) { |
| | | for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet()) |
| | | { |
| | | Index index = mapEntry.getValue(); |
| | | index.setTrusted(null, trusted); |
| | | } |
| | |
| | | { |
| | | for(Collection<Index> subIndexes : extensibleIndexMap.values()) |
| | | { |
| | | if(subIndexes != null) { |
| | | for(Index subIndex : subIndexes) { |
| | | if (subIndexes != null) |
| | | { |
| | | for (Index subIndex : subIndexes) |
| | | { |
| | | subIndex.setTrusted(null, trusted); |
| | | } |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private void setAllIndexesTrusted(boolean trusted) throws JebException |
| | | private void phaseOne() throws DatabaseException, InterruptedException, |
| | | ExecutionException |
| | | { |
| | | try { |
| | | suffix.setIndexesTrusted(trusted); |
| | | } |
| | | catch (DatabaseException ex) |
| | | { |
| | | Message message = |
| | | NOTE_JEB_IMPORT_LDIF_TRUSTED_FAILED.get(ex.getMessage()); |
| | | throw new JebException(message); |
| | | } |
| | | } |
| | | |
| | | |
| | | private void phaseOne() throws DatabaseException, |
| | | InterruptedException, ExecutionException { |
| | | switch (rebuildConfig.getRebuildMode()) |
| | | { |
| | | case ALL: |
| | | clearAllIndexes(false); |
| | | break; |
| | | case DEGRADED: |
| | | clearAllIndexes(true); |
| | | break; |
| | | default: |
| | | clearRebuildListIndexes(); |
| | | break; |
| | | } |
| | | |
| | | initializeIndexBuffers(); |
| | | RebuildFirstPhaseProgressTask progressTask = |
| | | new RebuildFirstPhaseProgressTask(); |
| | |
| | | tasks.add(this); |
| | | } |
| | | List<Future<Void>> results = rebuildIndexService.invokeAll(tasks); |
| | | for (Future<Void> result : results) { |
| | | if(!result.isDone()) { |
| | | for (Future<Void> result : results) |
| | | { |
| | | if (!result.isDone()) |
| | | { |
| | | result.get(); |
| | | } |
| | | } |
| | | stopScratchFileWriters(); |
| | | for (Future<?> result : scratchFileWriterFutures) |
| | | { |
| | | if(!result.isDone()) { |
| | | if (!result.isDone()) |
| | | { |
| | | result.get(); |
| | | } |
| | | } |
| | |
| | | freeBufferQueue.clear(); |
| | | } |
| | | |
| | | |
| | | |
| | | private void phaseTwo() |
| | | throws InterruptedException, ExecutionException |
| | | private void phaseTwo() throws InterruptedException, ExecutionException |
| | | { |
| | | SecondPhaseProgressTask progressTask = new SecondPhaseProgressTask( |
| | | entriesProcessed.get()); |
| | | SecondPhaseProgressTask progressTask = |
| | | new SecondPhaseProgressTask(entriesProcessed.get()); |
| | | Timer timer2 = new Timer(); |
| | | timer2.scheduleAtFixedRate(progressTask, TIMER_INTERVAL, TIMER_INTERVAL); |
| | | processIndexFiles(); |
| | | timer2.cancel(); |
| | | } |
| | | |
| | | |
| | | private int getIndexCount() throws ConfigException, JebException |
| | | { |
| | | switch (rebuildConfig.getRebuildMode()) |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | private int getRebuildListIndexCount(LocalDBBackendCfg cfg) |
| | | throws JebException, ConfigException |
| | | { |
| | |
| | | throw new JebException(msg); |
| | | } |
| | | indexCount++; |
| | | } else if(lowerName.equals("id2subtree") || |
| | | lowerName.equals("id2children")) |
| | | } |
| | | else if (lowerName.equals("id2subtree") |
| | | || lowerName.equals("id2children")) |
| | | { |
| | | Message msg = ERR_JEB_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index); |
| | | throw new JebException(msg); |
| | |
| | | else if(attrIndexParts[1].equals("approximate")) |
| | | { |
| | | indexCount++; |
| | | } else { |
| | | } |
| | | else |
| | | { |
| | | Message msg = |
| | | ERR_JEB_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index); |
| | | throw new JebException(msg); |
| | |
| | | for (String idx : cfg.listLocalDBIndexes()) |
| | | { |
| | | LocalDBIndexCfg indexCfg = cfg.getLocalDBIndex(idx); |
| | | if (indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.EXTENSIBLE)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.EXTENSIBLE)) |
| | | { |
| | | Set<String> extensibleRules = |
| | | indexCfg.getIndexExtensibleMatchingRule(); |
| | |
| | | break; |
| | | } |
| | | } |
| | | if(!found) { |
| | | if (!found) |
| | | { |
| | | Message msg = |
| | | ERR_JEB_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index); |
| | | throw new JebException(msg); |
| | |
| | | continue; |
| | | } |
| | | LocalDBIndexCfg indexCfg = cfg.getLocalDBIndex(idx); |
| | | if(indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.EQUALITY)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.EQUALITY)) |
| | | { |
| | | indexCount++; |
| | | } |
| | | if(indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.ORDERING)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.ORDERING)) |
| | | { |
| | | indexCount++; |
| | | } |
| | | if(indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.PRESENCE)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.PRESENCE)) |
| | | { |
| | | indexCount++; |
| | | } |
| | | if(indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.SUBSTRING)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.SUBSTRING)) |
| | | { |
| | | indexCount++; |
| | | } |
| | | if(indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.APPROXIMATE)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.APPROXIMATE)) |
| | | { |
| | | indexCount++; |
| | | } |
| | | if (indexCfg.getIndexType(). |
| | | contains(LocalDBIndexCfgDefn.IndexType.EXTENSIBLE)) |
| | | if (indexCfg.getIndexType().contains( |
| | | LocalDBIndexCfgDefn.IndexType.EXTENSIBLE)) |
| | | { |
| | | Set<String> extensibleRules = |
| | | indexCfg.getIndexExtensibleMatchingRule(); |
| | |
| | | return indexCount; |
| | | } |
| | | |
| | | |
| | | private void clearRebuildListIndexes() throws DatabaseException |
| | | { |
| | | List<String> rebuildList = rebuildConfig.getRebuildList(); |
| | | if(!rebuildList.isEmpty()) |
| | | { |
| | | for (String index : rebuildList) |
| | | { |
| | | String lowerName = index.toLowerCase(); |
| | | if (lowerName.equals("dn2id")) |
| | | { |
| | | clearDN2IDIndexes(false); |
| | | } |
| | | else if (lowerName.equals("dn2uri")) |
| | | { |
| | | clearDN2URI(false); |
| | | } |
| | | else if (lowerName.startsWith("vlv.")) |
| | | { |
| | | VLVIndex vlvIndex = entryContainer.getVLVIndex(lowerName |
| | | .substring(4)); |
| | | clearVLVIndex(vlvIndex, false); |
| | | } |
| | | else |
| | | { |
| | | String[] attrIndexParts = lowerName.split("\\."); |
| | | AttributeType attrType = |
| | | DirectoryServer.getAttributeType(attrIndexParts[0]); |
| | | AttributeIndex attrIndex = |
| | | entryContainer.getAttributeIndex(attrType); |
| | | if(attrIndexParts.length != 1) |
| | | { |
| | | Index partialAttrIndex; |
| | | if(attrIndexParts[1].equals("presence")) |
| | | { |
| | | partialAttrIndex = attrIndex.getPresenceIndex(); |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = |
| | | new IndexKey(attrType, ImportIndexType.PRESENCE, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | else if(attrIndexParts[1].equals("equality")) |
| | | { |
| | | partialAttrIndex = attrIndex.getEqualityIndex(); |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = |
| | | new IndexKey(attrType, ImportIndexType.EQUALITY, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | else if(attrIndexParts[1].equals("substring")) |
| | | { |
| | | partialAttrIndex = attrIndex.getSubstringIndex(); |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = |
| | | new IndexKey(attrType, ImportIndexType.SUBSTRING, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | else if(attrIndexParts[1].equals("ordering")) |
| | | { |
| | | partialAttrIndex = attrIndex.getOrderingIndex(); |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = |
| | | new IndexKey(attrType, ImportIndexType.ORDERING, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | else if(attrIndexParts[1].equals("approximate")) |
| | | { |
| | | partialAttrIndex = attrIndex.getApproximateIndex(); |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = |
| | | new IndexKey(attrType, ImportIndexType.APPROXIMATE, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | else |
| | | { |
| | | String dbPart = "shared"; |
| | | if(attrIndexParts[2].startsWith("sub")) |
| | | { |
| | | dbPart = "substring"; |
| | | } |
| | | StringBuilder nameBldr = new StringBuilder(); |
| | | nameBldr.append(entryContainer.getDatabasePrefix()); |
| | | nameBldr.append("_"); |
| | | nameBldr.append(attrIndexParts[0]); |
| | | nameBldr.append("."); |
| | | nameBldr.append(attrIndexParts[1]); |
| | | nameBldr.append("."); |
| | | nameBldr.append(dbPart); |
| | | String indexName = nameBldr.toString(); |
| | | Map<String,Collection<Index>> extensibleMap = |
| | | attrIndex.getExtensibleIndexes(); |
| | | if(!extensibleMap.isEmpty()) { |
| | | Collection<Index> subIndexes = |
| | | attrIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | if(subIndexes != null) { |
| | | for(Index subIndex : subIndexes) { |
| | | String name = subIndex.getName(); |
| | | if(name.equalsIgnoreCase(indexName)) |
| | | { |
| | | entryContainer.clearDatabase(subIndex); |
| | | int id = System.identityHashCode(subIndex); |
| | | idContainerMap.putIfAbsent(id, subIndex); |
| | | Collection<Index> substring = new ArrayList<Index>(); |
| | | substring.add(subIndex); |
| | | extensibleIndexMap.put(new IndexKey(attrType, |
| | | ImportIndexType.EX_SUBSTRING, 0),substring); |
| | | break; |
| | | } |
| | | } |
| | | Collection<Index> sharedIndexes = |
| | | attrIndex.getExtensibleIndexes(). |
| | | get(EXTENSIBLE_INDEXER_ID_SHARED); |
| | | if(sharedIndexes !=null) { |
| | | for(Index sharedIndex : sharedIndexes) { |
| | | String name = sharedIndex.getName(); |
| | | if(name.equalsIgnoreCase(indexName)) |
| | | { |
| | | entryContainer.clearDatabase(sharedIndex); |
| | | Collection<Index> shared = new ArrayList<Index>(); |
| | | int id = System.identityHashCode(sharedIndex); |
| | | idContainerMap.putIfAbsent(id, sharedIndex); |
| | | shared.add(sharedIndex); |
| | | extensibleIndexMap.put(new IndexKey(attrType, |
| | | ImportIndexType.EX_SHARED, 0), shared); |
| | | break; |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | else |
| | | { |
| | | clearAttributeIndexes(attrIndex, attrType, false); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | |
| | | private void clearAllIndexes(boolean onlyDegraded) throws DatabaseException |
| | | { |
| | | for(Map.Entry<AttributeType, AttributeIndex> mapEntry : |
| | | suffix.getAttrIndexMap().entrySet()) { |
| | | AttributeType attributeType = mapEntry.getKey(); |
| | | AttributeIndex attributeIndex = mapEntry.getValue(); |
| | | clearAttributeIndexes(attributeIndex, attributeType, onlyDegraded); |
| | | } |
| | | for(VLVIndex vlvIndex : suffix.getEntryContainer().getVLVIndexes()) { |
| | | clearVLVIndex(vlvIndex, onlyDegraded); |
| | | } |
| | | clearDN2IDIndexes(onlyDegraded); |
| | | if(entryContainer.getDN2URI() != null) |
| | | { |
| | | clearDN2URI(onlyDegraded); |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | private void clearVLVIndex(VLVIndex vlvIndex, boolean onlyDegraded) |
| | | throws DatabaseException |
| | | { |
| | | if (!onlyDegraded || !vlvIndex.isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(vlvIndex); |
| | | vlvIndexes.add(vlvIndex); |
| | | } |
| | | } |
| | | |
| | | |
| | | |
    /**
     * Clears the DN2URI database and records it for rebuilding. Does nothing
     * when {@code onlyDegraded} is {@code true}, because dn2uri has no
     * trusted/degraded status to test.
     *
     * @param onlyDegraded
     *          {@code true} to act only on degraded indexes.
     * @throws DatabaseException
     *           If a database error occurs while clearing.
     */
    private void clearDN2URI(boolean onlyDegraded) throws DatabaseException
    {
      if (!onlyDegraded)
      {
        // dn2uri does not have a trusted status.
        entryContainer.clearDatabase(entryContainer.getDN2URI());
        dn2uri = entryContainer.getDN2URI();
      }
    }
| | | |
| | | |
| | | |
    /**
     * Clears the DN2ID, ID2Children and ID2Subtree databases and records
     * DN2ID for rebuilding. In degraded-only mode the databases are cleared
     * only when ID2Children or ID2Subtree is untrusted.
     *
     * @param onlyDegraded
     *          {@code true} to act only when the system indexes are degraded.
     * @throws DatabaseException
     *           If a database error occurs while clearing.
     */
    private void clearDN2IDIndexes(boolean onlyDegraded)
        throws DatabaseException
    {
      if (!onlyDegraded || !entryContainer.getID2Children().isTrusted()
          || !entryContainer.getID2Subtree().isTrusted())
      {
        entryContainer.clearDatabase(entryContainer.getDN2ID());
        entryContainer.clearDatabase(entryContainer.getID2Children());
        entryContainer.clearDatabase(entryContainer.getID2Subtree());
        dn2id = entryContainer.getDN2ID();
      }
    }
| | | |
| | | |
| | | private void clearAttributeIndexes(AttributeIndex attrIndex, |
| | | AttributeType attrType, |
| | | boolean onlyDegraded) |
| | | throws DatabaseException |
| | | { |
| | | if (attrIndex.getSubstringIndex() != null) |
| | | { |
| | | Index partialAttrIndex = attrIndex.getSubstringIndex(); |
| | | if (!onlyDegraded || !partialAttrIndex.isTrusted()) |
| | | { |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = new IndexKey(attrType, ImportIndexType.SUBSTRING, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | } |
| | | if (attrIndex.getOrderingIndex() != null) |
| | | { |
| | | Index partialAttrIndex = attrIndex.getOrderingIndex(); |
| | | if (!onlyDegraded || !partialAttrIndex.isTrusted()) |
| | | { |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = new IndexKey(attrType, ImportIndexType.ORDERING, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | } |
| | | if(attrIndex.getEqualityIndex() != null) |
| | | { |
| | | Index partialAttrIndex = attrIndex.getEqualityIndex(); |
| | | if (!onlyDegraded || !partialAttrIndex.isTrusted()) |
| | | { |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = new IndexKey(attrType, ImportIndexType.EQUALITY, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | } |
| | | if (attrIndex.getPresenceIndex() != null) |
| | | { |
| | | Index partialAttrIndex = attrIndex.getPresenceIndex(); |
| | | if (!onlyDegraded || !partialAttrIndex.isTrusted()) |
| | | { |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = new IndexKey(attrType, ImportIndexType.PRESENCE, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | } |
| | | if (attrIndex.getApproximateIndex() != null) |
| | | { |
| | | Index partialAttrIndex = attrIndex.getApproximateIndex(); |
| | | if (!onlyDegraded || !partialAttrIndex.isTrusted()) |
| | | { |
| | | int id = System.identityHashCode(partialAttrIndex); |
| | | idContainerMap.putIfAbsent(id, partialAttrIndex); |
| | | entryContainer.clearDatabase(partialAttrIndex); |
| | | IndexKey indexKey = new IndexKey(attrType, |
| | | ImportIndexType.APPROXIMATE, |
| | | partialAttrIndex.getIndexEntryLimit()); |
| | | indexMap.put(indexKey, partialAttrIndex); |
| | | } |
| | | } |
| | | Map<String, Collection<Index>> extensibleMap = attrIndex |
| | | .getExtensibleIndexes(); |
| | | if (!extensibleMap.isEmpty()) |
| | | { |
| | | Collection<Index> subIndexes = attrIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SUBSTRING); |
| | | if (subIndexes != null && !subIndexes.isEmpty()) |
| | | { |
| | | List<Index> mutableCopy = new LinkedList<Index>(subIndexes); |
| | | Iterator<Index> i = mutableCopy.iterator(); |
| | | while (i.hasNext()) |
| | | { |
| | | Index subIndex = i.next(); |
| | | if (!onlyDegraded || !subIndex.isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(subIndex); |
| | | int id = System.identityHashCode(subIndex); |
| | | idContainerMap.putIfAbsent(id, subIndex); |
| | | } |
| | | else |
| | | { |
| | | // This index is not a candidate for rebuilding. |
| | | i.remove(); |
| | | } |
| | | } |
| | | if (!mutableCopy.isEmpty()) |
| | | { |
| | | extensibleIndexMap.put(new IndexKey(attrType, |
| | | ImportIndexType.EX_SUBSTRING, 0), mutableCopy); |
| | | } |
| | | } |
| | | Collection<Index> sharedIndexes = attrIndex.getExtensibleIndexes().get( |
| | | EXTENSIBLE_INDEXER_ID_SHARED); |
| | | if (sharedIndexes != null && !sharedIndexes.isEmpty()) |
| | | { |
| | | List<Index> mutableCopy = new LinkedList<Index>(sharedIndexes); |
| | | Iterator<Index> i = mutableCopy.iterator(); |
| | | while (i.hasNext()) |
| | | { |
| | | Index sharedIndex = i.next(); |
| | | if (!onlyDegraded || !sharedIndex.isTrusted()) |
| | | { |
| | | entryContainer.clearDatabase(sharedIndex); |
| | | int id = System.identityHashCode(sharedIndex); |
| | | idContainerMap.putIfAbsent(id, sharedIndex); |
| | | } |
| | | else |
| | | { |
| | | // This index is not a candidate for rebuilding. |
| | | i.remove(); |
| | | } |
| | | } |
| | | if (!mutableCopy.isEmpty()) |
| | | { |
| | | extensibleIndexMap.put(new IndexKey(attrType, |
| | | ImportIndexType.EX_SHARED, 0), mutableCopy); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | |
| | | private |
| | | void processEntry(Entry entry, EntryID entryID) throws DatabaseException, |
| | | DirectoryException, JebException, InterruptedException |
| | | private void processEntry(Entry entry, EntryID entryID) |
| | | throws DatabaseException, DirectoryException, JebException, |
| | | InterruptedException |
| | | { |
| | | if(dn2id != null) |
| | | { |
| | |
| | | processVLVIndexes(entry, entryID); |
| | | } |
| | | |
| | | |
| | | private void processVLVIndexes(Entry entry, EntryID entryID) |
| | | throws DatabaseException, JebException, DirectoryException |
| | | { |
| | | for(VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) { |
| | | for (VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) |
| | | { |
| | | Transaction transaction = null; |
| | | vlvIdx.addEntry(transaction, entryID, entry); |
| | | } |
| | | } |
| | | |
| | | |
| | | private |
| | | void processExtensibleIndexes(Entry entry, EntryID entryID) throws |
| | | InterruptedException |
| | | private void processExtensibleIndexes(Entry entry, EntryID entryID) |
| | | throws InterruptedException |
| | | { |
| | | for(Map.Entry<IndexKey, Collection<Index>> mapEntry : |
| | | this.extensibleIndexMap.entrySet()) { |
| | | this.extensibleIndexMap.entrySet()) |
| | | { |
| | | IndexKey key = mapEntry.getKey(); |
| | | AttributeType attrType = key.getAttributeType(); |
| | | if(entry.hasAttribute(attrType)) { |
| | | if (entry.hasAttribute(attrType)) |
| | | { |
| | | Collection<Index> indexes = mapEntry.getValue(); |
| | | for(Index index : indexes) { |
| | | for (Index index : indexes) |
| | | { |
| | | processAttribute(index, entry, entryID, key); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | |
| | | private void |
| | | processIndexes(Entry entry, EntryID entryID) throws |
| | | DatabaseException, InterruptedException |
| | | private void processIndexes(Entry entry, EntryID entryID) |
| | | throws DatabaseException, InterruptedException |
| | | { |
| | | for(Map.Entry<IndexKey, Index> mapEntry : |
| | | indexMap.entrySet()) { |
| | | for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet()) |
| | | { |
| | | IndexKey key = mapEntry.getKey(); |
| | | AttributeType attrType = key.getAttributeType(); |
| | | if(entry.hasAttribute(attrType)) { |
| | | if (entry.hasAttribute(attrType)) |
| | | { |
| | | ImportIndexType indexType = key.getIndexType(); |
| | | Index index = mapEntry.getValue(); |
| | | if(indexType == ImportIndexType.SUBSTRING) |
| | | { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attrType, ImportIndexType.SUBSTRING, |
| | | index.getIndexEntryLimit())); |
| | | processAttribute(index, entry, entryID, new IndexKey(attrType, |
| | | ImportIndexType.SUBSTRING, index.getIndexEntryLimit())); |
| | | } |
| | | else |
| | | { |
| | | processAttribute(index, entry, entryID, |
| | | new IndexKey(attrType, indexType, |
| | | index.getIndexEntryLimit())); |
| | | processAttribute(index, entry, entryID, new IndexKey(attrType, |
| | | indexType, index.getIndexEntryLimit())); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Return the number of entries processed by the rebuild manager. |
| | | * |
| | |
| | | return this.entriesProcessed.get(); |
| | | } |
| | | |
| | | |
| | | /** |
| | | * Return the total number of entries to process by the rebuild manager. |
| | | * |
| | |
| | | } |
| | | |
| | | @Override |
| | | public void diskLowThresholdReached(DiskSpaceMonitor monitor) { |
| | | public void diskLowThresholdReached(DiskSpaceMonitor monitor) |
| | | { |
| | | diskFullThresholdReached(monitor); |
| | | } |
| | | |
| | | @Override |
| | | public void diskFullThresholdReached(DiskSpaceMonitor monitor) { |
| | | public void diskFullThresholdReached(DiskSpaceMonitor monitor) |
| | | { |
| | | isCanceled = true; |
| | | Message msg = ERR_REBUILD_INDEX_LACK_DISK.get( |
| | | monitor.getDirectory().getPath(), monitor.getFreeSpace(), |
| | | monitor.getLowThreshold()); |
| | | Message msg = |
| | | ERR_REBUILD_INDEX_LACK_DISK.get(monitor.getDirectory().getPath(), |
| | | monitor.getFreeSpace(), monitor.getLowThreshold()); |
| | | logError(msg); |
| | | } |
| | | |
| | | @Override |
| | | public void diskSpaceRestored(DiskSpaceMonitor monitor) { |
| | | public void diskSpaceRestored(DiskSpaceMonitor monitor) |
| | | { |
| | | // Do nothing |
| | | } |
| | | } |
| | | |
| | | /** |
| | | * This class reports progress of rebuild index processing at fixed |
| | | * intervals. |
| | | * This class reports progress of rebuild index processing at fixed intervals. |
| | | */ |
| | | private class RebuildFirstPhaseProgressTask extends TimerTask |
| | | { |
| | | /** |
| | | * The number of records that had been processed at the time of the |
| | | * previous progress report. |
| | | * The number of records that had been processed at the time of the previous |
| | | * progress report. |
| | | */ |
| | | private long previousProcessed = 0; |
| | | |
| | |
| | | /** |
| | | * Create a new rebuild index progress task. |
| | | * |
| | | * @throws DatabaseException If an error occurred while accessing the JE |
| | | * database. |
| | | * @throws DatabaseException |
| | | * If an error occurred while accessing the JE database. |
| | | */ |
| | | public RebuildFirstPhaseProgressTask() throws DatabaseException |
| | | { |
| | |
| | | { |
| | | completed = 100f*entriesProcessed / rebuildManager.getTotEntries(); |
| | | } |
| | | Message message = NOTE_JEB_REBUILD_PROGRESS_REPORT.get(completed, |
| | | entriesProcessed, rebuildManager.getTotEntries(), rate); |
| | | Message message = |
| | | NOTE_JEB_REBUILD_PROGRESS_REPORT.get(completed, entriesProcessed, |
| | | rebuildManager.getTotEntries(), rate); |
| | | logError(message); |
| | | try |
| | | { |
| | |
| | | { |
| | | cacheMissRate = nCacheMiss/(float)deltaCount; |
| | | } |
| | | message = NOTE_JEB_REBUILD_CACHE_AND_MEMORY_REPORT.get( |
| | | freeMemory, cacheMissRate); |
| | | message = |
| | | NOTE_JEB_REBUILD_CACHE_AND_MEMORY_REPORT.get(freeMemory, |
| | | cacheMissRate); |
| | | logError(message); |
| | | prevEnvStats = envStats; |
| | | } |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * This class reports progress of first phase of import processing at |
| | | * fixed intervals. |
| | | * This class reports progress of first phase of import processing at fixed |
| | | * intervals. |
| | | */ |
| | | private final class FirstPhaseProgressTask extends TimerTask |
| | | { |
| | | /** |
| | | * The number of entries that had been read at the time of the |
| | | * previous progress report. |
| | | * The number of entries that had been read at the time of the previous |
| | | * progress report. |
| | | */ |
| | | private long previousCount = 0; |
| | | |
| | |
| | | */ |
| | | private EnvironmentStats previousStats; |
| | | |
| | | |
| | | // Determines if eviction has been detected. |
| | | private boolean evicting = false; |
| | | |
| | | // Entry count when eviction was detected. |
| | | private long evictionEntryCount = 0; |
| | | |
| | | |
| | | /** |
| | | * Create a new import progress task. |
| | | */ |
| | |
| | | previousTime = System.currentTimeMillis(); |
| | | try |
| | | { |
| | | previousStats = |
| | | rootContainer.getEnvironmentStats(new StatsConfig()); |
| | | previousStats = rootContainer.getEnvironmentStats(new StatsConfig()); |
| | | } |
| | | catch (DatabaseException e) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * The action to be performed by this timer task. |
| | | */ |
| | |
| | | long entriesIgnored = reader.getEntriesIgnored(); |
| | | long entriesRejected = reader.getEntriesRejected(); |
| | | float rate = 1000f * deltaCount / deltaTime; |
| | | message = NOTE_JEB_IMPORT_PROGRESS_REPORT.get(entriesRead, |
| | | entriesIgnored, entriesRejected, 0, rate); |
| | | message = |
| | | NOTE_JEB_IMPORT_PROGRESS_REPORT.get(entriesRead, entriesIgnored, |
| | | entriesRejected, 0, rate); |
| | | logError(message); |
| | | try |
| | | { |
| | |
| | | { |
| | | environmentStats = tmpEnv.getEnvironmentStats(new StatsConfig()); |
| | | } |
| | | long nCacheMiss = environmentStats.getNCacheMiss() - |
| | | previousStats.getNCacheMiss(); |
| | | long nCacheMiss = |
| | | environmentStats.getNCacheMiss() - previousStats.getNCacheMiss(); |
| | | |
| | | float cacheMissRate = 0; |
| | | if (deltaCount > 0) |
| | |
| | | long evictBinsStrip = environmentStats.getNBINsStripped(); |
| | | long cleanerRuns = environmentStats.getNCleanerRuns(); |
| | | long cleanerDeletions = environmentStats.getNCleanerDeletions(); |
| | | long cleanerEntriesRead = |
| | | environmentStats.getNCleanerEntriesRead(); |
| | | long cleanerEntriesRead = environmentStats.getNCleanerEntriesRead(); |
| | | long cleanerINCleaned = environmentStats.getNINsCleaned(); |
| | | long checkPoints = environmentStats.getNCheckpoints(); |
| | | if (evictPasses != 0) |
| | |
| | | evicting = true; |
| | | evictionEntryCount = reader.getEntriesRead(); |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED |
| | | .get(evictionEntryCount); |
| | | NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED.get(evictionEntryCount); |
| | | logError(message); |
| | | } |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get( |
| | | evictPasses, evictNodes, evictBinsStrip); |
| | | NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get(evictPasses, |
| | | evictNodes, evictBinsStrip); |
| | | logError(message); |
| | | } |
| | | if (cleanerRuns != 0) |
| | | { |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_CLEANER_STATS.get(cleanerRuns, |
| | | cleanerDeletions, cleanerEntriesRead, |
| | | cleanerINCleaned); |
| | | cleanerDeletions, cleanerEntriesRead, cleanerINCleaned); |
| | | logError(message); |
| | | } |
| | | if (checkPoints > 1) |
| | | { |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints); |
| | | message = NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints); |
| | | logError(message); |
| | | } |
| | | previousStats = environmentStats; |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * This class reports progress of the second phase of import processing at |
| | | * fixed intervals. |
| | |
| | | private class SecondPhaseProgressTask extends TimerTask |
| | | { |
| | | /** |
| | | * The number of entries that had been read at the time of the |
| | | * previous progress report. |
| | | * The number of entries that had been read at the time of the previous |
| | | * progress report. |
| | | */ |
| | | private long previousCount = 0; |
| | | |
| | |
| | | |
| | | private long latestCount; |
| | | |
| | | |
| | | |
| | | /** |
| | | * Create a new import progress task. |
| | | * |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * The action to be performed by this timer task. |
| | | */ |
| | |
| | | long freeMemory = runTime.freeMemory() / MB; |
| | | EnvironmentStats environmentStats = |
| | | rootContainer.getEnvironmentStats(new StatsConfig()); |
| | | long nCacheMiss = environmentStats.getNCacheMiss() - |
| | | previousStats.getNCacheMiss(); |
| | | long nCacheMiss = |
| | | environmentStats.getNCacheMiss() - previousStats.getNCacheMiss(); |
| | | |
| | | float cacheMissRate = 0; |
| | | if (deltaCount > 0) |
| | |
| | | evicting = true; |
| | | } |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get( |
| | | evictPasses, evictNodes, evictBinsStrip); |
| | | NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get(evictPasses, |
| | | evictNodes, evictBinsStrip); |
| | | logError(message); |
| | | } |
| | | if (cleanerRuns != 0) |
| | | { |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_CLEANER_STATS.get(cleanerRuns, |
| | | cleanerDeletions, cleanerEntriesRead, |
| | | cleanerINCleaned); |
| | | cleanerDeletions, cleanerEntriesRead, cleanerINCleaned); |
| | | logError(message); |
| | | } |
| | | if (checkPoints > 1) |
| | | { |
| | | message = |
| | | NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints); |
| | | message = NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints); |
| | | logError(message); |
| | | } |
| | | previousStats = environmentStats; |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * A class to hold information about the entry determined by the LDIF reader. |
| | | * Mainly the suffix the entry belongs under and the ID assigned to it by the |
| | | * reader. |
| | | * |
| | | */ |
| | | public class EntryInformation |
| | | { |
| | | private EntryID entryID; |
| | | private Suffix suffix; |
| | | |
| | | |
| | | /** |
| | | * Return the suffix associated with the entry. |
| | | * |
| | |
| | | /** |
| | | * Set the suffix instance associated with the entry. |
| | | * |
| | | * @param suffix The suffix associated with the entry. |
| | | * @param suffix |
| | | * The suffix associated with the entry. |
| | | */ |
| | | public void setSuffix(Suffix suffix) |
| | | { |
| | |
| | | /** |
| | | * Set the entry's ID. |
| | | * |
| | | * @param entryID The entry ID to set the entry ID to. |
| | | * @param entryID |
| | | * The entry ID to set the entry ID to. |
| | | */ |
| | | public void setEntryID(EntryID entryID) |
| | | { |
| | |
| | | |
| | | /** |
| | | * This class defines the individual index type available. |
| | | * |
| | | */ |
| | | public enum ImportIndexType { |
| | | public enum ImportIndexType |
| | | { |
| | | /** |
| | | * The DN index type. |
| | | **/ |
| | |
| | | } |
| | | |
| | | /** |
| | | * This class is used as an index key for hash maps that need to |
| | | * process multiple suffix index elements into a single queue and/or maps |
| | | * based on both attribute type and index type |
| | | * (ie., cn.equality, sn.equality,...). |
| | | * This class is used as an index key for hash maps that need to process |
| | | * multiple suffix index elements into a single queue and/or maps based on |
| | | * both attribute type and index type (ie., cn.equality, sn.equality,...). |
| | | */ |
| | | public class IndexKey { |
| | | public class IndexKey |
| | | { |
| | | |
| | | private final AttributeType attributeType; |
| | | private final ImportIndexType indexType; |
| | | private final int entryLimit; |
| | | |
| | | |
| | | /** |
| | | * Create index key instance using the specified attribute type, index type |
| | | * and index entry limit. |
| | | * |
| | | * @param attributeType The attribute type. |
| | | * @param indexType The index type. |
| | | * @param entryLimit The entry limit for the index. |
| | | * @param attributeType |
| | | * The attribute type. |
| | | * @param indexType |
| | | * The index type. |
| | | * @param entryLimit |
| | | * The entry limit for the index. |
| | | */ |
| | | IndexKey(AttributeType attributeType, ImportIndexType indexType, |
| | | int entryLimit) |
| | |
| | | |
| | | /** |
| | | * An equals method that uses both the attribute type and the index type. |
| | | * Only returns {@code true} if the attribute type and index type are |
| | | * equal. |
| | | * Only returns {@code true} if the attribute type and index type are equal. |
| | | * |
| | | * @param obj the object to compare. |
| | | * @param obj |
| | | * the object to compare. |
| | | * @return {@code true} if the objects are equal, or {@code false} if they |
| | | * are not. |
| | | */ |
| | | @Override |
| | | public boolean equals(Object obj) |
| | | { |
| | | if (obj instanceof IndexKey) { |
| | | if (obj instanceof IndexKey) |
| | | { |
| | | IndexKey oKey = (IndexKey) obj; |
| | | if(attributeType.equals(oKey.getAttributeType()) && |
| | | indexType.equals(oKey.getIndexType())) |
| | | if (attributeType.equals(oKey.getAttributeType()) |
| | | && indexType.equals(oKey.getIndexType())) |
| | | { |
| | | return true; |
| | | } |
| | |
| | | } |
| | | |
| | | /** |
| | | * Return the index key name, which is the attribute type primary name, |
| | | * a period, and the index type name. Used for building file names and |
| | | * Return the index key name, which is the attribute type primary name, a |
| | | * period, and the index type name. Used for building file names and |
| | | * progress output. |
| | | * |
| | | * @return The index key name. |
| | | */ |
| | | public String getName() |
| | | { |
| | | return attributeType.getPrimaryName() + "." + |
| | | StaticUtils.toLowerCase(indexType.name()); |
| | | return attributeType.getPrimaryName() + "." |
| | | + StaticUtils.toLowerCase(indexType.name()); |
| | | } |
| | | |
| | | /** |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | /** |
| | | * The temporary enviroment will be shared when multiple suffixes are being |
| | | * The temporary environment will be shared when multiple suffixes are being |
| | | * processed. This interface is used by those suffix instance to do parental |
| | | * checking of the DN cache. |
| | | */ |
| | | public static interface DNCache { |
| | | public static interface DNCache |
| | | { |
| | | |
| | | /** |
| | | * Returns {@code true} if the specified DN is contained in the DN cache, |
| | | * or {@code false} otherwise. |
| | | * Returns {@code true} if the specified DN is contained in the DN cache, or |
| | | * {@code false} otherwise. |
| | | * |
| | | * @param dn The DN to check the presence of. |
| | | * @param dn |
| | | * The DN to check the presence of. |
| | | * @return {@code true} if the cache contains the DN, or {@code false} if it |
| | | * is not. |
| | | * @throws DatabaseException If an error occurs reading the database. |
| | | * @throws DatabaseException |
| | | * If an error occurs reading the database. |
| | | */ |
| | | public boolean contains(DN dn) throws DatabaseException; |
| | | } |
| | |
| | | * Create a temporary DB environment and database to be used as a cache of |
| | | * DNs when DN validation is performed in phase one processing. |
| | | * |
   * @param envPath
   *          The file path to create the environment under.
   * @throws DatabaseException
   *           If an error occurs either creating the environment or the DN
   *           database.
   */
| | | public TmpEnv(File envPath) throws DatabaseException |
| | | { |
| | |
| | | envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false"); |
| | | envConfig.setConfigParam(EnvironmentConfig.EVICTOR_LRU_ONLY, "false"); |
| | | envConfig.setConfigParam(EnvironmentConfig.EVICTOR_NODES_PER_SCAN, "128"); |
| | | envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY, |
| | | Long.toString(tmpEnvCacheSize)); |
| | | envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY, Long |
| | | .toString(tmpEnvCacheSize)); |
| | | DatabaseConfig dbConfig = new DatabaseConfig(); |
| | | dbConfig.setAllowCreate(true); |
| | | dbConfig.setTransactional(false); |
| | |
| | | private byte[] hashCode(byte[] b) |
| | | { |
| | | long hash = FNV_INIT; |
| | | for (byte aB : b) { |
| | | for (byte aB : b) |
| | | { |
| | | hash ^= aB; |
| | | hash *= FNV_PRIME; |
| | | } |
| | |
| | | |
| | | /** |
| | | * Shutdown the temporary environment. |
| | | * @throws JebException If error occurs. |
| | | * |
| | | * @throws JebException |
| | | * If error occurs. |
| | | */ |
| | | public void shutdown() throws JebException |
| | | { |
| | |
| | | * the DN does not already exist in the cache and was inserted, or |
| | | * {@code false} if the DN exists already in the cache. |
| | | * |
     * @param dn
     *          The DN to insert in the cache.
     * @param val
     *          A database entry to use in the insert.
     * @param key
     *          A database entry to use in the insert.
     * @return {@code true} if the DN was inserted in the cache, or
     *         {@code false} if the DN exists in the cache already and could
     *         not be inserted.
     * @throws JebException
     *           If an error occurs accessing the database.
     */
| | | public boolean insert(DN dn, DatabaseEntry val, DatabaseEntry key) |
| | | throws JebException |
| | |
| | | status = cursor.getSearchKey(key, dns, LockMode.RMW); |
| | | if(status == OperationStatus.NOTFOUND) |
| | | { |
| | | Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | Message message = |
| | | Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | "Search DN cache failed."); |
| | | throw new JebException(message); |
| | | } |
| | |
| | | } |
| | | |
| | | //Add the DN to the DNs as because of a hash collision. |
| | | private void addDN(DatabaseEntry val, Cursor cursor, |
| | | byte[] dnBytes) throws JebException |
| | | private void addDN(DatabaseEntry val, Cursor cursor, byte[] dnBytes) |
| | | throws JebException |
| | | { |
| | | byte[] bytes = val.getData(); |
| | | int pLen = PackedInteger.getWriteIntLength(dnBytes.length); |
| | |
| | | OperationStatus status = cursor.putCurrent(newVal); |
| | | if(status != OperationStatus.SUCCESS) |
| | | { |
| | | Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | Message message = |
| | | Message.raw(Category.JEB, Severity.SEVERE_ERROR, |
| | | "Add of DN to DN cache failed."); |
| | | throw new JebException(message); |
| | | } |
| | |
| | | /** |
| | | * Check if the specified DN is contained in the temporary DN cache. |
| | | * |
| | | * @param dn A DN check for. |
| | | * @return {@code true} if the specified DN is in the temporary DN cache, |
| | | * or {@code false} if it is not. |
| | | * @param dn |
| | | * A DN check for. |
| | | * @return {@code true} if the specified DN is in the temporary DN cache, or |
| | | * {@code false} if it is not. |
| | | */ |
| | | @Override |
| | | public boolean contains(DN dn) |
| | |
| | | DatabaseEntry key = new DatabaseEntry(); |
| | | byte[] dnBytes = StaticUtils.getBytes(dn.toNormalizedString()); |
| | | key.setData(hashCode(dnBytes)); |
| | | try { |
| | | try |
| | | { |
| | | cursor = dnCache.openCursor(null, CursorConfig.DEFAULT); |
| | | DatabaseEntry dns = new DatabaseEntry(); |
| | | OperationStatus status = |
| | |
| | | /** |
| | | * Return temporary environment stats. |
| | | * |
| | | * @param statsConfig A stats configuration instance. |
| | | * |
| | | * @param statsConfig |
| | | * A stats configuration instance. |
| | | * @return Environment stats. |
| | | * @throws DatabaseException If an error occurs retrieving the stats. |
| | | * @throws DatabaseException |
| | | * If an error occurs retrieving the stats. |
| | | */ |
| | | public EnvironmentStats getEnvironmentStats(StatsConfig statsConfig) |
| | | throws DatabaseException |
| | |
| | | * {@inheritDoc} |
| | | */ |
| | | @Override |
| | | public void diskLowThresholdReached(DiskSpaceMonitor monitor) { |
| | | public void diskLowThresholdReached(DiskSpaceMonitor monitor) |
| | | { |
| | | diskFullThresholdReached(monitor); |
| | | } |
| | | |
| | |
| | | * {@inheritDoc} |
| | | */ |
| | | @Override |
| | | public void diskFullThresholdReached(DiskSpaceMonitor monitor) { |
| | | public void diskFullThresholdReached(DiskSpaceMonitor monitor) |
| | | { |
| | | isCanceled = true; |
| | | Message msg; |
| | | if(!isPhaseOneDone) |
| | | { |
| | | msg = ERR_IMPORT_LDIF_LACK_DISK_PHASE_ONE.get( |
| | | monitor.getDirectory().getPath(), monitor.getFreeSpace(), |
| | | monitor.getLowThreshold()); |
| | | msg = |
| | | ERR_IMPORT_LDIF_LACK_DISK_PHASE_ONE.get(monitor.getDirectory() |
| | | .getPath(), monitor.getFreeSpace(), monitor.getLowThreshold()); |
| | | } |
| | | else |
| | | { |
| | | msg = ERR_IMPORT_LDIF_LACK_DISK_PHASE_TWO.get( |
| | | monitor.getDirectory().getPath(), monitor.getFreeSpace(), |
| | | monitor.getLowThreshold()); |
| | | msg = |
| | | ERR_IMPORT_LDIF_LACK_DISK_PHASE_TWO.get(monitor.getDirectory() |
| | | .getPath(), monitor.getFreeSpace(), monitor.getLowThreshold()); |
| | | } |
| | | logError(msg); |
| | | } |
| | |
| | | * {@inheritDoc} |
| | | */ |
| | | @Override |
| | | public void diskSpaceRestored(DiskSpaceMonitor monitor) { |
| | | public void diskSpaceRestored(DiskSpaceMonitor monitor) |
| | | { |
| | | // Do nothing. |
| | | } |
| | | } |