Fix issues 2831 and 1948.
Change the local DB backend configuration so that the db-directory and import-temp-directory properties now name a parent directory, within which a sub-directory named after the backend-id is created. This simplifies configuration and reduces the risk of name collisions between backends.
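With the default parent values ("db" and "import-tmp") and, for example, the backend-id "indexRoot" used in the unit-test configuration, each backend now works in a sub-directory of the configured parent. The sketch below is illustrative only and is not part of the patch (the class name is invented for the example); in the server the parent path is additionally resolved against the instance root via getFileForPath(), as the diff hunks show.

    import java.io.File;

    // Illustrative sketch: how a backend derives its working directories under
    // the new semantics by appending its backend-id to the configured parents.
    final class DirectoryResolutionSketch
    {
      public static void main(String[] args)
      {
        String backendId = "indexRoot";                // example backend-id from the test config
        File dbParent = new File("db");                // ds-cfg-db-directory (default "db")
        File importTmpParent = new File("import-tmp"); // ds-cfg-import-temp-directory (default "import-tmp")

        File backendDbDir = new File(dbParent, backendId);         // db/indexRoot
        File importTempDir = new File(importTmpParent, backendId); // import-tmp/indexRoot

        System.out.println(backendDbDir + " and " + importTempDir);
      }
    }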
| | |
| | | <mkdir dir="${pdir}/config/messages" /> |
| | | <mkdir dir="${pdir}/config/MakeLDIF" /> |
| | | <mkdir dir="${pdir}/db" /> |
| | | <mkdir dir="${pdir}/import-tmp" /> |
| | | <mkdir dir="${pdir}/changelogDb" /> |
| | | <mkdir dir="${pdir}/ldif" /> |
| | | <mkdir dir="${pdir}/legal-notices" /> |
| | |
| | | ds-cfg-subtree-delete-size-limit: 100000 |
| | | ds-cfg-subtree-delete-batch-size: 5000 |
| | | ds-cfg-preload-time-limit: 0 seconds |
| | | ds-cfg-import-temp-directory: importTmp |
| | | ds-cfg-import-temp-directory: import-tmp |
| | | ds-cfg-import-buffer-size: 256 megabytes |
| | | ds-cfg-import-queue-size: 100 |
| | | ds-cfg-import-pass-size: 0 |
| | |
| | | <adm:requires-admin-action> |
| | | <adm:component-restart /> |
| | | </adm:requires-admin-action> |
| | | <adm:default-behavior> |
| | | <adm:defined> |
| | | <adm:value>db</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:string /> |
| | | </adm:syntax> |
| | |
| | | </adm:profile> |
| | | </adm:property> |
| | | <adm:property name="import-temp-directory" mandatory="true"> |
| | | <adm:TODO> |
| | | Default to something derived from the backend-id |
| | | </adm:TODO> |
| | | <adm:synopsis> |
| | | Specifies the location of the directory that will be used for the |
| | | files used to hold temporary information that will be used during |
| | |
| | | </adm:none> |
| | | </adm:requires-admin-action> |
| | | <adm:default-behavior> |
| | | <adm:undefined /> |
| | | <adm:defined> |
| | | <adm:value>import-tmp</adm:value> |
| | | </adm:defined> |
| | | </adm:default-behavior> |
| | | <adm:syntax> |
| | | <adm:string /> |
| | |
| | | sending request to get remote monitor data |
| | | SEVERE_ERR_EXCEPTION_REPLAYING_REPLICATION_MESSAGE_109=An Exception was caught \ |
| | | while replaying replication message : %s |
| | | SEVERE_ERR_REPLICATION_SERVER_CONFIG_NOT_FOUND_110=The replication server \ |
| | | configuration could not be found |
| | | |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.backends.jeb; |
| | | |
| | |
| | | public class AttributeIndexBuilder implements IndexBuilder |
| | | { |
| | | /** |
| | | * The import context. |
| | | * The directory in which temporary merge files are held. |
| | | */ |
| | | private ImportContext importContext; |
| | | private final File tempDir; |
| | | |
| | | /** |
| | | * The index database. |
| | | */ |
| | | private Index index; |
| | | private final Index index; |
| | | |
| | | /** |
| | | * The indexer to generate the index keys. |
| | | */ |
| | | private Indexer indexer; |
| | | private final Indexer indexer; |
| | | |
| | | /** |
| | | * The write buffer. |
| | | */ |
| | | ArrayList<IndexMod> buffer; |
| | | private ArrayList<IndexMod> buffer; |
| | | |
| | | /** |
| | | * The write buffer size. |
| | | */ |
| | | private int bufferSize; |
| | | private final int bufferSize; |
| | | |
| | | /** |
| | | * Current output file number. |
| | |
| | | private int fileNumber = 0; |
| | | |
| | | /** |
| | | * The index entry limit. |
| | | */ |
| | | private int entryLimit; |
| | | |
| | | /** |
| | | * A unique prefix for temporary files to prevent conflicts. |
| | | */ |
| | | private String fileNamePrefix; |
| | | private final String fileNamePrefix; |
| | | |
| | | /** |
| | | * Indicates whether we are replacing existing data or not. |
| | | */ |
| | | private boolean replaceExisting = false; |
| | | private final boolean replaceExisting; |
| | | |
| | | |
| | | private ByteArrayOutputStream addBytesStream = new ByteArrayOutputStream(); |
| | | private ByteArrayOutputStream delBytesStream = new ByteArrayOutputStream(); |
| | | private final ByteArrayOutputStream addBytesStream = |
| | | new ByteArrayOutputStream(); |
| | | private final ByteArrayOutputStream delBytesStream = |
| | | new ByteArrayOutputStream(); |
| | | |
| | | private DataOutputStream addBytesDataStream; |
| | | private DataOutputStream delBytesDataStream; |
| | | private final DataOutputStream addBytesDataStream; |
| | | private final DataOutputStream delBytesDataStream; |
| | | |
| | | /** |
| | | * A file name filter to identify temporary files we have written. |
| | | */ |
| | | private FilenameFilter filter = new FilenameFilter() |
| | | private final FilenameFilter filter = new FilenameFilter() |
| | | { |
| | | public boolean accept(File d, String name) |
| | | { |
| | |
| | | public AttributeIndexBuilder(ImportContext importContext, |
| | | Index index, int entryLimit, long bufferSize) |
| | | { |
| | | this.importContext = importContext; |
| | | File parentDir = getFileForPath(importContext.getConfig() |
| | | .getImportTempDirectory()); |
| | | this.tempDir = new File(parentDir, |
| | | importContext.getConfig().getBackendId()); |
| | | |
| | | this.index = index; |
| | | this.indexer = index.indexer; |
| | | this.entryLimit = entryLimit; |
| | | this.bufferSize = (int)bufferSize/100; |
| | | long tid = Thread.currentThread().getId(); |
| | | fileNamePrefix = index.getName() + "_" + tid + "_"; |
| | |
| | | public void startProcessing() |
| | | { |
| | | // Clean up any work files left over from a previous run. |
| | | File tempDir = getFileForPath( |
| | | importContext.getConfig().getImportTempDirectory()); |
| | | File[] files = tempDir.listFiles(filter); |
| | | if (files != null) |
| | | { |
| | |
| | | return index.getEntryLimitExceededCount(); |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | | * Record the insertion of an entry ID. |
| | | * @param key The index key. |
| | |
| | | // Start a new file. |
| | | fileNumber++; |
| | | String fileName = fileNamePrefix + String.valueOf(fileNumber); |
| | | File file = new File(getFileForPath( |
| | | importContext.getConfig().getImportTempDirectory()), |
| | | fileName); |
| | | File file = new File(tempDir, fileName); |
| | | BufferedOutputStream bufferedStream = |
| | | new BufferedOutputStream(new FileOutputStream(file)); |
| | | DataOutputStream dataStream = new DataOutputStream(bufferedStream); |
| | |
| | | /** |
| | | * A list of monitor providers created for this backend instance. |
| | | */ |
| | | private ArrayList<MonitorProvider> monitorProviders = |
| | | new ArrayList<MonitorProvider>(); |
| | | private ArrayList<MonitorProvider<?>> monitorProviders = |
| | | new ArrayList<MonitorProvider<?>>(); |
| | | |
| | | /** |
| | | * The base DNs defined for this backend instance. |
| | |
| | | */ |
| | | private static HashSet<String> supportedControls; |
| | | |
| | | /** |
| | | * The features supported by this backend. |
| | | */ |
| | | private static HashSet<String> supportedFeatures = new HashSet<String>(0); |
| | | |
| | | |
| | | |
| | | static |
| | | { |
| | | // Set our supported controls. |
| | |
| | | supportedControls.add(OID_VLV_REQUEST_CONTROL); |
| | | } |
| | | |
| | | /** |
| | | * The features supported by this backend. |
| | | */ |
| | | private static HashSet<String> supportedFeatures; |
| | | |
| | | static { |
| | | // Set our supported features. |
| | | supportedFeatures = new HashSet<String>(); |
| | | |
| | | //NYI |
| | | } |
| | | |
| | | |
| | | |
| | | /** |
| | |
| | | */ |
| | | private long checksumDbEnv() { |
| | | |
| | | File backendDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File parentDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File backendDirectory = new File(parentDirectory, cfg.getBackendId()); |
| | | |
| | | List<File> jdbFiles = new ArrayList<File>(); |
| | | if(backendDirectory.isDirectory()) |
| | | { |
| | |
| | | } |
| | | |
| | | // Deregister our monitor providers. |
| | | for (MonitorProvider monitor : monitorProviders) |
| | | for (MonitorProvider<?> monitor : monitorProviders) |
| | | { |
| | | DirectoryServer.deregisterMonitorProvider( |
| | | monitor.getMonitorInstanceName().toLowerCase()); |
| | | } |
| | | monitorProviders = new ArrayList<MonitorProvider>(); |
| | | monitorProviders = new ArrayList<MonitorProvider<?>>(); |
| | | |
| | | // We presume the server will prevent more operations coming into this |
| | | // backend, but there may be existing operations already in the |
| | |
| | | @Override() |
| | | public HashSet<String> getSupportedFeatures() |
| | | { |
| | | return new HashSet<String>(); //NYI |
| | | return supportedFeatures; |
| | | } |
| | | |
| | | |
| | |
| | | // environment and re-open it. Only do this when we are |
| | | // importing to all the base DNs in the backend or if the backend only |
| | | // have one base DN. |
| | | |
| | | File backendDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File parentDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File backendDirectory = new File(parentDirectory, cfg.getBackendId()); |
| | | EnvManager.removeFiles(backendDirectory.getPath()); |
| | | envConfig.setReadOnly(false); |
| | | envConfig.setAllowCreate(true); |
| | |
| | | { |
| | | BackupManager backupManager = |
| | | new BackupManager(getBackendID()); |
| | | backupManager.createBackup(cfg, backupConfig); |
| | | File parentDir = getFileForPath(cfg.getDBDirectory()); |
| | | File backendDir = new File(parentDir, cfg.getBackendId()); |
| | | backupManager.createBackup(backendDir, backupConfig); |
| | | } |
| | | |
| | | |
| | |
| | | { |
| | | BackupManager backupManager = |
| | | new BackupManager(getBackendID()); |
| | | backupManager.restoreBackup(cfg, restoreConfig); |
| | | File parentDir = getFileForPath(cfg.getDBDirectory()); |
| | | File backendDir = new File(parentDir, cfg.getBackendId()); |
| | | backupManager.restoreBackup(backendDir, restoreConfig); |
| | | } |
| | | |
| | | |
| | |
| | | throws ConfigException, JebException |
| | | { |
| | | // Determine the backend database directory. |
| | | File backendDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File parentDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File backendDirectory = new File(parentDirectory, cfg.getBackendId()); |
| | | EnvManager.removeFiles(backendDirectory.getPath()); |
| | | } |
| | | |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.backends.jeb; |
| | | import org.opends.messages.Message; |
| | |
| | | import static org.opends.messages.JebMessages.*; |
| | | import static org.opends.server.util.ServerConstants.*; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | | import org.opends.server.admin.std.server.LocalDBBackendCfg; |
| | | |
| | | |
| | | |
| | | /** |
| | | * A backup manager for JE backends. |
| | |
| | | * log files that are unchanged since the previous backup. The remaining |
| | | * zip entries are the JE log files themselves, which, for an incremental, |
| | | * only include those files that have changed. |
| | | * @param cfg The configuration of the backend instance for |
| | | * @param backendDir The directory of the backend instance for |
| | | * which the backup is required. |
| | | * @param backupConfig The configuration to use when performing the backup. |
| | | * @throws DirectoryException If a Directory Server error occurs. |
| | | */ |
| | | public void createBackup(LocalDBBackendCfg cfg, BackupConfig backupConfig) |
| | | public void createBackup(File backendDir, BackupConfig backupConfig) |
| | | throws DirectoryException |
| | | { |
| | | // Get the properties to use for the backup. |
| | |
| | | // If this is an incremental, determine the base backup for this backup. |
| | | HashSet<String> dependencies = new HashSet<String>(); |
| | | BackupInfo baseBackup = null; |
| | | File backendDir = getFileForPath(cfg.getDBDirectory()); |
| | | /* |
| | | FilenameFilter backupTagFilter = new FilenameFilter() |
| | | { |
| | |
| | | |
| | | /** |
| | | * Restore a JE backend from backup, or verify the backup. |
| | | * @param cfg The configuration of the backend instance to be |
| | | * @param backendDir The directory of the backend instance to be |
| | | * restored. |
| | | * @param restoreConfig The configuration to use when performing the restore. |
| | | * @throws DirectoryException If a Directory Server error occurs. |
| | | */ |
| | | public void restoreBackup(LocalDBBackendCfg cfg, |
| | | public void restoreBackup(File backendDir, |
| | | RestoreConfig restoreConfig) |
| | | throws DirectoryException |
| | | { |
| | |
| | | |
| | | // Create a restore directory with a different name to the backend |
| | | // directory. |
| | | File currentDir = getFileForPath(cfg.getDBDirectory()); |
| | | File restoreDir = new File(currentDir.getPath() + "-restore-" + backupID); |
| | | File restoreDir = new File(backendDir.getPath() + "-restore-" + backupID); |
| | | if (!verifyOnly) |
| | | { |
| | | File[] files = restoreDir.listFiles(); |
| | |
| | | // Delete the current backend directory and rename the restore directory. |
| | | if (!verifyOnly) |
| | | { |
| | | File[] files = currentDir.listFiles(); |
| | | File[] files = backendDir.listFiles(); |
| | | if (files != null) |
| | | { |
| | | for (File f : files) |
| | |
| | | f.delete(); |
| | | } |
| | | } |
| | | currentDir.delete(); |
| | | if (!restoreDir.renameTo(currentDir)) |
| | | backendDir.delete(); |
| | | if (!restoreDir.renameTo(backendDir)) |
| | | { |
| | | Message msg = ERR_JEB_CANNOT_RENAME_RESTORE_DIRECTORY.get( |
| | | restoreDir.getPath(), currentDir.getPath()); |
| | | restoreDir.getPath(), backendDir.getPath()); |
| | | throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), |
| | | msg); |
| | | } |
| | |
| | | /** |
| | | * The number of merge passes. |
| | | */ |
| | | int mergePassNumber = 1; |
| | | private int mergePassNumber = 1; |
| | | |
| | | |
| | | /** |
| | |
| | | startTime = System.currentTimeMillis(); |
| | | |
| | | // Create a temporary work directory. |
| | | File tempDir = getFileForPath(config.getImportTempDirectory()); |
| | | if(!tempDir.exists() && !tempDir.mkdir()) |
| | | File parentDir = getFileForPath(config.getImportTempDirectory()); |
| | | File tempDir = new File(parentDir, config.getBackendId()); |
| | | if(!tempDir.exists() && !tempDir.mkdirs()) |
| | | { |
| | | Message msg = ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get( |
| | | String.valueOf(tempDir)); |
| | |
| | | * @throws DatabaseException If an error occurs in the JE database. |
| | | * @throws JebException If an error occurs in the JE backend. |
| | | */ |
| | | public void processEntry(ImportContext importContext, Entry entry) |
| | | private void processEntry(ImportContext importContext, Entry entry) |
| | | throws JebException, DatabaseException |
| | | { |
| | | DN entryDN = entry.getDN(); |
| | |
| | | |
| | | if (txn != null) |
| | | { |
| | | importContext.getEntryContainer().transactionCommit(txn); |
| | | EntryContainer.transactionCommit(txn); |
| | | txn = null; |
| | | } |
| | | } |
| | |
| | | { |
| | | if (txn != null) |
| | | { |
| | | importContext.getEntryContainer().transactionAbort(txn); |
| | | EntryContainer.transactionAbort(txn); |
| | | } |
| | | } |
| | | } |
| | |
| | | /** |
| | | * This class reports progress of the import job at fixed intervals. |
| | | */ |
| | | class ProgressTask extends TimerTask |
| | | private final class ProgressTask extends TimerTask |
| | | { |
| | | /** |
| | | * The number of entries that had been read at the time of the |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.backends.jeb; |
| | | import org.opends.messages.Message; |
| | |
| | | * A thread to merge a set of intermediate files from an index builder |
| | | * into an index database. |
| | | */ |
| | | public class IndexMergeThread extends DirectoryThread |
| | | final class IndexMergeThread extends DirectoryThread |
| | | { |
| | | /** |
| | | * The tracer object for the debug logger. |
| | |
| | | /** |
| | | * The configuration of the JE backend containing the index. |
| | | */ |
| | | LocalDBBackendCfg config; |
| | | private LocalDBBackendCfg config; |
| | | |
| | | /** |
| | | * The LDIF import configuration, which indicates whether we are |
| | | * appending to existing data. |
| | | */ |
| | | LDIFImportConfig ldifImportConfig; |
| | | private LDIFImportConfig ldifImportConfig; |
| | | |
| | | |
| | | /** |
| | | * The indexer to generate and compare index keys. |
| | | */ |
| | | Indexer indexer; |
| | | private Indexer indexer; |
| | | |
| | | /** |
| | | * The index database being written. |
| | | */ |
| | | Index index; |
| | | private Index index; |
| | | |
| | | |
| | | /** |
| | | * The index entry limit. |
| | | */ |
| | | int entryLimit; |
| | | |
| | | /** |
| | | * The name of the index for use in file names and log messages. |
| | | */ |
| | | String indexName; |
| | | private int entryLimit; |
| | | |
| | | /** |
| | | * Indicates whether we are replacing existing data or not. |
| | |
| | | * @param index The index database to be written. |
| | | * @param entryLimit The configured index entry limit. |
| | | */ |
| | | IndexMergeThread(LocalDBBackendCfg config, |
| | | public IndexMergeThread(LocalDBBackendCfg config, |
| | | LDIFImportConfig ldifImportConfig, |
| | | Index index, int entryLimit) |
| | | { |
| | |
| | | * written to the index. |
| | | * @throws Exception If an error occurs. |
| | | */ |
| | | public void merge() throws Exception |
| | | private void merge() throws Exception |
| | | { |
| | | // An ordered map of the current input keys from each file. |
| | | OctetStringKeyComparator comparator = |
| | |
| | | new TreeMap<ASN1OctetString, MergeValue>(comparator); |
| | | |
| | | // Open all the files. |
| | | File tempDir = getFileForPath(config.getImportTempDirectory()); |
| | | File parentDir = getFileForPath(config.getImportTempDirectory()); |
| | | File tempDir = new File(parentDir, config.getBackendId()); |
| | | File[] files = tempDir.listFiles(filter); |
| | | |
| | | if (files == null || files.length == 0) |
| | |
| | | throws DatabaseException, ConfigException |
| | | { |
| | | // Determine the backend database directory. |
| | | File backendDirectory = getFileForPath(config.getDBDirectory()); |
| | | File parentDirectory = getFileForPath(config.getDBDirectory()); |
| | | File backendDirectory = new File(parentDirectory, config.getBackendId()); |
| | | |
| | | // Create the directory if it doesn't exist. |
| | | if (!backendDirectory.exists()) |
| | |
| | | { |
| | | boolean acceptable = true; |
| | | |
| | | File backendDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | //Make sure the directory either alreadly exists or is able to create. |
| | | File parentDirectory = getFileForPath(config.getDBDirectory()); |
| | | File backendDirectory = new File(parentDirectory, config.getBackendId()); |
| | | |
| | | //Make sure the directory either already exists or is able to create. |
| | | if (!backendDirectory.exists()) |
| | | { |
| | | if(!backendDirectory.mkdirs()) |
| | |
| | | EnvironmentConfig oldEnvConfig = env.getConfig(); |
| | | EnvironmentConfig newEnvConfig = |
| | | ConfigurableEnvironment.parseConfigEntry(cfg); |
| | | Map paramsMap = EnvironmentParams.SUPPORTED_PARAMS; |
| | | Map<?,?> paramsMap = EnvironmentParams.SUPPORTED_PARAMS; |
| | | |
| | | // Iterate through native JE properties. |
| | | SortedSet<String> jeProperties = cfg.getJEProperty(); |
| | |
| | | // Create the directory if it doesn't exist. |
| | | if(!cfg.getDBDirectory().equals(this.config.getDBDirectory())) |
| | | { |
| | | File backendDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File parentDirectory = getFileForPath(config.getDBDirectory()); |
| | | File backendDirectory = |
| | | new File(parentDirectory, config.getBackendId()); |
| | | |
| | | if (!backendDirectory.exists()) |
| | | { |
| | |
| | | // Get the backend database backendDirectory permissions and apply |
| | | if(FilePermission.canSetPermissions()) |
| | | { |
| | | File backendDirectory = getFileForPath(cfg.getDBDirectory()); |
| | | File parentDirectory = getFileForPath(config.getDBDirectory()); |
| | | File backendDirectory = new File(parentDirectory, |
| | | config.getBackendId()); |
| | | try |
| | | { |
| | | if(!FilePermission.setPermissions(backendDirectory, |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.backends.jeb; |
| | | |
| | |
| | | import java.io.*; |
| | | |
| | | import com.sleepycat.je.DatabaseException; |
| | | import com.sleepycat.je.Transaction; |
| | | |
| | | /** |
| | | * This class is used to create an VLV vlvIndex for an import process. |
| | |
| | | public class VLVIndexBuilder implements IndexBuilder |
| | | { |
| | | /** |
| | | * The import context. |
| | | * The directory in which temporary merge files are held. |
| | | */ |
| | | private ImportContext importContext; |
| | | private final File tempDir; |
| | | |
| | | /** |
| | | * The vlvIndex database. |
| | | */ |
| | | private VLVIndex vlvIndex; |
| | | private final VLVIndex vlvIndex; |
| | | |
| | | /** |
| | | * The add write buffer. |
| | | */ |
| | | TreeMap<SortValues,EntryID> addBuffer; |
| | | private TreeMap<SortValues,EntryID> addBuffer; |
| | | |
| | | /** |
| | | * The delete write buffer. |
| | | */ |
| | | TreeMap<SortValues,EntryID> delBuffer; |
| | | private TreeMap<SortValues,EntryID> delBuffer; |
| | | |
| | | /** |
| | | * The write buffer size. |
| | | */ |
| | | private int bufferSize; |
| | | private final int bufferSize; |
| | | |
| | | /** |
| | | * Current output file number. |
| | |
| | | /** |
| | | * A unique prefix for temporary files to prevent conflicts. |
| | | */ |
| | | private String fileNamePrefix; |
| | | private final String fileNamePrefix; |
| | | |
| | | /** |
| | | * Indicates whether we are replacing existing data or not. |
| | | */ |
| | | private boolean replaceExisting = false; |
| | | |
| | | |
| | | private ByteArrayOutputStream addBytesStream = new ByteArrayOutputStream(); |
| | | private ByteArrayOutputStream delBytesStream = new ByteArrayOutputStream(); |
| | | |
| | | private DataOutputStream addBytesDataStream; |
| | | private DataOutputStream delBytesDataStream; |
| | | private final boolean replaceExisting; |
| | | |
| | | /** |
| | | * A file name filter to identify temporary files we have written. |
| | | */ |
| | | private FilenameFilter filter = new FilenameFilter() |
| | | private final FilenameFilter filter = new FilenameFilter() |
| | | { |
| | | public boolean accept(File d, String name) |
| | | { |
| | |
| | | public VLVIndexBuilder(ImportContext importContext, |
| | | VLVIndex vlvIndex, long bufferSize) |
| | | { |
| | | this.importContext = importContext; |
| | | File parentDir = getFileForPath(importContext.getConfig() |
| | | .getImportTempDirectory()); |
| | | this.tempDir = new File(parentDir, |
| | | importContext.getConfig().getBackendId()); |
| | | |
| | | this.vlvIndex = vlvIndex; |
| | | this.bufferSize = (int)bufferSize/100; |
| | | long tid = Thread.currentThread().getId(); |
| | | fileNamePrefix = vlvIndex.getName() + "_" + tid + "_"; |
| | | replaceExisting = |
| | | this.fileNamePrefix = vlvIndex.getName() + "_" + tid + "_"; |
| | | this.replaceExisting = |
| | | importContext.getLDIFImportConfig().appendToExistingData() && |
| | | importContext.getLDIFImportConfig().replaceExistingEntries(); |
| | | addBytesDataStream = new DataOutputStream(addBytesStream); |
| | | delBytesDataStream = new DataOutputStream(delBytesStream); |
| | | } |
| | | |
| | | /** |
| | |
| | | public void startProcessing() |
| | | { |
| | | // Clean up any work files left over from a previous run. |
| | | File tempDir = getFileForPath( |
| | | importContext.getConfig().getImportTempDirectory()); |
| | | File[] files = tempDir.listFiles(filter); |
| | | if (files != null) |
| | | { |
| | |
| | | public void processEntry(Entry oldEntry, Entry newEntry, EntryID entryID) |
| | | throws DatabaseException, IOException, DirectoryException |
| | | { |
| | | Transaction txn = null; |
| | | SortValues newValues = new SortValues(entryID, newEntry, |
| | | vlvIndex.sortOrder); |
| | | // Update the vlvIndex for this entry. |
| | |
| | | // Start a new file. |
| | | fileNumber++; |
| | | String fileName = fileNamePrefix + String.valueOf(fileNumber) + "_add"; |
| | | File file = new File(getFileForPath( |
| | | importContext.getConfig().getImportTempDirectory()), |
| | | fileName); |
| | | File file = new File(tempDir, fileName); |
| | | BufferedOutputStream bufferedStream = |
| | | new BufferedOutputStream(new FileOutputStream(file)); |
| | | DataOutputStream dataStream = new DataOutputStream(bufferedStream); |
| | |
| | | if (replaceExisting) |
| | | { |
| | | fileName = fileNamePrefix + String.valueOf(fileNumber) + "_del"; |
| | | file = new File(getFileForPath( |
| | | importContext.getConfig().getImportTempDirectory()), |
| | | fileName); |
| | | file = new File(tempDir, fileName); |
| | | bufferedStream = |
| | | new BufferedOutputStream(new FileOutputStream(file)); |
| | | dataStream = new DataOutputStream(bufferedStream); |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.backends.jeb; |
| | | import org.opends.messages.Message; |
| | |
| | | * A thread to merge a set of intermediate files from an vlvIndex builder |
| | | * into an vlvIndex database. |
| | | */ |
| | | public class VLVIndexMergeThread extends DirectoryThread |
| | | class VLVIndexMergeThread extends DirectoryThread |
| | | { |
| | | /** |
| | | * The tracer object for the debug logger. |
| | |
| | | /** |
| | | * The configuration of the JE backend containing the vlvIndex. |
| | | */ |
| | | LocalDBBackendCfg config; |
| | | private LocalDBBackendCfg config; |
| | | |
| | | /** |
| | | * The LDIF import configuration, which indicates whether we are |
| | | * appending to existing data. |
| | | */ |
| | | LDIFImportConfig ldifImportConfig; |
| | | private LDIFImportConfig ldifImportConfig; |
| | | |
| | | /** |
| | | * The vlvIndex database being written. |
| | | */ |
| | | VLVIndex vlvIndex; |
| | | |
| | | /** |
| | | * The name of the vlvIndex for use in file names and log messages. |
| | | */ |
| | | String indexName; |
| | | private VLVIndex vlvIndex; |
| | | |
| | | /** |
| | | * Indicates whether we are replacing existing data or not. |
| | |
| | | * whether we are appending to existing data. |
| | | * @param vlvIndex The vlvIndex database to be written. |
| | | */ |
| | | VLVIndexMergeThread(LocalDBBackendCfg config, |
| | | public VLVIndexMergeThread(LocalDBBackendCfg config, |
| | | LDIFImportConfig ldifImportConfig, |
| | | VLVIndex vlvIndex) |
| | | { |
| | |
| | | public void merge() throws Exception |
| | | { |
| | | // Open all the files. |
| | | File tempDir = getFileForPath(config.getImportTempDirectory()); |
| | | File parentDir = getFileForPath(config.getImportTempDirectory()); |
| | | File tempDir = new File(parentDir, config.getBackendId()); |
| | | File[] files = tempDir.listFiles(filter); |
| | | |
| | | if (files == null || files.length == 0) |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2007-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.core; |
| | | import org.opends.messages.Message; |
| | |
| | | boolean sendNotification, Message message |
| | | ) |
| | | { |
| | | // Since abandon operations can't be cancelled, we don't need to do anything |
| | | // but forward the request on to the client connection. |
| | | clientConnection.disconnect(disconnectReason, sendNotification, |
| | | message); |
| | | } |
| | |
| | | import static org.opends.server.loggers.ErrorLogger.logError; |
| | | import static org.opends.server.loggers.debug.DebugLogger.debugEnabled; |
| | | import static org.opends.server.loggers.debug.DebugLogger.getTracer; |
| | | import static org.opends.server.util.StaticUtils.getExceptionMessage; |
| | | import static org.opends.server.util.StaticUtils.*; |
| | | |
| | | import java.io.BufferedReader; |
| | | import java.io.ByteArrayInputStream; |
| | | import java.io.ByteArrayOutputStream; |
| | | import java.io.File; |
| | | import java.io.IOException; |
| | | import java.io.StringReader; |
| | | import java.util.ArrayList; |
| | |
| | | |
| | | import org.opends.messages.Message; |
| | | import org.opends.server.admin.Configuration; |
| | | import org.opends.server.admin.server.ServerManagementContext; |
| | | import org.opends.server.admin.std.server.BackendCfg; |
| | | import org.opends.server.admin.std.server.LocalDBBackendCfg; |
| | | import org.opends.server.admin.std.server.ReplicationServerCfg; |
| | | import org.opends.server.admin.std.server.ReplicationSynchronizationProviderCfg; |
| | | import org.opends.server.admin.std.server.RootCfg; |
| | | import org.opends.server.admin.std.server.SynchronizationProviderCfg; |
| | | import org.opends.server.api.Backend; |
| | | import org.opends.server.api.SynchronizationProvider; |
| | | import org.opends.server.backends.jeb.BackupManager; |
| | |
| | | // The set of supported features for this backend. |
| | | private HashSet<String> supportedFeatures; |
| | | |
| | | // The directory associated with this backend. |
| | | private BackupDirectory backendDirectory; |
| | | |
| | | ReplicationServer server; |
| | | private ReplicationServer server; |
| | | |
| | | /** |
| | | * The configuration of this backend. |
| | | */ |
| | | private LocalDBBackendCfg cfg; |
| | | private BackendCfg cfg; |
| | | |
| | | /** |
| | | * The number of milliseconds between job progress reports. |
| | |
| | | if (config != null) |
| | | { |
| | | Validator.ensureTrue(config instanceof BackendCfg); |
| | | cfg = (LocalDBBackendCfg)config; |
| | | cfg = (BackendCfg)config; |
| | | DN[] baseDNs = new DN[cfg.getBaseDN().size()]; |
| | | cfg.getBaseDN().toArray(baseDNs); |
| | | setBaseDNs(baseDNs); |
| | | backendDirectory = new BackupDirectory( |
| | | cfg.getDBDirectory(), null); |
| | | } |
| | | } |
| | | |
| | |
| | | { |
| | | try |
| | | { |
| | | server = retrievesReplicationServer(); |
| | | server = getReplicationServer(); |
| | | if (server == null) |
| | | { |
| | | return 0; |
| | |
| | | public void createBackup(BackupConfig backupConfig) |
| | | throws DirectoryException |
| | | { |
| | | BackupManager backupManager = |
| | | new BackupManager(getBackendID()); |
| | | backupManager.createBackup(cfg, backupConfig); |
| | | BackupManager backupManager = new BackupManager(getBackendID()); |
| | | File backendDir = getFileForPath(getReplicationServerCfg() |
| | | .getReplicationDBDirectory()); |
| | | backupManager.createBackup(backendDir, backupConfig); |
| | | } |
| | | |
| | | |
| | |
| | | { |
| | | BackupManager backupManager = |
| | | new BackupManager(getBackendID()); |
| | | backupManager.removeBackup(this.backendDirectory, backupID); |
| | | backupManager.removeBackup(backupDirectory, backupID); |
| | | } |
| | | |
| | | |
| | |
| | | { |
| | | BackupManager backupManager = |
| | | new BackupManager(getBackendID()); |
| | | backupManager.restoreBackup(cfg, restoreConfig); |
| | | File backendDir = getFileForPath(getReplicationServerCfg() |
| | | .getReplicationDBDirectory()); |
| | | backupManager.restoreBackup(backendDir, restoreConfig); |
| | | } |
| | | |
| | | |
| | |
| | | /** |
| | | * This class reports progress of the export job at fixed intervals. |
| | | */ |
| | | class ProgressTask extends TimerTask |
| | | private final class ProgressTask extends TimerTask |
| | | { |
| | | /** |
| | | * The number of entries that had been exported at the time of the |
| | |
| | | |
| | | if (server==null) |
| | | { |
| | | server = retrievesReplicationServer(); |
| | | server = getReplicationServer(); |
| | | |
| | | if (server == null) |
| | | { |
| | |
| | | * @return The server retrieved |
| | | * @throws DirectoryException When it occurs. |
| | | */ |
| | | protected static ReplicationServer retrievesReplicationServer() |
| | | throws DirectoryException |
| | | private ReplicationServer getReplicationServer() throws DirectoryException |
| | | { |
| | | ReplicationServer replicationServer = null; |
| | | |
| | | DirectoryServer.getSynchronizationProviders(); |
| | | for (SynchronizationProvider provider : |
| | | for (SynchronizationProvider<?> provider : |
| | | DirectoryServer.getSynchronizationProviders()) |
| | | { |
| | | if (provider instanceof MultimasterReplication) |
| | |
| | | return replicationServer; |
| | | } |
| | | |
| | | // Find the replication server configuration associated with this |
| | | // replication backend. |
| | | private ReplicationServerCfg getReplicationServerCfg() |
| | | throws DirectoryException { |
| | | RootCfg root = ServerManagementContext.getInstance().getRootConfiguration(); |
| | | |
| | | for (String name : root.listSynchronizationProviders()) { |
| | | SynchronizationProviderCfg cfg; |
| | | try { |
| | | cfg = root.getSynchronizationProvider(name); |
| | | } catch (ConfigException e) { |
| | | throw new DirectoryException(ResultCode.OPERATIONS_ERROR, |
| | | ERR_REPLICATION_SERVER_CONFIG_NOT_FOUND.get(), e); |
| | | } |
| | | if (cfg instanceof ReplicationSynchronizationProviderCfg) { |
| | | ReplicationSynchronizationProviderCfg scfg = |
| | | (ReplicationSynchronizationProviderCfg) cfg; |
| | | try { |
| | | return scfg.getReplicationServer(); |
| | | } catch (ConfigException e) { |
| | | throw new DirectoryException(ResultCode.OPERATIONS_ERROR, |
| | | ERR_REPLICATION_SERVER_CONFIG_NOT_FOUND.get(), e); |
| | | } |
| | | } |
| | | } |
| | | |
| | | // No replication server found. |
| | | throw new DirectoryException(ResultCode.OPERATIONS_ERROR, |
| | | ERR_REPLICATION_SERVER_CONFIG_NOT_FOUND.get()); |
| | | } |
| | | |
| | | /** |
| | | * Writer class to read/write from/to a bytearray. |
| | | */ |
| | |
| | | "dn: ds-cfg-backend-id="+backendId+",cn=Backends,cn=config", |
| | | "objectClass: top", |
| | | "objectClass: ds-cfg-backend", |
| | | "objectClass: ds-cfg-local-db-backend", |
| | | "ds-cfg-base-dn: dc="+backendId, |
| | | "ds-cfg-enabled: true", |
| | | "ds-cfg-writability-mode: enabled", |
| | | "ds-cfg-java-class: " + |
| | | "org.opends.server.replication.server.ReplicationBackend", |
| | | "ds-cfg-backend-id: " + backendId, |
| | | "ds-cfg-import-temp-directory: importReplChangesTmp", |
| | | "ds-cfg-db-directory: " + dbDirname); |
| | | "ds-cfg-backend-id: " + backendId); |
| | | |
| | | LDIFImportConfig ldifImportConfig = new LDIFImportConfig( |
| | | new StringReader(ldif)); |
| | |
| | | ds-cfg-index-entry-limit: 1 |
| | | ds-cfg-subtree-delete-size-limit: 100000 |
| | | ds-cfg-preload-time-limit: 0 seconds |
| | | ds-cfg-import-temp-directory: importTmp |
| | | ds-cfg-import-temp-directory: import-tmp |
| | | ds-cfg-import-buffer-size: 256 megabytes |
| | | ds-cfg-import-queue-size: 100 |
| | | ds-cfg-import-pass-size: 0 |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server; |
| | | |
| | |
| | | //db_unindexed is the forth backend used by the unindexed search privilege |
| | | //test cases |
| | | String[] subDirectories = { "bak", "bin", "changelogDb", "classes", |
| | | "config", "db", "db_verify", "ldif", "lib", |
| | | "locks", "logs", "db_rebuild", "db_unindexed", |
| | | "db_index_test", "db_import_test"}; |
| | | "config", "db", "import-tmp", "db_verify", |
| | | "ldif", "lib", "locks", "logs", "db_rebuild", |
| | | "db_unindexed", "db_index_test", |
| | | "db_import_test"}; |
| | | for (String s : subDirectories) |
| | | { |
| | | new File(testRoot, s).mkdir(); |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.backends.jeb; |
| | | |
| | |
| | | "ds-cfg-backend-id: indexRoot", |
| | | "ds-cfg-db-directory:: " + |
| | | Base64.encode(homeDirName.getBytes()), |
| | | "ds-cfg-import-temp-directory: importTmp"); |
| | | "ds-cfg-import-temp-directory: import-tmp"); |
| | | |
| | | LocalDBBackendCfg cfg = AdminTestCaseUtils.getConfiguration( |
| | | LocalDBBackendCfgDefn.getInstance(), configEntry); |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2007-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.extensions; |
| | | |
| | |
| | | TestCaseUtils.dsconfig("create-backend", "--backend-name", "cacheTest", |
| | | "--type", "local-db", "--set", "db-directory:" + jeDir, "--set", |
| | | "base-dn:o=cachetest", "--set", |
| | | "import-temp-directory:importTmp", "--set", |
| | | "writability-mode:enabled", "--set", "enabled:true"); |
| | | |
| | | // Finalize this cache so it can be reconfigured. |
| | |
| | | * CDDL HEADER END |
| | | * |
| | | * |
| | | * Portions Copyright 2006-2007 Sun Microsystems, Inc. |
| | | * Portions Copyright 2006-2008 Sun Microsystems, Inc. |
| | | */ |
| | | package org.opends.server.replication.server; |
| | | |
| | |
| | | import java.util.UUID; |
| | | |
| | | import org.opends.server.TestCaseUtils; |
| | | import org.opends.server.api.SynchronizationProvider; |
| | | import org.opends.server.backends.task.TaskState; |
| | | import org.opends.server.core.DirectoryServer; |
| | | import org.opends.server.core.ModifyDNOperationBasis; |
| | |
| | | import org.opends.server.replication.common.ChangeNumber; |
| | | import org.opends.server.replication.common.ChangeNumberGenerator; |
| | | import org.opends.server.replication.common.ServerState; |
| | | import org.opends.server.replication.plugin.MultimasterReplication; |
| | | import org.opends.server.replication.plugin.ReplicationBroker; |
| | | import org.opends.server.replication.plugin.ReplicationServerListener; |
| | | import org.opends.server.replication.protocol.AddMsg; |
| | | import org.opends.server.replication.protocol.DeleteMsg; |
| | | import org.opends.server.replication.protocol.ModifyDNMsg; |
| | |
| | | replicationServerPort = socket.getLocalPort(); |
| | | socket.close(); |
| | | |
| | | ReplServerFakeConfiguration conf = |
| | | new ReplServerFakeConfiguration(replicationServerPort, null, 0, 1, 0, 0, null); |
| | | replicationServer = new ReplicationServer(conf);; |
| | | TestCaseUtils.dsconfig( |
| | | "create-replication-server", |
| | | "--provider-name", "Multimaster Synchronization", |
| | | "--set", "replication-port:" + replicationServerPort, |
| | | "--set", "replication-server-id:1"); |
| | | |
| | | DirectoryServer.getSynchronizationProviders(); |
| | | for (SynchronizationProvider<?> provider : DirectoryServer |
| | | .getSynchronizationProviders()) { |
| | | if (provider instanceof MultimasterReplication) { |
| | | MultimasterReplication mmp = (MultimasterReplication) provider; |
| | | ReplicationServerListener list = mmp.getReplicationServerListener(); |
| | | if (list != null) { |
| | | replicationServer = list.getReplicationServer(); |
| | | if (replicationServer != null) { |
| | | break; |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | // ReplServerFakeConfiguration conf = |
| | | // new ReplServerFakeConfiguration(replicationServerPort, null, 0, 1, 0, 0, null); |
| | | // replicationServer = new ReplicationServer(conf);; |
| | | } |
| | | |
| | | private void debugInfo(String s) |
| | |
| | | */ |
| | | private void stopChangelog() throws Exception |
| | | { |
| | | replicationServer.remove(); |
| | | shutdown(); |
| | | configure(); |
| | | newClient(); |
| | | newClientWithFirstChanges(); |
| | |
| | | @AfterClass() |
| | | public void shutdown() throws Exception |
| | | { |
| | | if (replicationServer != null) { |
| | | replicationServer.remove(); |
| | | replicationServer = null; |
| | | } |
| | | TestCaseUtils.dsconfig( |
| | | "delete-replication-server", |
| | | "--provider-name", "Multimaster Synchronization"); |
| | | replicationServer = null; |
| | | } |
| | | |
| | | /** |