opendj-server-legacy/src/main/java/org/opends/server/api/Backupable.java
New file
@@ -0,0 +1,126 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
 * or http://forgerock.org/license/CDDLv1.0.html.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at legal-notices/CDDLv1_0.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information:
 *      Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *      Copyright 2015 ForgeRock AS
 */
package org.opends.server.api;

import java.io.File;
import java.nio.file.Path;
import java.util.ListIterator;

import org.opends.server.types.DirectoryException;

/**
 * Represents an entity (storage, backend) that can be backed up.
 * <p>
 * The files to back up must be located under a root directory given by the
 * {@code getDirectory()} method. They can be located at any depth in a
 * sub-directory. For example, file1, file2 and file3 can be returned as
 * files to back up:
 * <pre>
 * +--- rootDirectory
 * |   \--- file1
 * |   \--- subDirectory
 * |        \--- file2
 * |        \--- file3
 * </pre>
 * The {@code getDirectory()} method also provides the root directory used for
 * the restore of a backup. The actual restore directory depends on the restore
 * strategy, which is one of the following two:
 * <ul>
 * <li>Direct restore: the backup is restored directly in the directory provided by the {@code getDirectory()}
 * method. It is the responsibility of the backupable entity to save a copy of the current files before the
 * restore, and to discard them at the end of a successful restore.</li>
 * <li>Indirect restore: the backup is restored in a temporary directory, derived from the directory provided
 * by the {@code getDirectory()} method (suffixed by "restore-[backupID]"). It is the responsibility of the
 * backupable entity to switch from the temporary directory to the final one.</li>
 * </ul>
 * <p>
 * The restore strategy is given by the {@code isDirectRestore()} method: if {@code true}, it is a direct
 * restore, otherwise it is an indirect restore.
 * <p>
 * Actions taken before and after the restore should be handled in the {@code beforeRestore()} and
 * {@code afterRestore()} methods.
 *
 * @see org.opends.server.util.BackupManager
 */
public interface Backupable
{
  /**
   * Returns the files to back up.
   *
   * @return an iterator of files to back up, which may be empty but never {@code null}
   * @throws DirectoryException
   *           If an error occurs.
   */
  ListIterator<Path> getFilesToBackup() throws DirectoryException;

  /**
   * Returns the directory which acts as the root of all files to back up and restore.
   *
   * @return the root directory
   */
  File getDirectory();

  /**
   * Indicates whether the restore is done directly in the restore directory.
   *
   * @return {@code true} if the restore is done directly in the restore directory
   *         provided by the {@code getDirectory()} method, or {@code false} if the
   *         restore is done in a temporary directory.
   */
  boolean isDirectRestore();

  /**
   * Called before the restore operation begins.
   * <p>
   * In case of a direct restore, the backupable entity should save a copy of the
   * existing data before the restore operation. Saving includes removing the
   * existing data and copying it to a save directory.
   *
   * @return the directory where current files are saved. It may be {@code null}
   *         if not applicable.
   * @throws DirectoryException
   *           If an error occurs.
   */
  Path beforeRestore() throws DirectoryException;

  /**
   * Called after the restore operation has finished successfully.
   * <p>
   * For a direct restore, the backupable entity can safely discard the saved copy.
   * For an indirect restore, the backupable entity should switch the restored
   * directory to the final restore directory.
   *
   * @param restoreDirectory
   *          The directory in which files have actually been restored. It is never
   *          {@code null}.
   * @param saveDirectory
   *          The directory in which current files have been saved. It may be
   *          {@code null} if {@code beforeRestore()} returned {@code null}.
   * @throws DirectoryException
   *           If an error occurs.
   */
  void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException;
}
opendj-server-legacy/src/main/java/org/opends/server/backends/SchemaBackend.java
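To make the two restore strategies described in the Backupable javadoc above concrete, here is a minimal sketch of the call order a restore orchestrator is expected to follow. The real orchestration lives in org.opends.server.util.BackupManager and is not shown in this change, so treat this as an assumption-laden illustration; RestoreLifecycleSketch and its restore() method are made-up names.

import java.nio.file.Path;
import java.nio.file.Paths;

import org.opends.server.api.Backupable;
import org.opends.server.types.DirectoryException;

/** Illustrative only: the call order a restore orchestrator is expected to follow. */
final class RestoreLifecycleSketch
{
  void restore(Backupable entity, String backupID) throws DirectoryException
  {
    // 1. Let the entity prepare; for a direct restore it moves its current files aside.
    Path saveDirectory = entity.beforeRestore(); // may be null

    // 2. Choose where the archive is unpacked.
    Path root = entity.getDirectory().toPath();
    Path restoreDirectory = entity.isDirectRestore()
        ? root                                        // unpack straight into the root directory
        : Paths.get(root + "restore-" + backupID);    // temporary "<root>restore-<backupID>" directory

    // ... the orchestrator extracts the backup archive into restoreDirectory here ...

    // 3. Let the entity finish: discard the saved copy (direct restore) or swap the
    //    temporary directory into place (indirect restore).
    entity.afterRestore(restoreDirectory, saveDirectory);
  }
}

In this change, SchemaBackend takes the direct path, while the JE and PersistIt backends below take the indirect one and move the temporary directory over the database directory in afterRestore().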
@@ -37,33 +37,25 @@ import static org.opends.server.util.StaticUtils.*; import java.io.File; import java.io.FileFilter; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.security.MessageDigest; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; import javax.crypto.Mac; import org.forgerock.i18n.LocalizableMessage; import org.forgerock.i18n.slf4j.LocalizedLogger; @@ -80,6 +72,7 @@ import org.opends.server.admin.std.server.SchemaBackendCfg; import org.opends.server.api.AlertGenerator; import org.opends.server.api.Backend; import org.opends.server.api.Backupable; import org.opends.server.api.ClientConnection; import org.opends.server.config.ConfigEntry; import org.opends.server.core.AddOperation; @@ -99,10 +92,12 @@ import org.opends.server.schema.NameFormSyntax; import org.opends.server.schema.ObjectClassSyntax; import org.opends.server.types.*; import org.opends.server.util.BackupManager; import org.opends.server.util.DynamicConstants; import org.opends.server.util.LDIFException; import org.opends.server.util.LDIFReader; import org.opends.server.util.LDIFWriter; import org.opends.server.util.StaticUtils; /** * This class defines a backend to hold the Directory Server schema information. @@ -110,10 +105,10 @@ * rather dynamically generates the schema entry whenever it is requested. */ public class SchemaBackend extends Backend<SchemaBackendCfg> implements ConfigurationChangeListener<SchemaBackendCfg>, AlertGenerator implements ConfigurationChangeListener<SchemaBackendCfg>, AlertGenerator, Backupable { private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); /** * The fully-qualified name of this class. @@ -3948,858 +3943,23 @@ /** {@inheritDoc} */ @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException public void createBackup(BackupConfig backupConfig) throws DirectoryException { // Get the properties to use for the backup. We don't care whether or not // it's incremental, so there's no need to get that. String backupID = backupConfig.getBackupID(); BackupDirectory backupDirectory = backupConfig.getBackupDirectory(); boolean compress = backupConfig.compressData(); boolean encrypt = backupConfig.encryptData(); boolean hash = backupConfig.hashData(); boolean signHash = backupConfig.signHash(); // Create a hash map that will hold the extra backup property information // for this backup. HashMap<String,String> backupProperties = new HashMap<String,String>(); // Get the crypto manager and use it to obtain references to the message // digest and/or MAC to use for hashing and/or signing. 
CryptoManager cryptoManager = DirectoryServer.getCryptoManager(); Mac mac = null; MessageDigest digest = null; String macKeyID = null; if (hash) { if (signHash) { try { macKeyID = cryptoManager.getMacEngineKeyEntryID(); backupProperties.put(BACKUP_PROPERTY_MAC_KEY_ID, macKeyID); mac = cryptoManager.getMacEngine(macKeyID); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_SCHEMA_BACKUP_CANNOT_GET_MAC.get( macKeyID, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } else { String digestAlgorithm = cryptoManager.getPreferredMessageDigestAlgorithm(); backupProperties.put(BACKUP_PROPERTY_DIGEST_ALGORITHM, digestAlgorithm); try { digest = cryptoManager.getPreferredMessageDigest(); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_SCHEMA_BACKUP_CANNOT_GET_DIGEST.get( digestAlgorithm, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } } // Create an output stream that will be used to write the archive file. At // its core, it will be a file output stream to put a file on the disk. If // we are to encrypt the data, then that file output stream will be wrapped // in a cipher output stream. The resulting output stream will then be // wrapped by a zip output stream (which may or may not actually use // compression). String filename = null; OutputStream outputStream; try { filename = SCHEMA_BACKUP_BASE_FILENAME + backupID; File archiveFile = new File(backupDirectory.getPath() + File.separator + filename); if (archiveFile.exists()) { int i=1; while (true) { archiveFile = new File(backupDirectory.getPath() + File.separator + filename + "." + i); if (archiveFile.exists()) { i++; } else { filename = filename + "." + i; break; } } } outputStream = new FileOutputStream(archiveFile, false); backupProperties.put(BACKUP_PROPERTY_ARCHIVE_FILENAME, filename); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_SCHEMA_BACKUP_CANNOT_CREATE_ARCHIVE_FILE. get(filename, backupDirectory.getPath(), getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If we should encrypt the data, then wrap the output stream in a cipher // output stream. if (encrypt) { try { outputStream = cryptoManager.getCipherOutputStream(outputStream); } catch (CryptoManagerException e) { logger.traceException(e); LocalizableMessage message = ERR_SCHEMA_BACKUP_CANNOT_GET_CIPHER.get( stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Wrap the file output stream in a zip output stream. ZipOutputStream zipStream = new ZipOutputStream(outputStream); LocalizableMessage message = ERR_SCHEMA_BACKUP_ZIP_COMMENT.get( DynamicConstants.PRODUCT_NAME, backupID); try { zipStream.setComment(String.valueOf(message)); if (compress) { zipStream.setLevel(Deflater.DEFAULT_COMPRESSION); } else { zipStream.setLevel(Deflater.NO_COMPRESSION); } // Create a Comment Entry in the zip file // This ensure the backup is never empty, even wher // there is no schema file to backup. String commentName = "schema.comment"; // We'll put the name in the hash, too. 
if (hash) { if (signHash) { mac.update(getBytes(commentName)); } else { digest.update(getBytes(commentName)); } } try { ZipEntry zipEntry = new ZipEntry(commentName); zipStream.putNextEntry(zipEntry); zipStream.closeEntry(); } catch (Exception e) { logger.traceException(e); close(zipStream); message = ERR_SCHEMA_BACKUP_CANNOT_BACKUP_SCHEMA_FILE.get(commentName, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer .getServerErrorResultCode(), message, e); } // Get the path to the directory in which the schema files reside and // then get a list of all the files in that directory. String schemaInstanceDirPath = SchemaConfigManager.getSchemaDirectoryPath(); File schemaDir; File[] schemaFiles = null; try { schemaDir = new File(schemaInstanceDirPath); schemaFiles = schemaDir.listFiles(); } catch (Exception e) { // Can't locate or list Instance schema directory logger.traceException(e); message = ERR_SCHEMA_BACKUP_CANNOT_LIST_SCHEMA_FILES.get( schemaInstanceDirPath, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Iterate through the schema files and write them to the zip stream. If // we're using a hash or MAC, then calculate that as well. byte[] buffer = new byte[8192]; String parent = ".instance"; for (File schemaFile : schemaFiles) { if (backupConfig.isCancelled()) { break; } if (!schemaFile.isFile()) { // If there are any non-file items in the directory (e.g., one or more // subdirectories), then we'll skip them. continue; } String baseName = schemaFile.getName(); // We'll put the name in the hash, too. if (hash) { if (signHash) { mac.update(getBytes(baseName + parent)); } else { digest.update(getBytes(baseName + parent)); } } InputStream inputStream = null; try { ZipEntry zipEntry = new ZipEntry(baseName + parent); zipStream.putNextEntry(zipEntry); inputStream = new FileInputStream(schemaFile); while (true) { int bytesRead = inputStream.read(buffer); if (bytesRead < 0 || backupConfig.isCancelled()) { break; } if (hash) { if (signHash) { mac.update(buffer, 0, bytesRead); } else { digest.update(buffer, 0, bytesRead); } } zipStream.write(buffer, 0, bytesRead); } zipStream.closeEntry(); inputStream.close(); } catch (Exception e) { logger.traceException(e); close(inputStream, zipStream); message = ERR_SCHEMA_BACKUP_CANNOT_BACKUP_SCHEMA_FILE.get(baseName, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer .getServerErrorResultCode(), message, e); } } } finally { // We're done writing the file, so close the zip stream // (which should also close the underlying stream). try { zipStream.close(); } catch (Exception e) { logger.traceException(e); message = ERR_SCHEMA_BACKUP_CANNOT_CLOSE_ZIP_STREAM.get( filename, backupDirectory.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Get the digest or MAC bytes if appropriate. byte[] digestBytes = null; byte[] macBytes = null; if (hash) { if (signHash) { macBytes = mac.doFinal(); } else { digestBytes = digest.digest(); } } // Create the backup info structure for this backup and add it to the backup // directory. // FIXME -- Should I use the date from when I started or finished? 
BackupInfo backupInfo = new BackupInfo(backupDirectory, backupID, new Date(), false, compress, encrypt, digestBytes, macBytes, null, backupProperties); try { backupDirectory.addBackup(backupInfo); backupDirectory.writeBackupDirectoryDescriptor(); } catch (Exception e) { logger.traceException(e); message = ERR_SCHEMA_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( backupDirectory.getDescriptorPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } new BackupManager(getBackendID()).createBackup(this, backupConfig); } /** {@inheritDoc} */ @Override public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { BackupInfo backupInfo = backupDirectory.getBackupInfo(backupID); if (backupInfo == null) { LocalizableMessage message = ERR_BACKUP_MISSING_BACKUPID.get(backupID, backupDirectory.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } HashMap<String,String> backupProperties = backupInfo.getBackupProperties(); String archiveFilename = backupProperties.get(BACKUP_PROPERTY_ARCHIVE_FILENAME); File archiveFile = new File(backupDirectory.getPath(), archiveFilename); try { backupDirectory.removeBackup(backupID); } catch (ConfigException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), e.getMessageObject()); } try { backupDirectory.writeBackupDirectoryDescriptor(); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( backupDirectory.getDescriptorPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Remove the archive file. archiveFile.delete(); new BackupManager(getBackendID()).removeBackup(backupDirectory, backupID); } /** {@inheritDoc} */ @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { // First, make sure that the requested backup exists. BackupDirectory backupDirectory = restoreConfig.getBackupDirectory(); String backupPath = backupDirectory.getPath(); String backupID = restoreConfig.getBackupID(); BackupInfo backupInfo = backupDirectory.getBackupInfo(backupID); if (backupInfo == null) { LocalizableMessage message = ERR_SCHEMA_RESTORE_NO_SUCH_BACKUP.get(backupID, backupPath); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } // Read the backup info structure to determine the name of the file that // contains the archive. Then make sure that file exists. String backupFilename = backupInfo.getBackupProperty(BACKUP_PROPERTY_ARCHIVE_FILENAME); if (backupFilename == null) { LocalizableMessage message = ERR_SCHEMA_RESTORE_NO_BACKUP_FILE.get(backupID, backupPath); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } File backupFile = new File(backupPath + File.separator + backupFilename); try { if (! 
backupFile.exists()) { LocalizableMessage message = ERR_SCHEMA_RESTORE_NO_SUCH_FILE.get(backupID, backupFile.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } catch (DirectoryException de) { throw de; } catch (Exception e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_CHECK_FOR_ARCHIVE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If the backup is hashed, then we need to get the message digest to use // to verify it. byte[] unsignedHash = backupInfo.getUnsignedHash(); MessageDigest digest = null; if (unsignedHash != null) { String digestAlgorithm = backupInfo.getBackupProperty(BACKUP_PROPERTY_DIGEST_ALGORITHM); if (digestAlgorithm == null) { LocalizableMessage message = ERR_SCHEMA_RESTORE_UNKNOWN_DIGEST.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { digest = DirectoryServer.getCryptoManager().getMessageDigest( digestAlgorithm); } catch (Exception e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_GET_DIGEST.get(backupID, digestAlgorithm); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // If the backup is signed, then we need to get the MAC to use to verify it. byte[] signedHash = backupInfo.getSignedHash(); Mac mac = null; if (signedHash != null) { String macKeyID = backupInfo.getBackupProperty(BACKUP_PROPERTY_MAC_KEY_ID); if (macKeyID == null) { LocalizableMessage message = ERR_SCHEMA_RESTORE_UNKNOWN_MAC.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { mac = DirectoryServer.getCryptoManager().getMacEngine(macKeyID); } catch (Exception e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_GET_MAC.get( backupID, macKeyID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Create the input stream that will be used to read the backup file. At // its core, it will be a file input stream. InputStream inputStream; try { inputStream = new FileInputStream(backupFile); } catch (Exception e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_OPEN_BACKUP_FILE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If the backup is encrypted, then we need to wrap the file input stream // in a cipher input stream. if (backupInfo.isEncrypted()) { try { inputStream = DirectoryServer.getCryptoManager() .getCipherInputStream(inputStream); } catch (CryptoManagerException e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_GET_CIPHER.get( backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Now wrap the resulting input stream in a zip stream so that we can read // its contents. We don't need to worry about whether to use compression or // not because it will be handled automatically. ZipInputStream zipStream = new ZipInputStream(inputStream); // Determine whether we should actually do the restore, or if we should just // try to verify the archive. If we are not going to verify only, then // move the current schema directory out of the way so we can keep it around // to restore if a problem occurs. 
String schemaInstanceDirPath = SchemaConfigManager.getSchemaDirectoryPath(); File schemaInstanceDir = new File(schemaInstanceDirPath); String backupInstanceDirPath = null; File schemaBackupInstanceDir = null; boolean verifyOnly = restoreConfig.verifyOnly(); if (! verifyOnly) { // Rename the current schema directory if it exists. try { if (schemaInstanceDir.exists()) { String schemaBackupInstanceDirPath = schemaInstanceDirPath + ".save"; backupInstanceDirPath = schemaBackupInstanceDirPath; schemaBackupInstanceDir = new File(backupInstanceDirPath); if (schemaBackupInstanceDir.exists()) { int i=2; while (true) { backupInstanceDirPath = schemaBackupInstanceDirPath + i; schemaBackupInstanceDir = new File(backupInstanceDirPath); if (schemaBackupInstanceDir.exists()) { i++; } else { break; } } } schemaInstanceDir.renameTo(schemaBackupInstanceDir); } } catch (Exception e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_RENAME_CURRENT_DIRECTORY. get(backupID, schemaInstanceDirPath, backupInstanceDirPath, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Create a new directory to hold the restored schema files. try { schemaInstanceDir.mkdirs(); } catch (Exception e) { // Try to restore the previous schema directory if possible. This will // probably fail in this case, but try anyway. if (schemaBackupInstanceDir != null) { try { schemaBackupInstanceDir.renameTo(schemaInstanceDir); logger.info(NOTE_SCHEMA_RESTORE_RESTORED_OLD_SCHEMA, schemaInstanceDirPath); } catch (Exception e2) { logger.error(ERR_SCHEMA_RESTORE_CANNOT_RESTORE_OLD_SCHEMA, schemaBackupInstanceDir.getPath()); } } LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_CREATE_SCHEMA_DIRECTORY.get( backupID, schemaInstanceDirPath, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Read through the archive file an entry at a time. For each entry, update // the digest or MAC if necessary, and if we're actually doing the restore, // then write the files out into the schema directory. byte[] buffer = new byte[8192]; while (true) { ZipEntry zipEntry; try { zipEntry = zipStream.getNextEntry(); } catch (Exception e) { // Tell the user where the previous schema was archived. if (schemaBackupInstanceDir != null) { logger.error(ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED, schemaBackupInstanceDir.getPath()); } LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_GET_ZIP_ENTRY.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } if (zipEntry == null) { break; } // Get the filename for the zip entry and update the digest or MAC as // necessary. String fileName = zipEntry.getName(); if (digest != null) { digest.update(getBytes(fileName)); } if (mac != null) { mac.update(getBytes(fileName)); } String baseDirPath = schemaInstanceDirPath; Boolean restoreIt = true; if (fileName.endsWith(".instance")) { fileName = fileName.substring(0,fileName.lastIndexOf(".instance")); } else { // Skip file. // ".install" files are from old backups and should be ignored restoreIt = false; } // If we're doing the restore, then create the output stream to write the // file. 
OutputStream outputStream = null; if (!verifyOnly && restoreIt) { String filePath = baseDirPath + File.separator + fileName; try { outputStream = new FileOutputStream(filePath); } catch (Exception e) { // Tell the user where the previous schema was archived. if (schemaBackupInstanceDir != null) { logger.error(ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED, schemaBackupInstanceDir.getPath()); } LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_CREATE_FILE.get( backupID, filePath, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } // Read the contents of the file and update the digest or MAC as // necessary. If we're actually restoring it, then write it into the // new schema directory. try { while (true) { int bytesRead = zipStream.read(buffer); if (bytesRead < 0) { // We've reached the end of the entry. break; } // Update the digest or MAC if appropriate. if (digest != null) { digest.update(buffer, 0, bytesRead); } if (mac != null) { mac.update(buffer, 0, bytesRead); } // Write the data to the output stream if appropriate. if (outputStream != null) { outputStream.write(buffer, 0, bytesRead); } } // We're at the end of the file so close the output stream if we're // writing it. if (outputStream != null) { outputStream.close(); } } catch (Exception e) { // Tell the user where the previous schema was archived. if (schemaBackupInstanceDir != null) { logger.error(ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED, schemaBackupInstanceDir.getPath()); } LocalizableMessage message = ERR_SCHEMA_RESTORE_CANNOT_PROCESS_ARCHIVE_FILE.get( backupID, fileName, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Close the zip stream since we don't need it anymore. try { zipStream.close(); } catch (Exception e) { LocalizableMessage message = ERR_SCHEMA_RESTORE_ERROR_ON_ZIP_STREAM_CLOSE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // At this point, we should be done with the contents of the ZIP file and // the restore should be complete. If we were generating a digest or MAC, // then make sure it checks out. if (digest != null) { byte[] calculatedHash = digest.digest(); if (Arrays.equals(calculatedHash, unsignedHash)) { logger.info(NOTE_SCHEMA_RESTORE_UNSIGNED_HASH_VALID); } else { // Tell the user where the previous schema was archived. if (schemaBackupInstanceDir != null) { logger.error(ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED, schemaBackupInstanceDir.getPath()); } LocalizableMessage message = ERR_SCHEMA_RESTORE_UNSIGNED_HASH_INVALID.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } if (mac != null) { byte[] calculatedSignature = mac.doFinal(); if (Arrays.equals(calculatedSignature, signedHash)) { logger.info(NOTE_SCHEMA_RESTORE_SIGNED_HASH_VALID); } else { // Tell the user where the previous schema was archived. if (schemaBackupInstanceDir != null) { logger.error(ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED, schemaBackupInstanceDir.getPath()); } LocalizableMessage message = ERR_SCHEMA_RESTORE_SIGNED_HASH_INVALID.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } // If we are just verifying the archive, then we're done. 
if (verifyOnly) { logger.info(NOTE_SCHEMA_RESTORE_VERIFY_SUCCESSFUL, backupID, backupPath); return; } // If we've gotten here, then the archive was restored successfully. Get // rid of the temporary copy we made of the previous schema directory and // exit. if (schemaBackupInstanceDir != null) { recursiveDelete(schemaBackupInstanceDir); } logger.info(NOTE_SCHEMA_RESTORE_SUCCESSFUL, backupID, backupPath); new BackupManager(getBackendID()).restoreBackup(this, restoreConfig); } /** {@inheritDoc} */ @@ -5012,5 +4172,52 @@ public void preloadEntryCache() throws UnsupportedOperationException { throw new UnsupportedOperationException("Operation not supported."); } /** {@inheritDoc} */ @Override public File getDirectory() { return new File(SchemaConfigManager.getSchemaDirectoryPath()); } private static final FileFilter BACKUP_FILES_FILTER = new FileFilter() { @Override public boolean accept(File file) { return file.getName().endsWith(".ldif"); } }; /** {@inheritDoc} */ @Override public ListIterator<Path> getFilesToBackup() throws DirectoryException { return BackupManager.getFiles(getDirectory(), BACKUP_FILES_FILTER, getBackendID()).listIterator(); } /** {@inheritDoc} */ @Override public boolean isDirectRestore() { return true; } /** {@inheritDoc} */ @Override public Path beforeRestore() throws DirectoryException { // save current schema files in save directory return BackupManager.saveCurrentFilesToDirectory(this, getBackendID()); } /** {@inheritDoc} */ @Override public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException { // restore was successful, delete save directory StaticUtils.recursiveDelete(saveDirectory.toFile()); } } opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/BackendImpl.java
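Both SchemaBackend above and the JE backend below select their files through BackupManager.getFiles(File, FileFilter, String). The helper itself is not part of this diff, so the following is only a mental model of what it presumably does (an assumption, not the shipped code): walk the root directory recursively, keep the files accepted by the filter, and return their paths, with the backend ID presumably only feeding error reporting.

import java.io.File;
import java.io.FileFilter;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

/** Illustrative stand-in for BackupManager.getFiles(File, FileFilter, String). */
final class FileCollectionSketch
{
  static List<Path> getFiles(File rootDirectory, FileFilter filter, String backendID)
  {
    // backendID is presumably only used to build an error message on failure.
    List<Path> files = new ArrayList<>();
    collect(rootDirectory, filter, files);
    return files;
  }

  private static void collect(File dir, FileFilter filter, List<Path> files)
  {
    File[] children = dir.listFiles();
    if (children == null)
    {
      return; // not a directory, or an I/O error occurred
    }
    for (File child : children)
    {
      if (child.isDirectory())
      {
        collect(child, filter, files); // files may sit at any depth under the root
      }
      else if (filter.accept(child))
      {
        files.add(child.toPath());
      }
    }
  }
}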
@@ -27,21 +27,29 @@ package org.opends.server.backends.jeb; import static com.sleepycat.je.EnvironmentConfig.*; import static org.forgerock.util.Reject.*; import static org.opends.messages.BackendMessages.*; import static org.opends.messages.JebMessages.*; import static org.opends.messages.UtilityMessages.*; import static org.opends.server.backends.jeb.ConfigurableEnvironment.*; import static org.opends.server.util.ServerConstants.*; import static org.opends.server.util.StaticUtils.*; import java.io.File; import java.io.FileFilter; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.NoSuchElementException; import java.util.Set; import java.util.SortedSet; import java.util.concurrent.ExecutionException; @@ -61,6 +69,7 @@ import org.opends.server.admin.std.server.LocalDBBackendCfg; import org.opends.server.api.AlertGenerator; import org.opends.server.api.Backend; import org.opends.server.api.Backupable; import org.opends.server.api.DiskSpaceMonitorHandler; import org.opends.server.api.MonitorProvider; import org.opends.server.backends.RebuildConfig; @@ -90,6 +99,7 @@ import org.opends.server.types.Operation; import org.opends.server.types.Privilege; import org.opends.server.types.RestoreConfig; import org.opends.server.util.BackupManager; import org.opends.server.util.RuntimeInformation; import com.sleepycat.je.DatabaseException; @@ -103,7 +113,7 @@ */ public class BackendImpl extends Backend<LocalDBBackendCfg> implements ConfigurationChangeListener<LocalDBBackendCfg>, AlertGenerator, DiskSpaceMonitorHandler DiskSpaceMonitorHandler, Backupable { private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); @@ -246,7 +256,9 @@ cfg.addLocalDBChangeListener(this); } private File getDirectory() /** {@inheritDoc} */ @Override public File getDirectory() { File parentDirectory = getFileForPath(cfg.getDBDirectory()); return new File(parentDirectory, cfg.getBackendId()); @@ -949,37 +961,230 @@ @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException { BackupManager backupManager = new BackupManager(getBackendID()); File parentDir = getFileForPath(cfg.getDBDirectory()); File backendDir = new File(parentDir, cfg.getBackendId()); backupManager.createBackup(backendDir, backupConfig); new BackupManager(getBackendID()).createBackup(this, backupConfig); } /** {@inheritDoc} */ @Override public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { BackupManager backupManager = new BackupManager(getBackendID()); backupManager.removeBackup(backupDirectory, backupID); new BackupManager(getBackendID()).removeBackup(backupDirectory, backupID); } /** {@inheritDoc} */ @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { BackupManager backupManager = new BackupManager(getBackendID()); File parentDir = getFileForPath(cfg.getDBDirectory()); File backendDir = new File(parentDir, cfg.getBackendId()); backupManager.restoreBackup(backendDir, restoreConfig); new BackupManager(getBackendID()).restoreBackup(this, restoreConfig); } /** {@inheritDoc} */ 
@Override public ListIterator<Path> getFilesToBackup() throws DirectoryException { return new JELogFilesIterator(getDirectory(), cfg.getBackendId()); } /** * Iterator on JE log files to backup. * <p> * The cleaner thread may delete some log files during the backup. The * iterator is automatically renewed if at least one file has been deleted. */ static class JELogFilesIterator implements ListIterator<Path> { /** Underlying iterator on files. */ private ListIterator<Path> iterator; /** Root directory where all files are located. */ private final File rootDirectory; private final String backendID; /** Files to backup. Used to renew the iterator if necessary. */ private List<Path> files; private String lastFileName = ""; private long lastFileSize; JELogFilesIterator(File rootDirectory, String backendID) throws DirectoryException { this.rootDirectory = rootDirectory; this.backendID = backendID; setFiles(BackupManager.getFiles(rootDirectory, new JELogFileFilter(), backendID)); } private void setFiles(List<Path> files) { this.files = files; Collections.sort(files); if (!files.isEmpty()) { Path lastFile = files.get(files.size() - 1); lastFileName = lastFile.getFileName().toString(); lastFileSize = lastFile.toFile().length(); } iterator = files.listIterator(); } /** {@inheritDoc} */ @Override public boolean hasNext() { boolean hasNext = iterator.hasNext(); if (!hasNext && !files.isEmpty()) { try { List<Path> allFiles = BackupManager.getFiles(rootDirectory, new JELogFileFilter(), backendID); List<Path> compare = new ArrayList<Path>(files); compare.removeAll(allFiles); if (!compare.isEmpty()) { // at least one file was deleted, the iterator must be renewed based on last file previously available List<Path> newFiles = BackupManager.getFiles(rootDirectory, new JELogFileFilter(lastFileName, lastFileSize), backendID); logger.info(NOTE_JEB_BACKUP_CLEANER_ACTIVITY.get(newFiles.size())); if (!newFiles.isEmpty()) { setFiles(newFiles); hasNext = iterator.hasNext(); } } } catch (DirectoryException e) { logger.error(ERR_BACKEND_LIST_FILES_TO_BACKUP.get(backendID, stackTraceToSingleLineString(e))); } } return hasNext; } /** {@inheritDoc} */ @Override public Path next() { if (hasNext()) { return iterator.next(); } throw new NoSuchElementException(); } /** {@inheritDoc} */ @Override public boolean hasPrevious() { return iterator.hasPrevious(); } /** {@inheritDoc} */ @Override public Path previous() { return iterator.previous(); } /** {@inheritDoc} */ @Override public int nextIndex() { return iterator.nextIndex(); } /** {@inheritDoc} */ @Override public int previousIndex() { return iterator.previousIndex(); } /** {@inheritDoc} */ @Override public void remove() { throw new UnsupportedOperationException("remove() is not implemented"); } /** {@inheritDoc} */ @Override public void set(Path e) { throw new UnsupportedOperationException("set() is not implemented"); } /** {@inheritDoc} */ @Override public void add(Path e) { throw new UnsupportedOperationException("add() is not implemented"); } } /** * This class implements a FilenameFilter to detect a JE log file, possibly with a constraint * on the file name and file size. */ private static class JELogFileFilter implements FileFilter { private final String latestFilename; private final long latestFileSize; /** * Creates the filter for log files that are newer than provided file name * or equal to provided file name and of larger size. 
*/ JELogFileFilter(String latestFilename, long latestFileSize) { this.latestFilename = latestFilename; this.latestFileSize = latestFileSize; } /** Creates the filter for any JE log file. */ JELogFileFilter() { this("", 0); } /** {@inheritDoc} */ @Override public boolean accept(File file) { String name = file.getName(); int cmp = name.compareTo(latestFilename); return name.endsWith(".jdb") && (cmp > 0 || (cmp == 0 && file.length() > latestFileSize)); } } /** {@inheritDoc} */ @Override public boolean isDirectRestore() { // restore is done in an intermediate directory return false; } /** {@inheritDoc} */ @Override public Path beforeRestore() throws DirectoryException { return null; } /** {@inheritDoc} */ @Override public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException { // intermediate directory content is moved to database directory File targetDirectory = getDirectory(); recursiveDelete(targetDirectory); try { Files.move(restoreDirectory, targetDirectory.toPath()); } catch(IOException e) { LocalizableMessage msg = ERR_CANNOT_RENAME_RESTORE_DIRECTORY.get(restoreDirectory, targetDirectory.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), msg); } } /** {@inheritDoc} */ @Override opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/BackupManager.java
File was deleted
opendj-server-legacy/src/main/java/org/opends/server/backends/persistit/PersistItStorage.java
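Before the PersistIt changes, a quick illustration of the JELogFileFilter defined in the jeb BackendImpl hunk above. The JE cleaner may delete log files while a backup runs, so JELogFilesIterator renews itself from the last file name and size it saw; the runnable snippet below restates the filter's accept() predicate over a name and a size (file names and sizes are made up) to show which files a renewed pass picks up.

/** Illustrative check of the JELogFileFilter ordering rules; names and sizes are made up. */
public class JELogFileFilterExample
{
  public static void main(String[] args)
  {
    // Suppose the first pass last saw "00000003.jdb" at 1000 bytes.
    String lastFileName = "00000003.jdb";
    long lastFileSize = 1000;

    // JE log file names are zero-padded, so lexicographic order matches creation order:
    System.out.println(accept("00000002.jdb", 500, lastFileName, lastFileSize));  // false: older log file
    System.out.println(accept("00000003.jdb", 2000, lastFileName, lastFileSize)); // true: same file, grown since
    System.out.println(accept("00000004.jdb", 100, lastFileName, lastFileSize));  // true: newer log file
    System.out.println(accept("00000004.lck", 100, lastFileName, lastFileSize));  // false: not a .jdb file
  }

  /** Same predicate as JELogFileFilter.accept(File), restated so it needs no file system. */
  static boolean accept(String name, long size, String latestFilename, long latestFileSize)
  {
    int cmp = name.compareTo(latestFilename);
    return name.endsWith(".jdb") && (cmp > 0 || (cmp == 0 && size > latestFileSize));
  }
}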
@@ -31,12 +31,19 @@ import static org.opends.messages.BackendMessages.*; import static org.opends.messages.ConfigMessages.*; import static org.opends.messages.JebMessages.*; import static org.opends.messages.UtilityMessages.*; import static org.opends.server.util.StaticUtils.*; import java.io.File; import java.io.FilenameFilter; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.rmi.RemoteException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.NoSuchElementException; @@ -48,6 +55,7 @@ import org.forgerock.opendj.ldap.ByteString; import org.opends.server.admin.server.ConfigurationChangeListener; import org.opends.server.admin.std.server.PersistitBackendCfg; import org.opends.server.api.Backupable; import org.opends.server.api.DiskSpaceMonitorHandler; import org.opends.server.backends.pluggable.spi.Cursor; import org.opends.server.backends.pluggable.spi.Importer; @@ -63,7 +71,12 @@ import org.opends.server.core.MemoryQuota; import org.opends.server.core.ServerContext; import org.opends.server.extensions.DiskSpaceMonitor; import org.opends.server.types.BackupConfig; import org.opends.server.types.BackupDirectory; import org.opends.server.types.DirectoryException; import org.opends.server.types.FilePermission; import org.opends.server.types.RestoreConfig; import org.opends.server.util.BackupManager; import com.persistit.Configuration; import com.persistit.Configuration.BufferPoolConfiguration; @@ -81,7 +94,7 @@ /** PersistIt database implementation of the {@link Storage} engine. */ @SuppressWarnings("javadoc") public final class PersistItStorage implements Storage, ConfigurationChangeListener<PersistitBackendCfg>, public final class PersistItStorage implements Storage, Backupable, ConfigurationChangeListener<PersistitBackendCfg>, DiskSpaceMonitorHandler { private static final String VOLUME_NAME = "dj"; @@ -746,18 +759,120 @@ return new File(parentDir, config.getBackendId()); } /** {@inheritDoc} */ @Override public FilenameFilter getFilesToBackupFilter() public ListIterator<Path> getFilesToBackup() throws DirectoryException { return new FilenameFilter() try { @Override public boolean accept(File d, String name) // FIXME: use full programmatic way of retrieving backup file once available in persistIt String filesAsString = db.getManagement().execute("backup -f"); String[] allFiles = filesAsString.split("[\r\n]+"); final List<Path> files = new ArrayList<>(); for (String file : allFiles) { return name.startsWith(VOLUME_NAME) && !name.endsWith(".lck"); files.add(Paths.get(file)); } }; return files.listIterator(); } catch (RemoteException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKEND_LIST_FILES_TO_BACKUP.get(config.getBackendId(), stackTraceToSingleLineString(e))); } } @Override public Path beforeRestore() throws DirectoryException { return null; } @Override public boolean isDirectRestore() { // restore is done in an intermediate directory return false; } @Override public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException { // intermediate directory content is moved to database directory File targetDirectory = getDirectory(); recursiveDelete(targetDirectory); try { Files.move(restoreDirectory, targetDirectory.toPath()); } catch(IOException e) { LocalizableMessage msg = ERR_CANNOT_RENAME_RESTORE_DIRECTORY.get(restoreDirectory, 
targetDirectory.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), msg); } } /** * Switch the database in append only mode. * <p> * This is a mandatory operation before performing a backup. */ private void switchToAppendOnlyMode() throws DirectoryException { try { // FIXME: use full programmatic way of switching to this mode once available in persistIt db.getManagement().execute("backup -y -a -c"); } catch (RemoteException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKEND_SWITCH_TO_APPEND_MODE.get(config.getBackendId(), stackTraceToSingleLineString(e))); } } /** * Terminate the append only mode of the database. * <p> * This should be called only when database was previously switched to append only mode. */ private void endAppendOnlyMode() throws DirectoryException { try { // FIXME: use full programmatic way of ending append mode once available in persistIt db.getManagement().execute("backup -e"); } catch (RemoteException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKEND_END_APPEND_MODE.get(config.getBackendId(), stackTraceToSingleLineString(e))); } } @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException { switchToAppendOnlyMode(); try { new BackupManager(config.getBackendId()).createBackup(this, backupConfig); } finally { endAppendOnlyMode(); } } @Override public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { new BackupManager(config.getBackendId()).removeBackup(backupDirectory, backupID); } @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { new BackupManager(config.getBackendId()).restoreBackup(this, restoreConfig); } /** opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackendImpl.java
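The PersistIt backup above is driven entirely by three management commands: "backup -y -a -c", "backup -f" and "backup -e". The sketch below simply lines them up in the order createBackup() uses them, with the actual archiving (done by BackupManager) replaced by a print; db stands for the backend's already-open Persistit instance, and error handling is elided.

import java.rmi.RemoteException;

import com.persistit.Management;
import com.persistit.Persistit;

/** Illustrative only: the PersistIt management calls used by createBackup() above, in order. */
final class PersistItBackupSequenceSketch
{
  static void backup(Persistit db) throws RemoteException
  {
    Management mgmt = db.getManagement();
    mgmt.execute("backup -y -a -c");              // enter append-only mode before copying files
    try
    {
      // List the volume and journal files that make up a consistent backup.
      String files = mgmt.execute("backup -f");
      for (String file : files.split("[\r\n]+"))
      {
        System.out.println("would archive: " + file);
      }
    }
    finally
    {
      mgmt.execute("backup -e");                  // always leave append-only mode afterwards
    }
  }
}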
@@ -817,7 +817,7 @@ @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException { new BackupManager(getBackendID()).createBackup(storage, backupConfig); storage.createBackup(backupConfig); } /** {@inheritDoc} */ @@ -825,14 +825,14 @@ public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { new BackupManager(getBackendID()).removeBackup(backupDirectory, backupID); storage.removeBackup(backupDirectory, backupID); } /** {@inheritDoc} */ @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { new BackupManager(getBackendID()).restoreBackup(storage, restoreConfig); storage.restoreBackup(restoreConfig); } /** opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackupManager.java
File was deleted
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/TracedStorage.java
@@ -25,9 +25,6 @@ */ package org.opends.server.backends.pluggable; import java.io.File; import java.io.FilenameFilter; import org.forgerock.i18n.slf4j.LocalizedLogger; import org.forgerock.opendj.ldap.ByteSequence; import org.forgerock.opendj.ldap.ByteString; @@ -42,6 +39,10 @@ import org.opends.server.backends.pluggable.spi.UpdateFunction; import org.opends.server.backends.pluggable.spi.WriteOperation; import org.opends.server.backends.pluggable.spi.WriteableTransaction; import org.opends.server.types.BackupConfig; import org.opends.server.types.BackupDirectory; import org.opends.server.types.DirectoryException; import org.opends.server.types.RestoreConfig; /** * Decorates a {@link Storage} with additional trace logging. @@ -249,18 +250,6 @@ } @Override public File getDirectory() { return storage.getDirectory(); } @Override public FilenameFilter getFilesToBackupFilter() { return storage.getFilesToBackupFilter(); } @Override public StorageStatus getStorageStatus() { return storage.getStorageStatus(); @@ -340,6 +329,36 @@ storage.write(op); } @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException { storage.createBackup(backupConfig); if (logger.isTraceEnabled()) { logger.trace("Storage@%s.createBackup(%s)", storageId(), backendId); } } @Override public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { storage.removeBackup(backupDirectory, backupID); if (logger.isTraceEnabled()) { logger.trace("Storage@%s.removeBackup(%s, %s)", storageId(), backupID, backendId); } } @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { storage.restoreBackup(restoreConfig); if (logger.isTraceEnabled()) { logger.trace("Storage@%s.restoreBackup(%s)", storageId(), backendId); } } private String hex(final ByteSequence bytes) { return bytes != null ? bytes.toByteString().toHexString() : null; @@ -349,4 +368,6 @@ { return System.identityHashCode(this); } } opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Storage.java
@@ -26,8 +26,11 @@ package org.opends.server.backends.pluggable.spi; import java.io.Closeable; import java.io.File; import java.io.FilenameFilter; import org.opends.server.types.BackupConfig; import org.opends.server.types.BackupDirectory; import org.opends.server.types.DirectoryException; import org.opends.server.types.RestoreConfig; /** * This interface abstracts the underlying storage engine, @@ -100,27 +103,40 @@ */ boolean supportsBackupAndRestore(); /** * Returns the file system directory in which any database files are located. This method is * called when performing backup and restore operations. * * @return The file system directory in which any database files are located. * @throws UnsupportedOperationException * If backup and restore is not supported by this storage. */ File getDirectory(); /** * Returns a filename filter which selects the files to be included in a backup. This method is * called when performing backup operations. * * @return A filename filter which selects the files to be included in a backup. * @throws UnsupportedOperationException * If backup and restore is not supported by this storage. */ FilenameFilter getFilesToBackupFilter(); /** {@inheritDoc} */ @Override void close(); /** * Creates a backup for this storage. * * @param backupConfig * The configuration to use when performing the backup. * @throws DirectoryException * If a Directory Server error occurs. */ void createBackup(BackupConfig backupConfig) throws DirectoryException; /** * Removes a backup for this storage. * * @param backupDirectory * The backup directory structure with which the specified backup is * associated. * @param backupID * The backup ID for the backup to be removed. * @throws DirectoryException * If it is not possible to remove the specified backup. */ void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException; /** * Restores a backup for this storage. * * @param restoreConfig * The configuration to use when performing the restore. * @throws DirectoryException * If a Directory Server error occurs. */ void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException; } opendj-server-legacy/src/main/java/org/opends/server/backends/task/TaskBackend.java
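With the Storage SPI above now exposing createBackup(), removeBackup() and restoreBackup() directly (instead of getDirectory() and getFilesToBackupFilter()), a new storage engine follows the same recipe as PersistItStorage: implement Backupable and delegate the three operations to BackupManager. The skeleton below is illustrative only; ExampleStorage is a made-up name, and every non-backup Storage method is left abstract.

import java.io.File;

import org.opends.server.api.Backupable;
import org.opends.server.backends.pluggable.spi.Storage;
import org.opends.server.types.BackupConfig;
import org.opends.server.types.BackupDirectory;
import org.opends.server.types.DirectoryException;
import org.opends.server.types.RestoreConfig;
import org.opends.server.util.BackupManager;

/** Illustrative skeleton of a storage engine wiring the new SPI methods to BackupManager. */
abstract class ExampleStorage implements Storage, Backupable
{
  private final String backendId;
  private final File dbDirectory;

  ExampleStorage(String backendId, File dbDirectory)
  {
    this.backendId = backendId;
    this.dbDirectory = dbDirectory;
  }

  /** Root of the files to back up and restore, as required by Backupable. */
  @Override
  public File getDirectory()
  {
    return dbDirectory;
  }

  @Override
  public void createBackup(BackupConfig backupConfig) throws DirectoryException
  {
    new BackupManager(backendId).createBackup(this, backupConfig);
  }

  @Override
  public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException
  {
    new BackupManager(backendId).removeBackup(backupDirectory, backupID);
  }

  @Override
  public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException
  {
    new BackupManager(backendId).restoreBackup(this, restoreConfig);
  }
}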
@@ -29,19 +29,18 @@ import static org.forgerock.util.Reject.*; import static org.opends.messages.BackendMessages.*; import static org.opends.server.config.ConfigConstants.*; import static org.opends.server.util.ServerConstants.*; import static org.opends.server.util.StaticUtils.*; import java.io.*; import java.io.File; import java.io.FileFilter; import java.net.InetAddress; import java.security.MessageDigest; import java.util.*; import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; import javax.crypto.Mac; import java.nio.file.Path; import java.util.Collections; import java.util.GregorianCalendar; import java.util.Iterator; import java.util.List; import java.util.ListIterator; import java.util.Set; import org.forgerock.i18n.LocalizableMessage; import org.forgerock.i18n.slf4j.LocalizedLogger; @@ -56,14 +55,16 @@ import org.opends.server.admin.server.ConfigurationChangeListener; import org.opends.server.admin.std.server.TaskBackendCfg; import org.opends.server.api.Backend; import org.opends.server.api.Backupable; import org.opends.server.config.ConfigEntry; import org.opends.server.core.*; import org.opends.server.types.*; import org.opends.server.types.LockManager.DNLock; import org.opends.server.util.DynamicConstants; import org.opends.server.util.BackupManager; import org.opends.server.util.LDIFException; import org.opends.server.util.LDIFReader; import org.opends.server.util.LDIFWriter; import org.opends.server.util.StaticUtils; /** * This class provides an implementation of a Directory Server backend that may @@ -72,8 +73,9 @@ */ public class TaskBackend extends Backend<TaskBackendCfg> implements ConfigurationChangeListener<TaskBackendCfg> implements ConfigurationChangeListener<TaskBackendCfg>, Backupable { private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); @@ -1094,634 +1096,25 @@ /** {@inheritDoc} */ @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException public void createBackup(BackupConfig backupConfig) throws DirectoryException { // Get the properties to use for the backup. We don't care whether or not // it's incremental, so there's no need to get that. String backupID = backupConfig.getBackupID(); BackupDirectory backupDirectory = backupConfig.getBackupDirectory(); boolean compress = backupConfig.compressData(); boolean encrypt = backupConfig.encryptData(); boolean hash = backupConfig.hashData(); boolean signHash = backupConfig.signHash(); // Create a hash map that will hold the extra backup property information // for this backup. HashMap<String,String> backupProperties = new HashMap<String,String>(); // Get the crypto manager and use it to obtain references to the message // digest and/or MAC to use for hashing and/or signing. 
CryptoManager cryptoManager = DirectoryServer.getCryptoManager(); Mac mac = null; MessageDigest digest = null; String digestAlgorithm = null; String macKeyID = null; if (hash) { if (signHash) { try { macKeyID = cryptoManager.getMacEngineKeyEntryID(); backupProperties.put(BACKUP_PROPERTY_MAC_KEY_ID, macKeyID); mac = cryptoManager.getMacEngine(macKeyID); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_TASKS_BACKUP_CANNOT_GET_MAC.get( macKeyID, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } else { digestAlgorithm = cryptoManager.getPreferredMessageDigestAlgorithm(); backupProperties.put(BACKUP_PROPERTY_DIGEST_ALGORITHM, digestAlgorithm); try { digest = cryptoManager.getPreferredMessageDigest(); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_TASKS_BACKUP_CANNOT_GET_DIGEST.get( digestAlgorithm, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } } // Create an output stream that will be used to write the archive file. At // its core, it will be a file output stream to put a file on the disk. If // we are to encrypt the data, then that file output stream will be wrapped // in a cipher output stream. The resulting output stream will then be // wrapped by a zip output stream (which may or may not actually use // compression). String filename = null; OutputStream outputStream; try { filename = TASKS_BACKUP_BASE_FILENAME + backupID; File archiveFile = new File(backupDirectory.getPath() + File.separator + filename); if (archiveFile.exists()) { int i=1; while (true) { archiveFile = new File(backupDirectory.getPath() + File.separator + filename + "." + i); if (archiveFile.exists()) { i++; } else { filename = filename + "." + i; break; } } } outputStream = new FileOutputStream(archiveFile, false); backupProperties.put(BACKUP_PROPERTY_ARCHIVE_FILENAME, filename); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_TASKS_BACKUP_CANNOT_CREATE_ARCHIVE_FILE. get(filename, backupDirectory.getPath(), getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If we should encrypt the data, then wrap the output stream in a cipher // output stream. if (encrypt) { try { outputStream = cryptoManager.getCipherOutputStream(outputStream); } catch (CryptoManagerException e) { logger.traceException(e); LocalizableMessage message = ERR_TASKS_BACKUP_CANNOT_GET_CIPHER.get( stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Wrap the file output stream in a zip output stream. ZipOutputStream zipStream = new ZipOutputStream(outputStream); LocalizableMessage message = ERR_TASKS_BACKUP_ZIP_COMMENT.get( DynamicConstants.PRODUCT_NAME, backupID); zipStream.setComment(String.valueOf(message)); if (compress) { zipStream.setLevel(Deflater.DEFAULT_COMPRESSION); } else { zipStream.setLevel(Deflater.NO_COMPRESSION); } // Take tasks file and write it to the zip stream. If we // are using a hash or MAC, then calculate that as well. byte[] buffer = new byte[8192]; File tasksFile = getFileForPath(taskBackingFile); String baseName = tasksFile.getName(); // We'll put the name in the hash, too. 
if (hash) { if (signHash) { mac.update(getBytes(baseName)); } else { digest.update(getBytes(baseName)); } } InputStream inputStream = null; try { ZipEntry zipEntry = new ZipEntry(baseName); zipStream.putNextEntry(zipEntry); inputStream = new FileInputStream(tasksFile); while (true) { int bytesRead = inputStream.read(buffer); if (bytesRead < 0 || backupConfig.isCancelled()) { break; } if (hash) { if (signHash) { mac.update(buffer, 0, bytesRead); } else { digest.update(buffer, 0, bytesRead); } } zipStream.write(buffer, 0, bytesRead); } zipStream.closeEntry(); inputStream.close(); } catch (Exception e) { logger.traceException(e); close(inputStream, zipStream); message = ERR_TASKS_BACKUP_CANNOT_BACKUP_TASKS_FILE.get(baseName, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } // We're done writing the file, so close the zip stream (which should also // close the underlying stream). try { zipStream.close(); } catch (Exception e) { logger.traceException(e); message = ERR_TASKS_BACKUP_CANNOT_CLOSE_ZIP_STREAM.get( filename, backupDirectory.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Get the digest or MAC bytes if appropriate. byte[] digestBytes = null; byte[] macBytes = null; if (hash) { if (signHash) { macBytes = mac.doFinal(); } else { digestBytes = digest.digest(); } } // Create the backup info structure for this backup and add it to the backup // directory. // FIXME -- Should I use the date from when I started or finished? BackupInfo backupInfo = new BackupInfo(backupDirectory, backupID, new Date(), false, compress, encrypt, digestBytes, macBytes, null, backupProperties); try { backupDirectory.addBackup(backupInfo); backupDirectory.writeBackupDirectoryDescriptor(); } catch (Exception e) { logger.traceException(e); message = ERR_TASKS_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( backupDirectory.getDescriptorPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } /** {@inheritDoc} */ @Override public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { BackupInfo backupInfo = backupDirectory.getBackupInfo(backupID); if (backupInfo == null) { LocalizableMessage message = ERR_BACKUP_MISSING_BACKUPID.get(backupID, backupDirectory.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } HashMap<String,String> backupProperties = backupInfo.getBackupProperties(); String archiveFilename = backupProperties.get(BACKUP_PROPERTY_ARCHIVE_FILENAME); File archiveFile = new File(backupDirectory.getPath(), archiveFilename); try { backupDirectory.removeBackup(backupID); } catch (ConfigException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), e.getMessageObject()); } try { backupDirectory.writeBackupDirectoryDescriptor(); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( backupDirectory.getDescriptorPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Remove the archive file. 
archiveFile.delete(); new BackupManager(getBackendID()).createBackup(this, backupConfig); } /** {@inheritDoc} */ @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { // First, make sure that the requested backup exists. BackupDirectory backupDirectory = restoreConfig.getBackupDirectory(); String backupPath = backupDirectory.getPath(); String backupID = restoreConfig.getBackupID(); BackupInfo backupInfo = backupDirectory.getBackupInfo(backupID); boolean verifyOnly = restoreConfig.verifyOnly(); if (backupInfo == null) { LocalizableMessage message = ERR_TASKS_RESTORE_NO_SUCH_BACKUP.get(backupID, backupPath); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); new BackupManager(getBackendID()).removeBackup(backupDirectory, backupID); } // Read the backup info structure to determine the name of the file that // contains the archive. Then make sure that file exists. String backupFilename = backupInfo.getBackupProperty(BACKUP_PROPERTY_ARCHIVE_FILENAME); if (backupFilename == null) /** {@inheritDoc} */ @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { LocalizableMessage message = ERR_TASKS_RESTORE_NO_BACKUP_FILE.get(backupID, backupPath); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); new BackupManager(getBackendID()).restoreBackup(this, restoreConfig); } File backupFile = new File(backupPath + File.separator + backupFilename); try { if (! backupFile.exists()) { LocalizableMessage message = ERR_TASKS_RESTORE_NO_SUCH_FILE.get(backupID, backupFile.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } catch (DirectoryException de) { throw de; } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_CHECK_FOR_ARCHIVE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If the backup is hashed, then we need to get the message digest to use // to verify it. byte[] unsignedHash = backupInfo.getUnsignedHash(); MessageDigest digest = null; if (unsignedHash != null) { String digestAlgorithm = backupInfo.getBackupProperty(BACKUP_PROPERTY_DIGEST_ALGORITHM); if (digestAlgorithm == null) { LocalizableMessage message = ERR_TASKS_RESTORE_UNKNOWN_DIGEST.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { digest = DirectoryServer.getCryptoManager().getMessageDigest( digestAlgorithm); } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_GET_DIGEST.get(backupID, digestAlgorithm); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // If the backup is signed, then we need to get the MAC to use to verify it. 
byte[] signedHash = backupInfo.getSignedHash(); Mac mac = null; if (signedHash != null) { String macKeyID = backupInfo.getBackupProperty(BACKUP_PROPERTY_MAC_KEY_ID); if (macKeyID == null) { LocalizableMessage message = ERR_TASKS_RESTORE_UNKNOWN_MAC.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { mac = DirectoryServer.getCryptoManager().getMacEngine(macKeyID); } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_GET_MAC.get( backupID, macKeyID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Create the input stream that will be used to read the backup file. At // its core, it will be a file input stream. InputStream inputStream; try { inputStream = new FileInputStream(backupFile); } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_OPEN_BACKUP_FILE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If the backup is encrypted, then we need to wrap the file input stream // in a cipher input stream. if (backupInfo.isEncrypted()) { try { inputStream = DirectoryServer.getCryptoManager() .getCipherInputStream(inputStream); } catch (CryptoManagerException e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_GET_CIPHER.get( backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Now wrap the resulting input stream in a zip stream so that we can read // its contents. We don't need to worry about whether to use compression or // not because it will be handled automatically. ZipInputStream zipStream = new ZipInputStream(inputStream); // Read through the archive file an entry at a time. For each entry, update // the digest or MAC if necessary. byte[] buffer = new byte[8192]; while (true) { ZipEntry zipEntry; try { zipEntry = zipStream.getNextEntry(); } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_GET_ZIP_ENTRY.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } if (zipEntry == null) { break; } // Get the filename for the zip entry and update the digest or MAC as // necessary. String fileName = zipEntry.getName(); if (digest != null) { digest.update(getBytes(fileName)); } if (mac != null) { mac.update(getBytes(fileName)); } // If we're doing the restore, then create the output stream to write the // file. File tasksFile = getFileForPath(taskBackingFile); String baseDirPath = tasksFile.getParent(); OutputStream outputStream = null; if (!verifyOnly) { String filePath = baseDirPath + File.separator + fileName; try { outputStream = new FileOutputStream(filePath); } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_CREATE_FILE.get( backupID, filePath, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } // Read the contents of the file and update the digest or MAC as // necessary. try { while (true) { int bytesRead = zipStream.read(buffer); if (bytesRead < 0) { // We've reached the end of the entry. break; } // Update the digest or MAC if appropriate. if (digest != null) { digest.update(buffer, 0, bytesRead); } if (mac != null) { mac.update(buffer, 0, bytesRead); } // Write the data to the output stream if appropriate. 
if (outputStream != null) { outputStream.write(buffer, 0, bytesRead); } } // We're at the end of the file so close the output stream if we're // writing it. if (outputStream != null) { outputStream.close(); } } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_CANNOT_PROCESS_ARCHIVE_FILE.get( backupID, fileName, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Close the zip stream since we don't need it anymore. try { zipStream.close(); } catch (Exception e) { LocalizableMessage message = ERR_TASKS_RESTORE_ERROR_ON_ZIP_STREAM_CLOSE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // At this point, we should be done with the contents of the ZIP file and // the restore should be complete. If we were generating a digest or MAC, // then make sure it checks out. if (digest != null) { byte[] calculatedHash = digest.digest(); if (Arrays.equals(calculatedHash, unsignedHash)) { logger.info(NOTE_TASKS_RESTORE_UNSIGNED_HASH_VALID); } else { LocalizableMessage message = ERR_TASKS_RESTORE_UNSIGNED_HASH_INVALID.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } if (mac != null) { byte[] calculatedSignature = mac.doFinal(); if (Arrays.equals(calculatedSignature, signedHash)) { logger.info(NOTE_TASKS_RESTORE_SIGNED_HASH_VALID); } else { LocalizableMessage message = ERR_TASKS_RESTORE_SIGNED_HASH_INVALID.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } // If we are just verifying the archive, then we're done. if (verifyOnly) { logger.info(NOTE_TASKS_RESTORE_VERIFY_SUCCESSFUL, backupID, backupPath); return; } // If we've gotten here, then the archive was restored successfully. logger.info(NOTE_TASKS_RESTORE_SUCCESSFUL, backupID, backupPath); } /** {@inheritDoc} */ @Override public boolean isConfigurationAcceptable(TaskBackendCfg config, @@ -2052,8 +1445,59 @@ /** {@inheritDoc} */ @Override public void preloadEntryCache() throws UnsupportedOperationException { public void preloadEntryCache() throws UnsupportedOperationException { throw new UnsupportedOperationException("Operation not supported."); } /** {@inheritDoc} */ @Override public File getDirectory() { return getFileForPath(taskBackingFile).getParentFile(); } private FileFilter getFilesToBackupFilter() { return new FileFilter() { @Override public boolean accept(File file) { return file.getName().equals(getFileForPath(taskBackingFile).getName()); } }; } /** {@inheritDoc} */ @Override public ListIterator<Path> getFilesToBackup() throws DirectoryException { return BackupManager.getFiles(getDirectory(), getFilesToBackupFilter(), getBackendID()).listIterator(); } /** {@inheritDoc} */ @Override public boolean isDirectRestore() { return true; } /** {@inheritDoc} */ @Override public Path beforeRestore() throws DirectoryException { // save current files return BackupManager.saveCurrentFilesToDirectory(this, getBackendID()); } /** {@inheritDoc} */ @Override public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException { // restore was successful, delete the save directory StaticUtils.recursiveDelete(saveDirectory.toFile()); } } opendj-server-legacy/src/main/java/org/opends/server/extensions/ConfigFileHandler.java
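/*
 * Illustrative sketch: the delegation pattern that the TaskBackend changes above and the
 * ConfigFileHandler changes below both follow. A backupable backend implements Backupable and
 * hands createBackup/removeBackup/restoreBackup over to the new BackupManager. The class name,
 * backend ID and directory used here are hypothetical; only the BackupManager and Backupable
 * calls that appear elsewhere in this change are assumed to exist, and Backupable is assumed to
 * declare exactly the five methods implemented by the two backends in this change. In a real
 * backend the three backup methods would be @Override implementations inherited from Backend.
 */
import java.io.File;
import java.io.FileFilter;
import java.nio.file.Path;
import java.util.ListIterator;

import org.opends.server.api.Backupable;
import org.opends.server.types.BackupConfig;
import org.opends.server.types.BackupDirectory;
import org.opends.server.types.DirectoryException;
import org.opends.server.types.RestoreConfig;
import org.opends.server.util.BackupManager;
import org.opends.server.util.StaticUtils;

final class ExampleBackupableBackend implements Backupable
{
  private final String backendID = "example";                 // hypothetical backend ID
  private final File rootDirectory = new File("db", "example"); // hypothetical data directory

  /** Backup operations become one-line delegations to BackupManager. */
  public void createBackup(BackupConfig backupConfig) throws DirectoryException
  {
    new BackupManager(backendID).createBackup(this, backupConfig);
  }

  public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException
  {
    new BackupManager(backendID).removeBackup(backupDirectory, backupID);
  }

  public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException
  {
    new BackupManager(backendID).restoreBackup(this, restoreConfig);
  }

  @Override
  public File getDirectory()
  {
    return rootDirectory;
  }

  @Override
  public ListIterator<Path> getFilesToBackup() throws DirectoryException
  {
    // Back up every regular file found under the root directory.
    FileFilter regularFilesOnly = new FileFilter()
    {
      @Override
      public boolean accept(File file)
      {
        return file.isFile();
      }
    };
    return BackupManager.getFiles(rootDirectory, regularFilesOnly, backendID).listIterator();
  }

  @Override
  public boolean isDirectRestore()
  {
    return true;
  }

  @Override
  public Path beforeRestore() throws DirectoryException
  {
    // Save the current files so they are only discarded once the restore succeeds.
    return BackupManager.saveCurrentFilesToDirectory(this, backendID);
  }

  @Override
  public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException
  {
    // The restore succeeded: the saved copy of the previous files is no longer needed.
    StaticUtils.recursiveDelete(saveDirectory.toFile());
  }
}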
@@ -38,19 +38,13 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Path; import java.security.MessageDigest; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.zip.Deflater; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; import javax.crypto.Mac; import org.forgerock.i18n.LocalizableMessage; import org.forgerock.i18n.LocalizableMessageBuilder; @@ -65,6 +59,7 @@ import org.forgerock.util.Utils; import org.opends.server.admin.std.server.ConfigFileHandlerBackendCfg; import org.opends.server.api.AlertGenerator; import org.opends.server.api.Backupable; import org.opends.server.api.ClientConnection; import org.opends.server.api.ConfigAddListener; import org.opends.server.api.ConfigChangeListener; @@ -81,7 +76,7 @@ import org.opends.server.schema.GeneralizedTimeSyntax; import org.opends.server.tools.LDIFModify; import org.opends.server.types.*; import org.opends.server.util.DynamicConstants; import org.opends.server.util.BackupManager; import org.opends.server.util.LDIFException; import org.opends.server.util.LDIFReader; import org.opends.server.util.LDIFWriter; @@ -94,7 +89,7 @@ */ public class ConfigFileHandler extends ConfigHandler<ConfigFileHandlerBackendCfg> implements AlertGenerator implements AlertGenerator, Backupable { private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); @@ -1961,819 +1956,23 @@ /** {@inheritDoc} */ @Override public void createBackup(BackupConfig backupConfig) throws DirectoryException public void createBackup(BackupConfig backupConfig) throws DirectoryException { // Get the properties to use for the backup. We don't care whether or not // it's incremental, so there's no need to get that. String backupID = backupConfig.getBackupID(); BackupDirectory backupDirectory = backupConfig.getBackupDirectory(); boolean compress = backupConfig.compressData(); boolean encrypt = backupConfig.encryptData(); boolean hash = backupConfig.hashData(); boolean signHash = backupConfig.signHash(); // Create a hash map that will hold the extra backup property information // for this backup. HashMap<String,String> backupProperties = new HashMap<String,String>(); // Get the crypto manager and use it to obtain references to the message // digest and/or MAC to use for hashing and/or signing. 
CryptoManager cryptoManager = DirectoryServer.getCryptoManager(); Mac mac = null; MessageDigest digest = null; String macKeyID = null; if (hash) { if (signHash) { try { macKeyID = cryptoManager.getMacEngineKeyEntryID(); backupProperties.put(BACKUP_PROPERTY_MAC_KEY_ID, macKeyID); mac = cryptoManager.getMacEngine(macKeyID); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_CONFIG_BACKUP_CANNOT_GET_MAC.get( macKeyID, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } else { String digestAlgorithm = cryptoManager.getPreferredMessageDigestAlgorithm(); backupProperties.put(BACKUP_PROPERTY_DIGEST_ALGORITHM, digestAlgorithm); try { digest = cryptoManager.getPreferredMessageDigest(); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_CONFIG_BACKUP_CANNOT_GET_DIGEST.get( digestAlgorithm, stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } } // Create an output stream that will be used to write the archive file. At // its core, it will be a file output stream to put a file on the disk. If // we are to encrypt the data, then that file output stream will be wrapped // in a cipher output stream. The resulting output stream will then be // wrapped by a zip output stream (which may or may not actually use // compression). String filename = null; OutputStream outputStream; try { filename = CONFIG_BACKUP_BASE_FILENAME + backupID; File archiveFile = new File(backupDirectory.getPath() + File.separator + filename); if (archiveFile.exists()) { int i=1; while (true) { archiveFile = new File(backupDirectory.getPath() + File.separator + filename + "." + i); if (archiveFile.exists()) { i++; } else { filename = filename + "." + i; break; } } } outputStream = new FileOutputStream(archiveFile, false); backupProperties.put(BACKUP_PROPERTY_ARCHIVE_FILENAME, filename); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_CONFIG_BACKUP_CANNOT_CREATE_ARCHIVE_FILE. get(filename, backupDirectory.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If we should encrypt the data, then wrap the output stream in a cipher // output stream. if (encrypt) { try { outputStream = cryptoManager.getCipherOutputStream(outputStream); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_CONFIG_BACKUP_CANNOT_GET_CIPHER.get( stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Wrap the file output stream in a zip output stream. ZipOutputStream zipStream = new ZipOutputStream(outputStream); LocalizableMessage message = ERR_CONFIG_BACKUP_ZIP_COMMENT.get( DynamicConstants.PRODUCT_NAME, backupID); zipStream.setComment(message.toString()); if (compress) { zipStream.setLevel(Deflater.DEFAULT_COMPRESSION); } else { zipStream.setLevel(Deflater.NO_COMPRESSION); } // This may seem a little weird, but in this context, we only have access to // this class as a backend and not as the config handler. We need it as a // config handler to determine the path to the config file, so we can get // that from the Directory Server object. 
String configFile = null; try { configFile = ((ConfigFileHandler) DirectoryServer.getConfigHandler()).configFile; } catch (Exception e) { logger.traceException(e); message = ERR_CONFIG_BACKUP_CANNOT_DETERMINE_CONFIG_FILE_LOCATION. get(getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Read the Directory Server configuration file and put it in the archive. byte[] buffer = new byte[8192]; FileInputStream inputStream = null; try { File f = new File(configFile); ZipEntry zipEntry = new ZipEntry(f.getName()); zipStream.putNextEntry(zipEntry); inputStream = new FileInputStream(f); while (true) { int bytesRead = inputStream.read(buffer); if (bytesRead < 0 || backupConfig.isCancelled()) { break; } if (hash) { if (signHash) { mac.update(buffer, 0, bytesRead); } else { digest.update(buffer, 0, bytesRead); } } zipStream.write(buffer, 0, bytesRead); } inputStream.close(); zipStream.closeEntry(); } catch (Exception e) { logger.traceException(e); StaticUtils.close(inputStream, zipStream); message = ERR_CONFIG_BACKUP_CANNOT_BACKUP_CONFIG_FILE.get( configFile, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If an archive directory exists, then add its contents as well. try { File archiveDirectory = new File(new File(configFile).getParent(), CONFIG_ARCHIVE_DIR_NAME); if (archiveDirectory.exists()) { for (File archiveFile : archiveDirectory.listFiles()) { ZipEntry zipEntry = new ZipEntry(CONFIG_ARCHIVE_DIR_NAME + File.separator + archiveFile.getName()); zipStream.putNextEntry(zipEntry); inputStream = new FileInputStream(archiveFile); while (true) { int bytesRead = inputStream.read(buffer); if (bytesRead < 0 || backupConfig.isCancelled()) { break; } if (hash) { if (signHash) { mac.update(buffer, 0, bytesRead); } else { digest.update(buffer, 0, bytesRead); } } zipStream.write(buffer, 0, bytesRead); } inputStream.close(); zipStream.closeEntry(); } } } catch (Exception e) { logger.traceException(e); StaticUtils.close(inputStream, zipStream); message = ERR_CONFIG_BACKUP_CANNOT_BACKUP_ARCHIVED_CONFIGS.get( configFile, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // We're done writing the file, so close the zip stream (which should also // close the underlying stream). try { zipStream.close(); } catch (Exception e) { logger.traceException(e); message = ERR_CONFIG_BACKUP_CANNOT_CLOSE_ZIP_STREAM.get( filename, backupDirectory.getPath(), getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Get the digest or MAC bytes if appropriate. byte[] digestBytes = null; byte[] macBytes = null; if (hash) { if (signHash) { macBytes = mac.doFinal(); } else { digestBytes = digest.digest(); } } // Create the backup info structure for this backup and add it to the backup // directory. // FIXME -- Should I use the date from when I started or finished? 
BackupInfo backupInfo = new BackupInfo(backupDirectory, backupID, new Date(), false, compress, encrypt, digestBytes, macBytes, null, backupProperties); try { backupDirectory.addBackup(backupInfo); backupDirectory.writeBackupDirectoryDescriptor(); } catch (Exception e) { logger.traceException(e); message = ERR_CONFIG_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( backupDirectory.getDescriptorPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Remove the backup if this operation was cancelled since the // backup may be incomplete if (backupConfig.isCancelled()) { removeBackup(backupDirectory, backupID); } new BackupManager(getBackendID()).createBackup(this, backupConfig); } /** {@inheritDoc} */ @Override public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException { // NYI new BackupManager(getBackendID()).removeBackup(backupDirectory, backupID); } /** {@inheritDoc} */ @Override public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException { // First, make sure that the requested backup exists. BackupDirectory backupDirectory = restoreConfig.getBackupDirectory(); String backupPath = backupDirectory.getPath(); String backupID = restoreConfig.getBackupID(); BackupInfo backupInfo = backupDirectory.getBackupInfo(backupID); if (backupInfo == null) { LocalizableMessage message = ERR_CONFIG_RESTORE_NO_SUCH_BACKUP.get(backupID, backupPath); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } // Read the backup info structure to determine the name of the file that // contains the archive. Then make sure that file exists. String backupFilename = backupInfo.getBackupProperty(BACKUP_PROPERTY_ARCHIVE_FILENAME); if (backupFilename == null) { LocalizableMessage message = ERR_CONFIG_RESTORE_NO_BACKUP_FILE.get(backupID, backupPath); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } File backupFile = new File(backupPath + File.separator + backupFilename); try { if (! backupFile.exists()) { LocalizableMessage message = ERR_CONFIG_RESTORE_NO_SUCH_FILE.get(backupID, backupFile.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } catch (DirectoryException de) { throw de; } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_CHECK_FOR_ARCHIVE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If the backup is hashed, then we need to get the message digest to use // to verify it. 
byte[] unsignedHash = backupInfo.getUnsignedHash(); MessageDigest digest = null; if (unsignedHash != null) { String digestAlgorithm = backupInfo.getBackupProperty(BACKUP_PROPERTY_DIGEST_ALGORITHM); if (digestAlgorithm == null) { LocalizableMessage message = ERR_CONFIG_RESTORE_UNKNOWN_DIGEST.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { digest = DirectoryServer.getCryptoManager().getMessageDigest( digestAlgorithm); } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_GET_DIGEST.get(backupID, digestAlgorithm); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // If the backup is signed, then we need to get the MAC to use to verify it. byte[] signedHash = backupInfo.getSignedHash(); Mac mac = null; if (signedHash != null) { String macKeyID = backupInfo.getBackupProperty(BACKUP_PROPERTY_MAC_KEY_ID); if (macKeyID == null) { LocalizableMessage message = ERR_CONFIG_RESTORE_UNKNOWN_MAC.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } try { mac = DirectoryServer.getCryptoManager().getMacEngine(macKeyID); } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_GET_MAC.get( backupID, macKeyID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Create the input stream that will be used to read the backup file. At // its core, it will be a file input stream. InputStream inputStream; try { inputStream = new FileInputStream(backupFile); } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_OPEN_BACKUP_FILE.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // If the backup is encrypted, then we need to wrap the file input stream // in a cipher input stream. if (backupInfo.isEncrypted()) { try { inputStream = DirectoryServer.getCryptoManager() .getCipherInputStream(inputStream); } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_GET_CIPHER.get( backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Now wrap the resulting input stream in a zip stream so that we can read // its contents. We don't need to worry about whether to use compression or // not because it will be handled automatically. ZipInputStream zipStream = new ZipInputStream(inputStream); // Determine whether we should actually do the restore, or if we should just // try to verify the archive. If we are going to actually do the restore, // then create a directory and move the existing config files there so that // they can be restored in case something goes wrong. String configFilePath = ((ConfigFileHandler) DirectoryServer.getConfigHandler()).configFile; File configFile = new File(configFilePath); File configDir = configFile.getParentFile(); String configDirPath = configDir.getPath(); String backupDirPath = null; File configBackupDir = null; boolean verifyOnly = restoreConfig.verifyOnly(); if (! verifyOnly) { // Create a new directory to hold the current config files. 
try { if (configDir.exists()) { String configBackupDirPath = configDirPath + ".save"; backupDirPath = configBackupDirPath; configBackupDir = new File(backupDirPath); if (configBackupDir.exists()) { int i=2; while (true) { backupDirPath = configBackupDirPath + i; configBackupDir = new File(backupDirPath); if (configBackupDir.exists()) { i++; } else { break; } } } configBackupDir.mkdirs(); moveFile(configFile, configBackupDir); File archiveDirectory = new File(configDir, CONFIG_ARCHIVE_DIR_NAME); if (archiveDirectory.exists()) { File archiveBackupPath = new File(configBackupDir, CONFIG_ARCHIVE_DIR_NAME); archiveDirectory.renameTo(archiveBackupPath); } } } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_BACKUP_EXISTING_CONFIG. get(backupID, configDirPath, backupDirPath, getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // Create a new directory to hold the restored config files. try { configDir.mkdirs(); } catch (Exception e) { // Try to restore the previous config directory if possible. This will // probably fail in this case, but try anyway. if (configBackupDir != null) { try { configBackupDir.renameTo(configDir); logger.info(NOTE_CONFIG_RESTORE_RESTORED_OLD_CONFIG, configDirPath); } catch (Exception e2) { logger.error(ERR_CONFIG_RESTORE_CANNOT_RESTORE_OLD_CONFIG, configBackupDir.getPath()); } } LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_CREATE_CONFIG_DIRECTORY.get( backupID, configDirPath, getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Read through the archive file an entry at a time. For each entry, update // the digest or MAC if necessary, and if we're actually doing the restore, // then write the files out into the config directory. byte[] buffer = new byte[8192]; while (true) { ZipEntry zipEntry; try { zipEntry = zipStream.getNextEntry(); } catch (Exception e) { // Tell the user where the previous config was archived. if (configBackupDir != null) { logger.error(ERR_CONFIG_RESTORE_OLD_CONFIG_SAVED, configBackupDir.getPath()); } LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_GET_ZIP_ENTRY.get( backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } if (zipEntry == null) { break; } // Get the filename for the zip entry and update the digest or MAC as // necessary. String fileName = zipEntry.getName(); if (digest != null) { digest.update(getBytes(fileName)); } if (mac != null) { mac.update(getBytes(fileName)); } // If we're doing the restore, then create the output stream to write the // file. OutputStream outputStream = null; if (! verifyOnly) { File restoreFile = new File(configDirPath + File.separator + fileName); File parentDir = restoreFile.getParentFile(); try { if (! parentDir.exists()) { parentDir.mkdirs(); } outputStream = new FileOutputStream(restoreFile); } catch (Exception e) { // Tell the user where the previous config was archived. if (configBackupDir != null) { logger.error(ERR_CONFIG_RESTORE_OLD_CONFIG_SAVED, configBackupDir.getPath()); } LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_CREATE_FILE. get(backupID, restoreFile.getAbsolutePath(), stackTraceToSingleLineString(e)); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), message, e); } } // Read the contents of the file and update the digest or MAC as // necessary. 
If we're actually restoring it, then write it into the // new config directory. try { while (true) { int bytesRead = zipStream.read(buffer); if (bytesRead < 0) { // We've reached the end of the entry. break; } // Update the digest or MAC if appropriate. if (digest != null) { digest.update(buffer, 0, bytesRead); } if (mac != null) { mac.update(buffer, 0, bytesRead); } // Write the data to the output stream if appropriate. if (outputStream != null) { outputStream.write(buffer, 0, bytesRead); } } // We're at the end of the file so close the output stream if we're // writing it. if (outputStream != null) { outputStream.close(); } } catch (Exception e) { // Tell the user where the previous config was archived. if (configBackupDir != null) { logger.error(ERR_CONFIG_RESTORE_OLD_CONFIG_SAVED, configBackupDir.getPath()); } LocalizableMessage message = ERR_CONFIG_RESTORE_CANNOT_PROCESS_ARCHIVE_FILE.get( backupID, fileName, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } // Close the zip stream since we don't need it anymore. try { zipStream.close(); } catch (Exception e) { LocalizableMessage message = ERR_CONFIG_RESTORE_ERROR_ON_ZIP_STREAM_CLOSE.get( backupID, backupFile.getPath(), getExceptionMessage(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } // At this point, we should be done with the contents of the ZIP file and // the restore should be complete. If we were generating a digest or MAC, // then make sure it checks out. if (digest != null) { byte[] calculatedHash = digest.digest(); if (Arrays.equals(calculatedHash, unsignedHash)) { logger.info(NOTE_CONFIG_RESTORE_UNSIGNED_HASH_VALID); } else { // Tell the user where the previous config was archived. if (configBackupDir != null) { logger.error(ERR_CONFIG_RESTORE_OLD_CONFIG_SAVED, configBackupDir.getPath()); } LocalizableMessage message = ERR_CONFIG_RESTORE_UNSIGNED_HASH_INVALID.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } if (mac != null) { byte[] calculatedSignature = mac.doFinal(); if (Arrays.equals(calculatedSignature, signedHash)) { logger.info(NOTE_CONFIG_RESTORE_SIGNED_HASH_VALID); } else { // Tell the user where the previous config was archived. if (configBackupDir != null) { logger.error(ERR_CONFIG_RESTORE_OLD_CONFIG_SAVED, configBackupDir.getPath()); } LocalizableMessage message = ERR_CONFIG_RESTORE_SIGNED_HASH_INVALID.get(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } // If we are just verifying the archive, then we're done. if (verifyOnly) { logger.info(NOTE_CONFIG_RESTORE_VERIFY_SUCCESSFUL, backupID, backupPath); return; } // If we've gotten here, then the archive was restored successfully. Get // rid of the temporary copy we made of the previous config directory and // exit. if (configBackupDir != null) { recursiveDelete(configBackupDir); } logger.info(NOTE_CONFIG_RESTORE_SUCCESSFUL, backupID, backupPath); new BackupManager(getBackendID()).restoreBackup(this, restoreConfig); } /** {@inheritDoc} */ @@ -2857,4 +2056,66 @@ throw new UnsupportedOperationException("Operation not supported."); } /** {@inheritDoc} */ @Override public File getDirectory() { return getConfigFileInBackendContext().getParentFile(); } private File getConfigFileInBackendContext() { // This may seem a little weird, but in some context, we only have access to // this class as a backend and not as the config handler. 
We need it as a // config handler to determine the path to the config file, so we can get // that from the Directory Server object. return new File(((ConfigFileHandler) DirectoryServer.getConfigHandler()).configFile); } /** {@inheritDoc} */ @Override public ListIterator<Path> getFilesToBackup() { final List<Path> files = new ArrayList<>(); // the main config file File theConfigFile = getConfigFileInBackendContext(); files.add(theConfigFile.toPath()); // the files in archive directory File archiveDirectory = new File(getDirectory(), CONFIG_ARCHIVE_DIR_NAME); if (archiveDirectory.exists()) { for (File archiveFile : archiveDirectory.listFiles()) { files.add(archiveFile.toPath()); } } return files.listIterator(); } /** {@inheritDoc} */ @Override public boolean isDirectRestore() { return true; } /** {@inheritDoc} */ @Override public Path beforeRestore() throws DirectoryException { // save current config files to a save directory return BackupManager.saveCurrentFilesToDirectory(this, getBackendID()); } /** {@inheritDoc} */ @Override public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException { // restore was successful, delete save directory StaticUtils.recursiveDelete(saveDirectory.toFile()); } } opendj-server-legacy/src/main/java/org/opends/server/util/BackupManager.java
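/*
 * Illustrative sketch for the BackupManager file that follows: its incremental backups record
 * the name and size of the last archived file in the base backup (PROPERTY_LAST_LOGFILE_NAME /
 * PROPERTY_LAST_LOGFILE_SIZE) and list the files unchanged since that base backup in an
 * "unchanged.txt" archive entry. The helper below restates the condition used by
 * writeUnchangedFiles(); the class and method names are hypothetical, and it assumes the
 * backupable entity supplies its files sorted by relative path.
 */
final class IncrementalBackupConditionSketch
{
  /**
   * Returns true when an incremental backup can skip a file: it sorts before the last file
   * archived by the base backup, or it is that same file with an unchanged size.
   */
  static boolean isUnchangedSinceBaseBackup(String relativePath, long fileSize,
      String lastArchivedName, long lastArchivedSize)
  {
    int cmp = relativePath.compareTo(lastArchivedName);
    return cmp < 0 || (cmp == 0 && fileSize == lastArchivedSize);
  }
}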
New file @@ -0,0 +1,1637 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License, Version 1.0 only * (the "License"). You may not use this file except in compliance * with the License. * * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt * or http://forgerock.org/license/CDDLv1.0.html. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at legal-notices/CDDLv1_0.txt. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: * Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END * * * Copyright 2006-2009 Sun Microsystems, Inc. * Portions Copyright 2013-2015 ForgeRock AS. */ package org.opends.server.util; import static java.util.Collections.*; import static org.opends.messages.BackendMessages.*; import static org.opends.messages.UtilityMessages.*; import static org.opends.server.util.ServerConstants.*; import static org.opends.server.util.StaticUtils.*; import java.io.BufferedReader; import java.io.Closeable; import java.io.File; import java.io.FileFilter; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.Set; import java.util.regex.Pattern; import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; import javax.crypto.Mac; import org.forgerock.i18n.LocalizableMessage; import org.forgerock.i18n.slf4j.LocalizedLogger; import org.forgerock.opendj.config.server.ConfigException; import org.forgerock.opendj.ldap.ResultCode; import org.forgerock.util.Pair; import org.opends.server.api.Backupable; import org.opends.server.core.DirectoryServer; import org.opends.server.types.BackupConfig; import org.opends.server.types.BackupDirectory; import org.opends.server.types.BackupInfo; import org.opends.server.types.CryptoManager; import org.opends.server.types.CryptoManagerException; import org.opends.server.types.DirectoryException; import org.opends.server.types.RestoreConfig; /** * A backup manager for any entity that is backupable (backend, storage). * * @see {@link Backupable} */ public class BackupManager { private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass(); /** * The common prefix for archive files. */ private static final String BACKUP_BASE_FILENAME = "backup-"; /** * The name of the property that holds the name of the latest log file * at the time the backup was created. */ private static final String PROPERTY_LAST_LOGFILE_NAME = "last_logfile_name"; /** * The name of the property that holds the size of the latest log file * at the time the backup was created. 
*/ private static final String PROPERTY_LAST_LOGFILE_SIZE = "last_logfile_size"; /** * The name of the entry in an incremental backup archive file * containing a list of log files that are unchanged since the * previous backup. */ private static final String ZIPENTRY_UNCHANGED_LOGFILES = "unchanged.txt"; /** * The name of a dummy entry in the backup archive file that will act * as a placeholder in case a backup is done on an empty backend. */ private static final String ZIPENTRY_EMPTY_PLACEHOLDER = "empty.placeholder"; /** * The backend ID. */ private final String backendID; /** * Construct a backup manager for a backend. * * @param backendID * The ID of the backend instance for which a backup manager is * required. */ public BackupManager(String backendID) { this.backendID = backendID; } /** A cryptographic engine to use for backup creation or restore. */ private static abstract class CryptoEngine { final CryptoManager cryptoManager; final boolean shouldEncrypt; /** Creates a crypto engine for archive creation. */ static CryptoEngine forCreation(BackupConfig backupConfig, NewBackupParams backupParams) throws DirectoryException { if (backupConfig.hashData()) { if (backupConfig.signHash()) { return new MacCryptoEngine(backupConfig, backupParams); } else { return new DigestCryptoEngine(backupConfig, backupParams); } } else { return new NoHashCryptoEngine(backupConfig.encryptData()); } } /** Creates a crypto engine for archive restore. */ static CryptoEngine forRestore(BackupInfo backupInfo) throws DirectoryException { boolean hasSignedHash = backupInfo.getSignedHash() != null; boolean hasHashData = hasSignedHash || backupInfo.getUnsignedHash() != null; if (hasHashData) { if (hasSignedHash) { return new MacCryptoEngine(backupInfo); } else { return new DigestCryptoEngine(backupInfo); } } else { return new NoHashCryptoEngine(backupInfo.isEncrypted()); } } CryptoEngine(boolean shouldEncrypt) { cryptoManager = DirectoryServer.getCryptoManager(); this.shouldEncrypt = shouldEncrypt; } /** Indicates if data is encrypted. */ final boolean shouldEncrypt() { return shouldEncrypt; } /** Indicates if hashed data is signed. */ boolean hasSignedHash() { return false; } /** Update the hash with the provided string. */ abstract void updateHashWith(String s); /** Update the hash with the provided buffer. */ abstract void updateHashWith(byte[] buffer, int offset, int len); /** Generates the hash bytes. */ abstract byte[] generateBytes(); /** Returns the error message to use in case of check failure. */ abstract LocalizableMessage getErrorMessageForCheck(String backupID); /** Check that generated hash is equal to the provided hash. */ final void check(byte[] hash, String backupID) throws DirectoryException { byte[] bytes = generateBytes(); if (bytes != null && !Arrays.equals(bytes, hash)) { LocalizableMessage message = getErrorMessageForCheck(backupID); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } } /** Wraps an output stream in a cipher output stream if encryption is required. 
*/ final OutputStream encryptOutput(OutputStream output) throws DirectoryException { if (!shouldEncrypt()) { return output; } try { return cryptoManager.getCipherOutputStream(output); } catch (CryptoManagerException e) { logger.traceException(e); StaticUtils.close(output); LocalizableMessage message = ERR_BACKUP_CANNOT_GET_CIPHER.get(stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } /** Wraps an input stream in a cipher input stream if encryption is required. */ final InputStream encryptInput(InputStream inputStream) throws DirectoryException { if (!shouldEncrypt) { return inputStream; } try { return cryptoManager.getCipherInputStream(inputStream); } catch (CryptoManagerException e) { logger.traceException(e); StaticUtils.close(inputStream); LocalizableMessage message = ERR_BACKUP_CANNOT_GET_CIPHER.get(stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } } /** Represents the cryptographic engine with no hash used for a backup. */ private static final class NoHashCryptoEngine extends CryptoEngine { NoHashCryptoEngine(boolean shouldEncrypt) { super(shouldEncrypt); } @Override void updateHashWith(String s) { // nothing to do } @Override void updateHashWith(byte[] buffer, int offset, int len) { // nothing to do } @Override byte[] generateBytes() { return null; } @Override LocalizableMessage getErrorMessageForCheck(String backupID) { // check never fails because bytes are always null return null; } } /** * Represents the cryptographic engine with signed hash. */ private static final class MacCryptoEngine extends CryptoEngine { private Mac mac; /** Constructor for backup creation. */ private MacCryptoEngine(BackupConfig backupConfig, NewBackupParams backupParams) throws DirectoryException { super(backupConfig.encryptData()); String macKeyID = null; try { macKeyID = cryptoManager.getMacEngineKeyEntryID(); backupParams.putProperty(BACKUP_PROPERTY_MAC_KEY_ID, macKeyID); } catch (CryptoManagerException e) { LocalizableMessage message = ERR_BACKUP_CANNOT_GET_MAC_KEY_ID.get(backupParams.backupID, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } retrieveMacEngine(macKeyID); } /** Constructor for backup restore. 
*/ private MacCryptoEngine(BackupInfo backupInfo) throws DirectoryException { super(backupInfo.isEncrypted()); HashMap<String,String> backupProperties = backupInfo.getBackupProperties(); String macKeyID = backupProperties.get(BACKUP_PROPERTY_MAC_KEY_ID); retrieveMacEngine(macKeyID); } private void retrieveMacEngine(String macKeyID) throws DirectoryException { try { mac = cryptoManager.getMacEngine(macKeyID); } catch (Exception e) { LocalizableMessage message = ERR_BACKUP_CANNOT_GET_MAC.get(macKeyID, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } /** {@inheritDoc} */ @Override void updateHashWith(String s) { mac.update(getBytes(s)); } /** {@inheritDoc} */ @Override void updateHashWith(byte[] buffer, int offset, int len) { mac.update(buffer, offset, len); } @Override byte[] generateBytes() { return mac.doFinal(); } @Override boolean hasSignedHash() { return true; } @Override LocalizableMessage getErrorMessageForCheck(String backupID) { return ERR_BACKUP_SIGNED_HASH_ERROR.get(backupID); } @Override public String toString() { return "MacCryptoEngine [mac=" + mac + "]"; } } /** Represents the cryptographic engine with unsigned hash used for a backup. */ private static final class DigestCryptoEngine extends CryptoEngine { private final MessageDigest digest; /** Constructor for backup creation. */ private DigestCryptoEngine(BackupConfig backupConfig, NewBackupParams backupParams) throws DirectoryException { super(backupConfig.encryptData()); String digestAlgorithm = cryptoManager.getPreferredMessageDigestAlgorithm(); backupParams.putProperty(BACKUP_PROPERTY_DIGEST_ALGORITHM, digestAlgorithm); digest = retrieveMessageDigest(digestAlgorithm); } /** Constructor for backup restore. */ private DigestCryptoEngine(BackupInfo backupInfo) throws DirectoryException { super(backupInfo.isEncrypted()); HashMap<String, String> backupProperties = backupInfo.getBackupProperties(); String digestAlgorithm = backupProperties.get(BACKUP_PROPERTY_DIGEST_ALGORITHM); digest = retrieveMessageDigest(digestAlgorithm); } private MessageDigest retrieveMessageDigest(String digestAlgorithm) throws DirectoryException { try { return cryptoManager.getMessageDigest(digestAlgorithm); } catch (Exception e) { LocalizableMessage message = ERR_BACKUP_CANNOT_GET_DIGEST.get(digestAlgorithm, stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } /** {@inheritDoc} */ @Override public void updateHashWith(String s) { digest.update(getBytes(s)); } /** {@inheritDoc} */ @Override public void updateHashWith(byte[] buffer, int offset, int len) { digest.update(buffer, offset, len); } /** {@inheritDoc} */ @Override public byte[] generateBytes() { return digest.digest(); } /** {@inheritDoc} */ @Override LocalizableMessage getErrorMessageForCheck(String backupID) { return ERR_BACKUP_UNSIGNED_HASH_ERROR.get(backupID); } @Override public String toString() { return "DigestCryptoEngine [digest=" + digest + "]"; } } /** * Contains all parameters for creation of a new backup. 
*/ private static final class NewBackupParams { final String backupID; final BackupDirectory backupDir; final HashMap<String,String> backupProperties; final boolean shouldCompress; final boolean isIncremental; final String incrementalBaseID; final BackupInfo baseBackupInfo; NewBackupParams(BackupConfig backupConfig) throws DirectoryException { backupID = backupConfig.getBackupID(); backupDir = backupConfig.getBackupDirectory(); backupProperties = new HashMap<String,String>(); shouldCompress = backupConfig.compressData(); incrementalBaseID = retrieveIncrementalBaseID(backupConfig); isIncremental = incrementalBaseID != null; baseBackupInfo = isIncremental ? getBackupInfo(backupDir, incrementalBaseID) : null; } private String retrieveIncrementalBaseID(BackupConfig backupConfig) { String id = null; if (backupConfig.isIncremental()) { if (backupConfig.getIncrementalBaseID() == null && backupDir.getLatestBackup() != null) { // The default is to use the latest backup as base. id = backupDir.getLatestBackup().getBackupID(); } else { id = backupConfig.getIncrementalBaseID(); } if (id == null) { // No incremental backup ID: log a message informing that a backup // could not be found and that a normal backup will be done. logger.warn(WARN_BACKUPDB_INCREMENTAL_NOT_FOUND_DOING_NORMAL, backupDir.getPath()); } } return id; } void putProperty(String name, String value) { backupProperties.put(name, value); } @Override public String toString() { return "BackupCreationParams [backupID=" + backupID + ", backupDir=" + backupDir.getPath() + "]"; } } /** Represents a new backup archive. */ private static final class NewBackupArchive { private final String archiveFilename; private String latestFileName; private long latestFileSize; private final HashSet<String> dependencies; private final String backendID; private final NewBackupParams newBackupParams; private final CryptoEngine cryptoEngine; NewBackupArchive(String backendID, NewBackupParams backupParams, CryptoEngine crypt) { this.backendID = backendID; this.newBackupParams = backupParams; this.cryptoEngine = crypt; dependencies = new HashSet<String>(); if (backupParams.isIncremental) { HashMap<String,String> properties = backupParams.baseBackupInfo.getBackupProperties(); latestFileName = properties.get(PROPERTY_LAST_LOGFILE_NAME); latestFileSize = Long.parseLong(properties.get(PROPERTY_LAST_LOGFILE_SIZE)); } archiveFilename = BACKUP_BASE_FILENAME + backendID + "-" + backupParams.backupID; } String getArchiveFilename() { return archiveFilename; } String getBackendID() { return backendID; } String getBackupID() { return newBackupParams.backupID; } String getBackupPath() { return newBackupParams.backupDir.getPath(); } void addBaseBackupAsDependency() { dependencies.add(newBackupParams.baseBackupInfo.getBackupID()); } void updateBackupDirectory() throws DirectoryException { BackupInfo backupInfo = createDescriptorForBackup(); try { newBackupParams.backupDir.addBackup(backupInfo); newBackupParams.backupDir.writeBackupDirectoryDescriptor(); } catch (Exception e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( newBackupParams.backupDir.getDescriptorPath(), stackTraceToSingleLineString(e)), e); } } /** Create a descriptor for the backup. */ private BackupInfo createDescriptorForBackup() { byte[] bytes = cryptoEngine.generateBytes(); byte[] digestBytes = cryptoEngine.hasSignedHash() ? null : bytes; byte[] macBytes = cryptoEngine.hasSignedHash() ? 
bytes : null; newBackupParams.putProperty(PROPERTY_LAST_LOGFILE_NAME, latestFileName); newBackupParams.putProperty(PROPERTY_LAST_LOGFILE_SIZE, String.valueOf(latestFileSize)); return new BackupInfo( newBackupParams.backupDir, newBackupParams.backupID, new Date(), newBackupParams.isIncremental, newBackupParams.shouldCompress, cryptoEngine.shouldEncrypt(), digestBytes, macBytes, dependencies, newBackupParams.backupProperties); } @Override public String toString() { return "NewArchive [archive file=" + archiveFilename + ", latestFileName=" + latestFileName + ", backendID=" + backendID + "]"; } } /** Represents an existing backup archive. */ private static final class ExistingBackupArchive { private final String backupID; private final BackupDirectory backupDir; private final BackupInfo backupInfo; private final CryptoEngine cryptoEngine; private final File archiveFile; ExistingBackupArchive(String backupID, BackupDirectory backupDir) throws DirectoryException { this.backupID = backupID; this.backupDir = backupDir; this.backupInfo = BackupManager.getBackupInfo(backupDir, backupID); this.cryptoEngine = CryptoEngine.forRestore(backupInfo); this.archiveFile = BackupManager.retrieveArchiveFile(backupInfo, backupDir.getPath()); } File getArchiveFile() { return archiveFile; } BackupInfo getBackupInfo() { return backupInfo; } String getBackupID() { return backupID; } CryptoEngine getCryptoEngine() { return cryptoEngine; } /** * Obtains a list of the dependencies of this backup in order from * the oldest (the full backup), to the most recent. * * @return A list of dependent backups. * @throws DirectoryException If a Directory Server error occurs. */ List<BackupInfo> getBackupDependencies() throws DirectoryException { List<BackupInfo> dependencies = new ArrayList<BackupInfo>(); BackupInfo currentBackupInfo = backupInfo; while (currentBackupInfo != null && !currentBackupInfo.getDependencies().isEmpty()) { String backupID = currentBackupInfo.getDependencies().iterator().next(); currentBackupInfo = backupDir.getBackupInfo(backupID); if (currentBackupInfo != null) { dependencies.add(currentBackupInfo); } } Collections.reverse(dependencies); return dependencies; } boolean hasDependencies() { return !backupInfo.getDependencies().isEmpty(); } /** Removes the archive from file system. */ boolean removeArchive() throws DirectoryException { try { backupDir.removeBackup(backupID); backupDir.writeBackupDirectoryDescriptor(); } catch (ConfigException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), e.getMessageObject()); } catch (Exception e) { logger.traceException(e); LocalizableMessage message = ERR_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( backupDir.getDescriptorPath(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } return archiveFile.delete(); } } /** Represents a writer of a backup archive. */ private static final class BackupArchiveWriter implements Closeable { private final ZipOutputStream zipOutputStream; private final NewBackupArchive archive; private final CryptoEngine cryptoEngine; BackupArchiveWriter(NewBackupArchive archive) throws DirectoryException { this.archive = archive; this.cryptoEngine = archive.cryptoEngine; this.zipOutputStream = open(archive.getBackupPath(), archive.getArchiveFilename()); } @Override public void close() throws IOException { StaticUtils.close(zipOutputStream); } /** * Writes the provided file to a new entry in the archive. 
* * @param file * The file to be written. * @param relativePath * The name of the zip entry to create, relative to the root directory being backed up. * @param cryptoMethod * The cryptographic method for the written data. * @param backupConfig * The configuration, used to know if operation is cancelled. * * @return The number of bytes written from the file. * @throws FileNotFoundException If the file to be archived does not exist. * @throws IOException If an I/O error occurs while archiving the file. */ long writeFile(Path file, String relativePath, CryptoEngine cryptoMethod, BackupConfig backupConfig) throws IOException, FileNotFoundException { ZipEntry zipEntry = new ZipEntry(relativePath); zipOutputStream.putNextEntry(zipEntry); cryptoMethod.updateHashWith(relativePath); InputStream inputStream = null; long totalBytesRead = 0; try { inputStream = new FileInputStream(file.toFile()); byte[] buffer = new byte[8192]; int bytesRead = inputStream.read(buffer); while (bytesRead > 0 && !backupConfig.isCancelled()) { cryptoMethod.updateHashWith(buffer, 0, bytesRead); zipOutputStream.write(buffer, 0, bytesRead); totalBytesRead += bytesRead; bytesRead = inputStream.read(buffer); } } finally { StaticUtils.close(inputStream); } zipOutputStream.closeEntry(); logger.info(NOTE_BACKUP_ARCHIVED_FILE, zipEntry.getName()); return totalBytesRead; } /** * Writes a list of strings to an entry in the archive. * * @param stringList * A list of strings to be written. The strings must not * contain newlines. * @param fileName * The name of the zip entry to be written. * @param cryptoMethod * The cryptographic method for the written data. * @throws IOException * If an I/O error occurs while writing the archive entry. */ void writeStrings(List<String> stringList, String fileName, CryptoEngine cryptoMethod) throws IOException { ZipEntry zipEntry = new ZipEntry(fileName); zipOutputStream.putNextEntry(zipEntry); cryptoMethod.updateHashWith(fileName); Writer writer = new OutputStreamWriter(zipOutputStream); for (String s : stringList) { cryptoMethod.updateHashWith(s); writer.write(s); writer.write(EOL); } writer.flush(); zipOutputStream.closeEntry(); } /** Writes an empty placeholder entry into the archive. */ void writeEmptyPlaceHolder() throws DirectoryException { try { ZipEntry emptyPlaceholder = new ZipEntry(ZIPENTRY_EMPTY_PLACEHOLDER); zipOutputStream.putNextEntry(emptyPlaceholder); } catch (IOException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_WRITE_ARCHIVE_FILE.get(ZIPENTRY_EMPTY_PLACEHOLDER, archive.getBackupID(), stackTraceToSingleLineString(e)), e); } } /** * Writes the files that are unchanged from the base backup (for an * incremental backup only). * <p> * The names of the unchanged files are listed in the "unchanged.txt" file, which * is put in the archive. * */ void writeUnchangedFiles(Path rootDirectory, ListIterator<Path> files, BackupConfig backupConfig) throws DirectoryException { List<String> unchangedFilenames = new ArrayList<String>(); while (files.hasNext() && !backupConfig.isCancelled()) { Path file = files.next(); String relativePath = rootDirectory.relativize(file).toString(); int cmp = relativePath.compareTo(archive.latestFileName); if (cmp > 0 || (cmp == 0 && file.toFile().length() != archive.latestFileSize)) { files.previous(); break; } logger.info(NOTE_BACKUP_FILE_UNCHANGED, relativePath); unchangedFilenames.add(relativePath); } if (!unchangedFilenames.isEmpty()) { writeUnchangedFilenames(unchangedFilenames); } } /** Writes the list of unchanged file names as a new entry in the archive.
*/ private void writeUnchangedFilenames(List<String> unchangedList) throws DirectoryException { String zipEntryName = ZIPENTRY_UNCHANGED_LOGFILES; try { writeStrings(unchangedList, zipEntryName, archive.cryptoEngine); } catch (IOException e) { logger.traceException(e); throw new DirectoryException( DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_WRITE_ARCHIVE_FILE.get(zipEntryName, archive.getBackupID(), stackTraceToSingleLineString(e)), e); } archive.addBaseBackupAsDependency(); } /** * Writes the new files in the archive. */ void writeChangedFiles(Path rootDirectory, ListIterator<Path> files, BackupConfig backupConfig) throws DirectoryException { while (files.hasNext() && !backupConfig.isCancelled()) { Path file = files.next(); String relativePath = rootDirectory.relativize(file).toString(); try { archive.latestFileSize = writeFile(file, relativePath, archive.cryptoEngine, backupConfig); archive.latestFileName = relativePath; } catch (FileNotFoundException e) { // The file may have been deleted by a cleaner (i.e. for JE storage) since we started. // The backupable entity is responsible for handling the changes through the files list iterator logger.traceException(e); } catch (IOException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_WRITE_ARCHIVE_FILE.get(relativePath, archive.getBackupID(), stackTraceToSingleLineString(e)), e); } } } private ZipOutputStream open(String backupPath, String archiveFilename) throws DirectoryException { OutputStream output = openStream(backupPath, archiveFilename); output = cryptoEngine.encryptOutput(output); return openZipStream(output); } private OutputStream openStream(String backupPath, String archiveFilename) throws DirectoryException { OutputStream output = null; try { File archiveFile = new File(backupPath, archiveFilename); int i = 1; while (archiveFile.exists()) { archiveFile = new File(backupPath, archiveFilename + "." + i); i++; } output = new FileOutputStream(archiveFile, false); archive.newBackupParams.putProperty(BACKUP_PROPERTY_ARCHIVE_FILENAME, archiveFilename); return output; } catch (Exception e) { logger.traceException(e); StaticUtils.close(output); LocalizableMessage message = ERR_BACKUP_CANNOT_CREATE_ARCHIVE_FILE. get(archiveFilename, backupPath, archive.getBackupID(), stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); } } /** Wraps the file output stream in a zip output stream. */ private ZipOutputStream openZipStream(OutputStream outputStream) { ZipOutputStream zipStream = new ZipOutputStream(outputStream); zipStream.setComment(ERR_BACKUP_ZIP_COMMENT.get(DynamicConstants.PRODUCT_NAME, archive.getBackupID()) .toString()); if (archive.newBackupParams.shouldCompress) { zipStream.setLevel(Deflater.DEFAULT_COMPRESSION); } else { zipStream.setLevel(Deflater.NO_COMPRESSION); } return zipStream; } @Override public String toString() { return "BackupArchiveWriter [archive file=" + archive.getArchiveFilename() + ", backendId=" + archive.getBackendID() + "]"; } } /** Represents a reader of a backup archive. 
*/ private static final class BackupArchiveReader { private final CryptoEngine cryptoEngine; private final File archiveFile; private final String identifier; private final BackupInfo backupInfo; BackupArchiveReader(String identifier, ExistingBackupArchive archive) { this.identifier = identifier; this.backupInfo = archive.getBackupInfo(); this.archiveFile = archive.getArchiveFile(); this.cryptoEngine = archive.getCryptoEngine(); } BackupArchiveReader(String identifier, BackupInfo backupInfo, String backupDirectoryPath) throws DirectoryException { this.identifier = identifier; this.backupInfo = backupInfo; this.archiveFile = BackupManager.retrieveArchiveFile(backupInfo, backupDirectoryPath); this.cryptoEngine = CryptoEngine.forRestore(backupInfo); } /** * Obtains the set of files in a backup that are unchanged from its * dependent backup or backups. * <p> * The file set is stored as the first entry in the archive file. * * @return The set of files that are listed in the "unchanged.txt" file * of the archive. * @throws DirectoryException * If an error occurs. */ Set<String> readUnchangedDependentFiles() throws DirectoryException { Set<String> hashSet = new HashSet<String>(); ZipInputStream zipStream = null; try { zipStream = openZipStream(); // Iterate through the entries in the zip file. ZipEntry zipEntry = zipStream.getNextEntry(); while (zipEntry != null) { // We are looking for the entry containing the list of unchanged files. if (ZIPENTRY_UNCHANGED_LOGFILES.equals(zipEntry.getName())) { hashSet.addAll(readAllLines(zipStream)); break; } zipEntry = zipStream.getNextEntry(); } return hashSet; } catch (IOException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_RESTORE.get( identifier, stackTraceToSingleLineString(e)), e); } finally { StaticUtils.close(zipStream); } } /** * Restores the provided set of files from the archive into the provided restore directory. * @param restoreDir * The target directory for restored files. * @param filesToRestore * The set of files to restore. If empty, all files in the archive * are restored. * @param restoreConfig * The restore configuration, used to check for cancellation of * this restore operation. * @param backupable * The backupable entity being restored. * @throws DirectoryException * If an error occurs. */ void restoreArchive(Path restoreDir, Set<String> filesToRestore, RestoreConfig restoreConfig, Backupable backupable) throws DirectoryException { try { restoreArchive0(restoreDir, filesToRestore, restoreConfig, backupable); } catch (IOException e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_RESTORE.get(identifier, stackTraceToSingleLineString(e)), e); } // check the hash byte[] hash = backupInfo.getUnsignedHash() != null ?
backupInfo.getUnsignedHash() : backupInfo.getSignedHash(); cryptoEngine.check(hash, backupInfo.getBackupID()); } private void restoreArchive0(Path restoreDir, Set<String> filesToRestore, RestoreConfig restoreConfig, Backupable backupable) throws DirectoryException, IOException { ZipInputStream zipStream = null; try { zipStream = openZipStream(); ZipEntry zipEntry = zipStream.getNextEntry(); while (zipEntry != null && !restoreConfig.isCancelled()) { String zipEntryName = zipEntry.getName(); Pair<Boolean, ZipEntry> result = handleSpecialEntries(zipStream, zipEntryName); if (result.getFirst()) { zipEntry = result.getSecond(); continue; } boolean mustRestoreOnDisk = !restoreConfig.verifyOnly() && (filesToRestore.isEmpty() || filesToRestore.contains(zipEntryName)); if (mustRestoreOnDisk) { restoreZipEntry(zipEntryName, zipStream, restoreDir, restoreConfig); } else { restoreZipEntryVirtual(zipEntryName, zipStream, restoreConfig); } zipEntry = zipStream.getNextEntry(); } } finally { StaticUtils.close(zipStream); } } /** * Handle any special entry in the archive. * * @return the pair (true, zipEntry) if next entry was read, (false, null) otherwise */ private Pair<Boolean, ZipEntry> handleSpecialEntries(ZipInputStream zipStream, String zipEntryName) throws IOException { if (ZIPENTRY_EMPTY_PLACEHOLDER.equals(zipEntryName)) { // the backup contains no files return Pair.of(true, zipStream.getNextEntry()); } if (ZIPENTRY_UNCHANGED_LOGFILES.equals(zipEntryName)) { // This entry is treated specially. It is never restored, // and its hash is computed on the strings, not the bytes. cryptoEngine.updateHashWith(zipEntryName); List<String> lines = readAllLines(zipStream); for (String line : lines) { cryptoEngine.updateHashWith(line); } return Pair.of(true, zipStream.getNextEntry()); } return Pair.of(false, null); } /** * Restores a zip entry virtually (no actual write on disk). */ private void restoreZipEntryVirtual(String zipEntryName, ZipInputStream zipStream, RestoreConfig restoreConfig) throws FileNotFoundException, IOException { if (restoreConfig.verifyOnly()) { logger.info(NOTE_BACKUP_VERIFY_FILE, zipEntryName); } cryptoEngine.updateHashWith(zipEntryName); restoreFile(zipStream, null, restoreConfig); } /** * Restores a zip entry with actual write on disk. */ private void restoreZipEntry(String zipEntryName, ZipInputStream zipStream, Path restoreDir, RestoreConfig restoreConfig) throws IOException, DirectoryException { OutputStream outputStream = null; long totalBytesRead = 0; try { Path fileToRestore = restoreDir.resolve(zipEntryName); ensureFileCanBeRestored(fileToRestore); outputStream = new FileOutputStream(fileToRestore.toFile()); cryptoEngine.updateHashWith(zipEntryName); totalBytesRead = restoreFile(zipStream, outputStream, restoreConfig); logger.info(NOTE_BACKUP_RESTORED_FILE, zipEntryName, totalBytesRead); } finally { StaticUtils.close(outputStream); } } private void ensureFileCanBeRestored(Path fileToRestore) throws DirectoryException { Path parent = fileToRestore.getParent(); if (!Files.exists(parent)) { try { Files.createDirectories(parent); } catch (IOException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_CREATE_DIRECTORY_TO_RESTORE_FILE.get(fileToRestore, identifier)); } } } /** * Restores the file provided by the zip input stream. * <p> * The restore can be virtual: if the outputStream is {@code null}, the file * is not actually restored on disk. 
*/ private long restoreFile(ZipInputStream zipInputStream, OutputStream outputStream, RestoreConfig restoreConfig) throws IOException { long totalBytesRead = 0; byte[] buffer = new byte[8192]; int bytesRead = zipInputStream.read(buffer); while (bytesRead > 0 && !restoreConfig.isCancelled()) { totalBytesRead += bytesRead; cryptoEngine.updateHashWith(buffer, 0, bytesRead); if (outputStream != null) { outputStream.write(buffer, 0, bytesRead); } bytesRead = zipInputStream.read(buffer); } return totalBytesRead; } private InputStream openStream() throws DirectoryException { try { return new FileInputStream(archiveFile); } catch (FileNotFoundException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_RESTORE.get(identifier, stackTraceToSingleLineString(e)), e); } } private ZipInputStream openZipStream() throws DirectoryException { InputStream inputStream = openStream(); inputStream = cryptoEngine.encryptInput(inputStream); return new ZipInputStream(inputStream); } private List<String> readAllLines(ZipInputStream zipStream) throws IOException { final ArrayList<String> results = new ArrayList<String>(); String line; BufferedReader reader = new BufferedReader(new InputStreamReader(zipStream)); while ((line = reader.readLine()) != null) { results.add(line); } return results; } } /** * Creates a backup of the provided backupable entity. * <p> * The backup is stored in a single zip file in the backup directory. * <p> * If the backup is incremental, then the first entry in the zip is a text * file containing a list of all the files that are unchanged since the * previous backup. The remaining zip entries are the backed-up files themselves, * which, for an incremental backup, only include those files that have changed. * * @param backupable * The underlying entity (storage, backend) to be backed up. * @param backupConfig * The configuration to use when performing the backup. * @throws DirectoryException * If a Directory Server error occurs. */ public void createBackup(final Backupable backupable, final BackupConfig backupConfig) throws DirectoryException { final NewBackupParams backupParams = new NewBackupParams(backupConfig); final CryptoEngine cryptoEngine = CryptoEngine.forCreation(backupConfig, backupParams); final NewBackupArchive newArchive = new NewBackupArchive(backendID, backupParams, cryptoEngine); BackupArchiveWriter archiveWriter = null; try { final ListIterator<Path> files = backupable.getFilesToBackup(); final Path rootDirectory = backupable.getDirectory().toPath(); archiveWriter = new BackupArchiveWriter(newArchive); if (files.hasNext()) { if (backupParams.isIncremental) { archiveWriter.writeUnchangedFiles(rootDirectory, files, backupConfig); } archiveWriter.writeChangedFiles(rootDirectory, files, backupConfig); } else { archiveWriter.writeEmptyPlaceHolder(); } } finally { closeArchiveWriter(archiveWriter, newArchive.getArchiveFilename(), backupParams.backupDir.getPath()); } newArchive.updateBackupDirectory(); if (backupConfig.isCancelled()) { // Remove the backup since it may be incomplete removeBackup(backupParams.backupDir, backupParams.backupID); } } /** * Restores a backupable entity from its backup, or verifies the backup. * * @param backupable * The underlying entity (storage, backend) to be restored. * @param restoreConfig * The configuration to use when performing the restore. * @throws DirectoryException * If a Directory Server error occurs.
*/ public void restoreBackup(Backupable backupable, RestoreConfig restoreConfig) throws DirectoryException { Path saveDirectory = null; if (!restoreConfig.verifyOnly()) { saveDirectory = backupable.beforeRestore(); } final String backupID = restoreConfig.getBackupID(); final ExistingBackupArchive existingArchive = new ExistingBackupArchive(backupID, restoreConfig.getBackupDirectory()); final Path restoreDirectory = getRestoreDirectory(backupable, backupID); if (existingArchive.hasDependencies()) { final BackupArchiveReader zipArchiveReader = new BackupArchiveReader(backupID, existingArchive); final Set<String> unchangedFilesToRestore = zipArchiveReader.readUnchangedDependentFiles(); final List<BackupInfo> dependencies = existingArchive.getBackupDependencies(); for (BackupInfo dependencyBackupInfo : dependencies) { restoreArchive(restoreDirectory, unchangedFilesToRestore, restoreConfig, backupable, dependencyBackupInfo); } } // Restore the final archive file. Set<String> filesToRestore = emptySet(); restoreArchive(restoreDirectory, filesToRestore, restoreConfig, backupable, existingArchive.getBackupInfo()); if (!restoreConfig.verifyOnly()) { backupable.afterRestore(restoreDirectory, saveDirectory); } } /** * Removes the specified backup if it is possible to do so. * * @param backupDir The backup directory structure with which the * specified backup is associated. * @param backupID The backup ID for the backup to be removed. * * @throws DirectoryException If it is not possible to remove the specified * backup for some reason (e.g., no such backup * exists or there are other backups that are * dependent upon it). */ public void removeBackup(BackupDirectory backupDir, String backupID) throws DirectoryException { ExistingBackupArchive archive = new ExistingBackupArchive(backupID, backupDir); archive.removeArchive(); } private Path getRestoreDirectory(Backupable backupable, String backupID) { File restoreDirectory = backupable.getDirectory(); if (!backupable.isDirectRestore()) { restoreDirectory = new File(restoreDirectory.getAbsoluteFile() + "-restore-" + backupID); } return restoreDirectory.toPath(); } private void closeArchiveWriter(BackupArchiveWriter archiveWriter, String backupFile, String backupPath) throws DirectoryException { if (archiveWriter != null) { try { archiveWriter.close(); } catch (Exception e) { logger.traceException(e); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_CLOSE_ZIP_STREAM.get(backupFile, backupPath, stackTraceToSingleLineString(e)), e); } } } /** * Restores the content of an archive file. * <p> * If the set of files to restore is not empty, only the specified files are restored; * if it is empty, all files in the archive are restored. * * When a backup has dependencies, this method is called once for each dependency backup * with the set of unchanged files to restore, and then once for the final archive with an * empty set. * @param restoreDir * The directory in which files are to be restored. * @param filesToRestore * The set of files to restore. If empty, then all files are * restored. * @param restoreConfig * The restore configuration. * @param backupable * The backupable entity being restored. * @param backupInfo * The backup containing the files to be restored. * * @throws DirectoryException * If a Directory Server error occurs.
*/ private void restoreArchive(Path restoreDir, Set<String> filesToRestore, RestoreConfig restoreConfig, Backupable backupable, BackupInfo backupInfo) throws DirectoryException { String backupID = backupInfo.getBackupID(); String backupDirectoryPath = restoreConfig.getBackupDirectory().getPath(); BackupArchiveReader zipArchiveReader = new BackupArchiveReader(backupID, backupInfo, backupDirectoryPath); zipArchiveReader.restoreArchive(restoreDir, filesToRestore, restoreConfig, backupable); } /** Retrieves the full path of the archive file. */ private static File retrieveArchiveFile(BackupInfo backupInfo, String backupDirectoryPath) { Map<String,String> backupProperties = backupInfo.getBackupProperties(); String archiveFilename = backupProperties.get(BACKUP_PROPERTY_ARCHIVE_FILENAME); return new File(backupDirectoryPath, archiveFilename); } /** * Get the information for a given backup ID from the backup directory. * * @param backupDir The backup directory. * @param backupID The backup ID. * @return The backup information, never null. * @throws DirectoryException If the backup information cannot be found. */ private static BackupInfo getBackupInfo(BackupDirectory backupDir, String backupID) throws DirectoryException { BackupInfo backupInfo = backupDir.getBackupInfo(backupID); if (backupInfo == null) { LocalizableMessage message = ERR_BACKUP_MISSING_BACKUPID.get(backupID, backupDir.getPath()); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message); } return backupInfo; } /** * Helper method to build a list of files to backup, in the simple case where all files are located * under the provided directory. * * @param directory * The directory containing files to backup. * @param filter * The filter to select files to backup. * @param identifier * Identifier of the backed-up entity * @return the files to backup, which may be empty but never {@code null} * @throws DirectoryException * if an error occurs. */ public static List<Path> getFiles(File directory, FileFilter filter, String identifier) throws DirectoryException { File[] files = null; try { files = directory.listFiles(filter); } catch (Exception e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_LIST_LOG_FILES.get(directory.getAbsolutePath(), identifier), e); } if (files == null) { throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, ERR_BACKUP_CANNOT_LIST_LOG_FILES.get(directory.getAbsolutePath(), identifier)); } List<Path> paths = new ArrayList<>(); for (File file : files) { paths.add(file.toPath()); } return paths; } /** * Helper method to save all current files of the provided backupable entity, using * default behavior. * * @param backupable * The entity to backup. * @param identifier * Identifier of the backup * @return the directory where all files are saved. * @throws DirectoryException * If a problem occurs. */ public static Path saveCurrentFilesToDirectory(Backupable backupable, String identifier) throws DirectoryException { ListIterator<Path> filesToBackup = backupable.getFilesToBackup(); File rootDirectory = backupable.getDirectory(); String saveDirectory = rootDirectory.getAbsolutePath() + ".save"; BackupManager.saveFilesToDirectory(rootDirectory.toPath(), filesToBackup, saveDirectory, identifier); return Paths.get(saveDirectory); } /** * Helper method to move all provided files in a target directory created from * provided target base path, keeping relative path information relative to * root directory. 
* * @param rootDirectory * A directory which is an ancestor of all provided files. * @param files * The files to move. * @param targetBasePath * Base path of the target directory. The actual directory is built by * appending a number to this base path, always ensuring that the directory is new. * @param identifier * Identifier of the backup * @return the actual directory where all files are saved. * @throws DirectoryException * If a problem occurs. */ public static Path saveFilesToDirectory(Path rootDirectory, ListIterator<Path> files, String targetBasePath, String identifier) throws DirectoryException { Path targetDirectory = null; try { targetDirectory = createDirectoryWithNumericSuffix(targetBasePath, identifier); while (files.hasNext()) { Path file = files.next(); Path relativeFilePath = rootDirectory.relativize(file); Path targetFile = targetDirectory.resolve(relativeFilePath); Files.createDirectories(targetFile.getParent()); Files.move(file, targetFile); } return targetDirectory; } catch (IOException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_SAVE_FILES_BEFORE_RESTORE.get(rootDirectory, targetDirectory, identifier, stackTraceToSingleLineString(e)), e); } } /** * Creates a new directory based on the provided directory path, by appending a numeric * suffix that is one higher than the highest suffix already in use. */ static Path createDirectoryWithNumericSuffix(final String baseDirectoryPath, String identifier) throws DirectoryException { try { int number = getHighestSuffixNumberForPath(baseDirectoryPath); String path = baseDirectoryPath + (number + 1); Path directory = Paths.get(path); Files.createDirectories(directory); return directory; } catch (IOException e) { throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), ERR_BACKUP_CANNOT_CREATE_SAVE_DIRECTORY.get(baseDirectoryPath, identifier, stackTraceToSingleLineString(e)), e); } } /** * Returns the number that corresponds to the highest suffix number existing for the provided base path. * <p> * Example: given the following directory structure * <pre> * +--- someDir * | \--- directory * | \--- directory1 * | \--- directory2 * | \--- directory10 * </pre> * getHighestSuffixNumberForPath("directory") returns 10. * * @param basePath * A base path to a file or directory, without any suffix number. * @return the highest suffix number, or 0 if no suffix number exists * @throws IOException * if an error occurs. */ private static int getHighestSuffixNumberForPath(final String basePath) throws IOException { final File baseFile = new File(basePath); final File[] existingFiles = baseFile.getParentFile().listFiles(); final Pattern pattern = Pattern.compile(basePath + "\\d*"); int highestNumber = 0; for (File file : existingFiles) { final String name = file.getCanonicalPath(); if (pattern.matcher(name).matches()) { String numberAsString = name.substring(basePath.length()); int number = numberAsString.isEmpty() ? 0 : Integer.valueOf(numberAsString); highestNumber = number > highestNumber ? number : highestNumber; } } return highestNumber; } } opendj-server-legacy/src/messages/org/opends/messages/backend.properties
@@ -978,9 +978,6 @@ restored from the archive in directory %s ERR_BACKUP_MISSING_BACKUPID_407=The information for backup %s could \ not be found in the backup directory %s ERR_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR_408=An error occurred \ while attempting to update the backup descriptor file %s with information \ about the backup: %s ERR_SCHEMA_MODIFY_RULEID_CONFLICTS_FOR_ADD_DSR_409=Unable to add DIT \ structure rule %s because its rule identifier conflicts with existing DIT structure \ rule (%s) @@ -1035,3 +1032,9 @@ WARN_DISK_SPACE_FULL_THRESHOLD_CROSSED_432=Disk free space of %d bytes for directory %s is now below disk low \ threshold of %d bytes. Backend %s is now offline and will no longer accept any operations until sufficient \ disk space is restored ERR_BACKEND_LIST_FILES_TO_BACKUP_433=An error occurred while trying to \ list the files to backup for backend '%s': %s ERR_BACKEND_SWITCH_TO_APPEND_MODE_434=An error occurred while trying to \ switch to append mode for backend '%s': %s ERR_BACKEND_END_APPEND_MODE_435=An error occurred while trying to \ end append mode for backend '%s': %s opendj-server-legacy/src/messages/org/opends/messages/jeb.properties
@@ -112,48 +112,10 @@ pre-loading ERR_JEB_CACHE_PRELOAD_62=An error occurred while preloading the \ database cache for backend %s: %s ERR_JEB_BACKUP_CANNOT_GET_MAC_63=An error occurred while attempting to \ obtain the %s MAC provider to create the signed hash for the backup: %s ERR_JEB_BACKUP_CANNOT_GET_DIGEST_64=An error occurred while attempting \ to obtain the %s message digest to create the hash for the backup: %s ERR_JEB_BACKUP_CANNOT_CREATE_ARCHIVE_FILE_65=An error occurred while \ trying to create the database archive file %s in directory %s: %s ERR_JEB_BACKUP_CANNOT_GET_CIPHER_66=An error occurred while attempting \ to obtain the cipher to use to encrypt the backup: %s ERR_JEB_BACKUP_ZIP_COMMENT_67=%s backup %s of backend %s ERR_JEB_BACKUP_CANNOT_LIST_LOG_FILES_68=An error occurred while \ attempting to obtain a list of the files in directory %s to include in the \ database backup: %s ERR_JEB_BACKUP_CANNOT_WRITE_ARCHIVE_FILE_69=An error occurred while \ attempting to back up database file %s: %s ERR_JEB_BACKUP_CANNOT_CLOSE_ZIP_STREAM_70=An error occurred while \ trying to close the database archive file %s in directory %s: %s ERR_JEB_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR_71=An error occurred \ while attempting to update the backup descriptor file %s with information \ about the database backup: %s ERR_JEB_BACKUP_UNSIGNED_HASH_ERROR_72=The computed hash of backup %s \ is different to the value computed at time of backup ERR_JEB_BACKUP_SIGNED_HASH_ERROR_73=The computed signed hash of backup \ %s is different to the value computed at time of backup ERR_JEB_INCR_BACKUP_REQUIRES_FULL_74=A full backup must be taken \ before an incremental backup can be taken ERR_JEB_CANNOT_RENAME_RESTORE_DIRECTORY_75=The directory %s, \ containing the files restored from backup, could not be renamed to the \ backend directory %s ERR_JEB_INCR_BACKUP_FROM_WRONG_BASE_76=One of the following base \ backup IDs must be specified for the incremental backup: %s ERR_JEB_CANNOT_CREATE_BACKUP_TAG_FILE_77=The backup tag file %s could \ not be created in %s ERR_JEB_BACKUP_CANNOT_RESTORE_78=An error occurred while attempting to \ restore the files from backup %s: %s ERR_JEB_BACKUP_MISSING_BACKUPID_79=The information for backup %s could \ not be found in the backup directory %s NOTE_JEB_BACKUP_FILE_UNCHANGED_82=Not changed: %s NOTE_JEB_BACKUP_CLEANER_ACTIVITY_83=Including %s additional log file(s) due \ to cleaner activity NOTE_JEB_BACKUP_VERIFY_FILE_84=Verifying: %s NOTE_JEB_BACKUP_RESTORED_FILE_85=Restored: %s (size %d) NOTE_JEB_BACKUP_ARCHIVED_FILE_86=Archived: %s NOTE_JEB_EXPORT_FINAL_STATUS_87=Exported %d entries and skipped %d in %d \ seconds (average rate %.1f/sec) NOTE_JEB_EXPORT_PROGRESS_REPORT_88=Exported %d records and skipped %d (recent \ opendj-server-legacy/src/messages/org/opends/messages/utility.properties
@@ -573,3 +573,47 @@ enter a valid integer ERR_ARG_SUBCOMMAND_INVALID_303=Invalid subcommand WARN_UNABLE_TO_USE_FILESYSTEM_API_304=Unable to gather information on Filesystem APIs, disk monitoring will be verbose ERR_BACKUP_CANNOT_GET_MAC_305=An error occurred while attempting to \ obtain the %s MAC provider to create the signed hash for the backup: %s ERR_BACKUP_CANNOT_GET_DIGEST_306=An error occurred while attempting \ to obtain the %s message digest to create the hash for the backup: %s ERR_BACKUP_CANNOT_CREATE_ARCHIVE_FILE_307=An error occurred while \ trying to create the archive file %s in directory %s for the backup %s: %s ERR_BACKUP_CANNOT_GET_CIPHER_308=An error occurred while attempting \ to obtain the cipher to use to encrypt the backup: %s ERR_BACKUP_ZIP_COMMENT_309=%s backup %s ERR_BACKUP_CANNOT_LIST_LOG_FILES_310=An error occurred while \ attempting to obtain a list of the files in directory %s to include in the \ backup: %s ERR_BACKUP_CANNOT_WRITE_ARCHIVE_FILE_311=An error occurred while \ attempting to back up file %s of backup %s: %s ERR_BACKUP_CANNOT_CLOSE_ZIP_STREAM_312=An error occurred while \ trying to close the archive file %s in directory %s: %s ERR_BACKUP_UNSIGNED_HASH_ERROR_313=The computed hash of backup %s \ is different to the value computed at time of backup ERR_BACKUP_SIGNED_HASH_ERROR_314=The computed signed hash of backup \ %s is different to the value computed at time of backup ERR_CANNOT_RENAME_RESTORE_DIRECTORY_315=The directory %s, \ containing the files restored from backup, could not be renamed to the \ directory %s ERR_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR_316=An error occurred \ while attempting to update the backup descriptor file %s with information \ about the backup: %s ERR_BACKUP_CANNOT_RESTORE_317=An error occurred while attempting to \ restore the files from backup %s: %s NOTE_BACKUP_FILE_UNCHANGED_318=Backup file has not changed: %s NOTE_BACKUP_VERIFY_FILE_319=Verifying backup file: %s NOTE_BACKUP_RESTORED_FILE_320=Restored backup file: %s (size %d) NOTE_BACKUP_ARCHIVED_FILE_321=Archived backup file: %s WARN_BACKUPDB_INCREMENTAL_NOT_FOUND_DOING_NORMAL_322=Could not find any \ backup in '%s'. A full backup will be executed ERR_BACKUP_CANNOT_GET_MAC_KEY_ID_323=An error occurred while attempting to \ obtain the MAC key ID to create the signed hash for the backup %s : %s ERR_BACKUP_CANNOT_CREATE_DIRECTORY_TO_RESTORE_FILE_324=An error occurred while \ attempting to create a directory to restore the file %s for backup of %s ERR_BACKUP_CANNOT_SAVE_FILES_BEFORE_RESTORE_325=An error occurred while \ attempting to save files from root directory %s to target directory %s, for \ backup of %s : %s ERR_BACKUP_CANNOT_CREATE_SAVE_DIRECTORY_326=An error occurred while \ attempting to create a save directory with base path %s before restore of \ backup of %s: %s opendj-server-legacy/src/test/java/org/opends/server/backends/jeb/TestBackendImpl.java
@@ -26,6 +26,10 @@ */ package org.opends.server.backends.jeb; import java.io.File; import java.io.FileOutputStream; import java.io.OutputStream; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -43,6 +47,7 @@ import org.opends.server.admin.server.AdminTestCaseUtils; import org.opends.server.admin.std.meta.LocalDBBackendCfgDefn; import org.opends.server.admin.std.server.LocalDBBackendCfg; import org.opends.server.backends.jeb.BackendImpl.JELogFilesIterator; import org.opends.server.controls.SubtreeDeleteControl; import org.opends.server.core.DeleteOperationBasis; import org.opends.server.core.DirectoryServer; @@ -63,6 +68,7 @@ import org.opends.server.types.RDN; import org.opends.server.types.SearchResultEntry; import org.opends.server.util.Base64; import org.opends.server.util.StaticUtils; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; @@ -81,6 +87,7 @@ import static org.opends.server.protocols.internal.InternalClientConnection.*; import static org.opends.server.protocols.internal.Requests.*; import static org.opends.server.types.Attributes.*; import static org.opends.server.util.StaticUtils.*; import static org.testng.Assert.*; /** @@ -1406,4 +1413,111 @@ } } @Test public void testJELogFilesIterator() throws Exception { File rootDir = TestCaseUtils.createTemporaryDirectory("jeLogFilesIterator"); try { createLogFilesInDirectory(rootDir.toPath(), "content", 0, 3); JELogFilesIterator iterator = new BackendImpl.JELogFilesIterator(rootDir, "backendID"); assertLogFilesIterator(iterator, rootDir, 0, 2); assertThat(iterator.hasNext()).isFalse(); } finally { StaticUtils.recursiveDelete(rootDir); } } @Test public void testJELogFilesIteratorWhenFileIsDeletedAndNewOneAdded() throws Exception { File rootDir = TestCaseUtils.createTemporaryDirectory("jeLogFilesIteratorDelete"); try { createLogFilesInDirectory(rootDir.toPath(), "content", 0, 2); JELogFilesIterator iterator = new BackendImpl.JELogFilesIterator(rootDir, "backendID"); assertLogFilesIterator(iterator, rootDir, 0, 1); assertThat(iterator.hasNext()).isFalse(); // delete first file log0.jdb and create a new one log2.jdb new File(rootDir, "log0.jdb").delete(); createLogFilesInDirectory(rootDir.toPath(), "content", 2, 1); assertLogFilesIterator(iterator, rootDir, 2, 2); assertThat(iterator.hasNext()).isFalse(); } finally { StaticUtils.recursiveDelete(rootDir); } } @Test public void testJELogFilesIteratorWhenFileIsDeletedAndLastOneHasLargerSize() throws Exception { File rootDir = TestCaseUtils.createTemporaryDirectory("jeLogFilesIteratorDelete2"); try { createLogFilesInDirectory(rootDir.toPath(), "content", 0, 2); JELogFilesIterator iterator = new BackendImpl.JELogFilesIterator(rootDir, "backendID"); assertLogFilesIterator(iterator, rootDir, 0, 1); // delete first file log0.jdb and update last one with larger content new File(rootDir, "log0.jdb").delete(); new File(rootDir, "log1.jdb").delete(); createLogFilesInDirectory(rootDir.toPath(), "morecontent", 1, 1); assertLogFilesIterator(iterator, rootDir, 1, 1); assertThat(iterator.hasNext()).isFalse(); } finally { StaticUtils.recursiveDelete(rootDir); } } private void assertLogFilesIterator(JELogFilesIterator iterator, File rootDir, int from, int to) { for (int i = from; i <= to; i++) { assertThat(iterator.hasNext()).as("hasNext expected to be true for " + i).isTrue(); assertThat(iterator.next().toFile()).isEqualTo(new File(rootDir, "log" + i + ".jdb")); } 
} /** * Creates dummy "logN.jdb" (where N is a number) files in given directory * with provided label as content for files. */ private List<Path> createLogFilesInDirectory(Path directory, String label, int start, int numberOfFiles) throws Exception { List<Path> files = new ArrayList<>(); for (int i = start; i < start+numberOfFiles; i++) { String filename = "log" + i + ".jdb"; Path file = directory.resolve(filename); createFile(file, StaticUtils.getBytes(label)); files.add(file); } return files; } private void createFile(Path file, byte[] content) throws Exception { OutputStream output = new FileOutputStream(file.toFile(), false); try { output.write(content); } finally { close(output); } } } opendj-server-legacy/src/test/java/org/opends/server/tasks/TestBackupAndRestore.java
@@ -22,7 +22,7 @@ * * * Copyright 2006-2008 Sun Microsystems, Inc. * Portions Copyright 2014 ForgeRock AS. * Portions Copyright 2014-2015 ForgeRock AS. */ package org.opends.server.tasks; @@ -128,6 +128,12 @@ TaskState.COMPLETED_SUCCESSFULLY }, { // Restore a SchemaBackend TestCaseUtils.makeEntry(restoreTask( "ds-backup-directory-path: bak" + File.separator + "schema")), TaskState.COMPLETED_SUCCESSFULLY }, { // Non-existent restore directory-path. TestCaseUtils.makeEntry(restoreTask( "ds-backup-directory-path: missing" opendj-server-legacy/src/test/java/org/opends/server/util/BackupManagerTestCase.java
New file @@ -0,0 +1,376 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License, Version 1.0 only * (the "License"). You may not use this file except in compliance * with the License. * * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt * or http://forgerock.org/license/CDDLv1.0.html. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at legal-notices/CDDLv1_0.txt. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: * Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END * * * Copyright 2015 ForgeRock AS */ package org.opends.server.util; import static org.assertj.core.api.Assertions.*; import static org.mockito.Mockito.*; import static org.opends.server.util.StaticUtils.*; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.ListIterator; import org.opends.server.DirectoryServerTestCase; import org.opends.server.TestCaseUtils; import org.opends.server.api.Backupable; import org.opends.server.types.BackupConfig; import org.opends.server.types.BackupDirectory; import org.opends.server.types.DN; import org.opends.server.types.RestoreConfig; import org.opends.server.util.StaticUtils; import org.testng.Reporter; import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @SuppressWarnings("javadoc") @Test(groups = { "precommit" }, sequential = true) public class BackupManagerTestCase extends DirectoryServerTestCase { private static final String ENTRY_DN = "dc=example,dc=com"; private static final String FILE_NAME_PREFIX = "file_"; private static final String BACKEND_ID = "backendID"; private static final String BACKUP_ID = "backupID"; @BeforeClass public void setUp() throws Exception { // Need the schema to be available, so make sure the server is started. 
// startFakeServer() is insufficient because we also need the CryptoManager to be initialized TestCaseUtils.startServer(); } @DataProvider Object[][] backupData() throws Exception { // For each case is provided // - a label identifying the case (not used in method but allow to identify easily the case in IDE) // - a mock of a backupable (building the mock also involves creating directory and files to backup) // - a backup config // - a restore config String label0 = "nohash"; Backupable backupable0 = buildBackupable(createSourceDirectory(label0), 3); BackupDirectory backupDir0 = buildBackupDir(label0); BackupConfig backupConfig0 = new BackupConfig(backupDir0, BACKUP_ID, false); RestoreConfig restoreConfig0 = new RestoreConfig(backupDir0, BACKUP_ID, false); String label1 = "unsignedhash"; Backupable backupable1 = buildBackupable(createSourceDirectory(label1), 3); BackupDirectory backupDir1 = buildBackupDir(label1); BackupConfig backupConfig1 = new BackupConfig(backupDir1, BACKUP_ID, false); backupConfig1.setHashData(true); RestoreConfig restoreConfig1 = new RestoreConfig(backupDir1, BACKUP_ID, false); String label2 = "signedhash"; Backupable backupable2 = buildBackupable(createSourceDirectory(label2), 3); BackupDirectory backupDir2 = buildBackupDir(label2); BackupConfig backupConfig2 = new BackupConfig(backupDir2, BACKUP_ID, false); backupConfig2.setHashData(true); backupConfig2.setSignHash(true); RestoreConfig restoreConfig2 = new RestoreConfig(backupDir2, BACKUP_ID, false); String label3 = "encrypted_compressed"; Backupable backupable3 = buildBackupable(createSourceDirectory(label3), 3); BackupDirectory backupDir3 = buildBackupDir(label3); BackupConfig backupConfig3 = new BackupConfig(backupDir3, BACKUP_ID, false); backupConfig3.setEncryptData(true); backupConfig3.setCompressData(true); RestoreConfig restoreConfig3 = new RestoreConfig(backupDir3, BACKUP_ID, false); // should perform a normal backup in absence of incremental base ID String label4 = "incremental_without_incrementalBaseID"; Backupable backupable4 = buildBackupable(createSourceDirectory(label4), 3); BackupDirectory backupDir4 = buildBackupDir(label4); BackupConfig backupConfig4 = new BackupConfig(backupDir4, BACKUP_ID, true); backupConfig4.setHashData(true); RestoreConfig restoreConfig4 = new RestoreConfig(backupDir4, BACKUP_ID, false); String label5 = "noFiles"; Backupable backupable5 = buildBackupable(createSourceDirectory(label5), 0); BackupDirectory backupDir5 = buildBackupDir(label5); BackupConfig backupConfig5 = new BackupConfig(backupDir5, BACKUP_ID, false); RestoreConfig restoreConfig5 = new RestoreConfig(backupDir5, BACKUP_ID, false); String label6 = "multiple_directories"; Backupable backupable6 = buildBackupableForMultipleDirectoriesCase(createSourceDirectory(label6), 3); BackupDirectory backupDir6 = buildBackupDir(label6); BackupConfig backupConfig6 = new BackupConfig(backupDir6, BACKUP_ID, false); RestoreConfig restoreConfig6 = new RestoreConfig(backupDir6, BACKUP_ID, false); return new Object[][] { { label0, backupable0, backupConfig0, restoreConfig0 }, { label1, backupable1, backupConfig1, restoreConfig1 }, { label2, backupable2, backupConfig2, restoreConfig2 }, { label3, backupable3, backupConfig3, restoreConfig3 }, { label4, backupable4, backupConfig4, restoreConfig4 }, { label5, backupable5, backupConfig5, restoreConfig5 }, { label6, backupable6, backupConfig6, restoreConfig6 }, }; } /** * This test encompasses creation, restore and remove of a backup. 
* * It ensures that a backup can actually be restored. */ @Test(dataProvider="backupData") public void testCreateBackupThenRestoreThenRemove(String label, Backupable backupable, BackupConfig backupConfig, RestoreConfig restoreConfig) throws Exception { BackupManager backupManager = new BackupManager(BACKEND_ID); // create and check archive files backupManager.createBackup(backupable, backupConfig); String backupPath = backupConfig.getBackupDirectory().getPath(); assertThat(new File(backupPath, getArchiveFileName(BACKUP_ID))).exists(); assertThat(new File(backupPath, "backup.info")).exists(); // change the content of the directory so we can later check that the restore recovers everything removeBackedUpFiles(backupable); // restore and check list of files backupManager.restoreBackup(backupable, restoreConfig); assertAllFilesAreRestoredCorrectly(backupable); // remove the backup archive and check backupManager.removeBackup(backupConfig.getBackupDirectory(), BACKUP_ID); assertThat(new File(backupPath, getArchiveFileName(BACKUP_ID))).doesNotExist(); } /** * This test encompasses creation, restore and remove of an incremental backup. * * It ensures that a backup can actually be restored. */ @Test public void testCreateIncrementalBackupThenRestoreThenRemove() throws Exception { Path sourceDirectory = createSourceDirectory("incremental"); BackupDirectory backupDir = buildBackupDir("incremental"); BackupManager backupManager = new BackupManager(BACKEND_ID); // perform first backup with 2 files Backupable backupable0 = buildBackupable(sourceDirectory, 2); String initialBackupId = BACKUP_ID + "_0"; BackupConfig backupConfig0 = new BackupConfig(backupDir, initialBackupId, true); backupManager.createBackup(backupable0, backupConfig0); // check archive and info file String backupPath = backupDir.getPath(); assertThat(new File(backupPath, getArchiveFileName(initialBackupId))).exists(); assertThat(new File(backupPath, "backup.info")).exists(); // perform second backup with 4 files (2 initial files plus 2 new files) // now backup with id "backupID" should depend on backup with id "backupID_0" Backupable backupable1 = buildBackupable(sourceDirectory, 4); BackupConfig backupConfig1 = new BackupConfig(backupDir, BACKUP_ID, true); backupManager.createBackup(backupable1, backupConfig1); assertThat(new File(backupPath, getArchiveFileName(initialBackupId))).exists(); assertThat(new File(backupPath, getArchiveFileName(BACKUP_ID))).exists(); assertThat(new File(backupPath, "backup.info")).exists(); assertThat(new File(backupPath, "backup.info.save")).exists(); // change the content of the directory so we can later check that the restore recovers everything removeBackedUpFiles(backupable1); // restore and check list of files RestoreConfig restoreConfig = new RestoreConfig(backupDir, BACKUP_ID, false); backupManager.restoreBackup(backupable1, restoreConfig); assertAllFilesAreRestoredCorrectly(backupable1); // remove the backup archive and check backupManager.removeBackup(backupDir, BACKUP_ID); assertThat(new File(backupPath, getArchiveFileName(BACKUP_ID))).doesNotExist(); backupManager.removeBackup(backupDir, initialBackupId); assertThat(new File(backupPath, getArchiveFileName(initialBackupId))).doesNotExist(); cleanDirectories(sourceDirectory, backupPath); } @Test public void testCreateDirectoryWithNumericSuffix() throws Exception { File directory = TestCaseUtils.createTemporaryDirectory("createDirectory-"); String dirPath = directory.getAbsolutePath(); // delete the directory
to ensure creation works fine when there is no directory directory.delete(); Path dir = BackupManager.createDirectoryWithNumericSuffix(dirPath, BACKUP_ID); assertThat(dir.toString()).isEqualTo(dirPath + "1"); Path dir2 = BackupManager.createDirectoryWithNumericSuffix(dirPath, BACKUP_ID); assertThat(dir2.toString()).isEqualTo(dirPath + "2"); recursiveDelete(dir.toFile()); recursiveDelete(dir2.toFile()); } @Test public void testSaveFilesToDirectory() throws Exception { Backupable backupable = buildBackupableForMultipleDirectoriesCase(createSourceDirectory("saveFiles-root"), 2); File rootDir = backupable.getDirectory(); File targetDir = TestCaseUtils.createTemporaryDirectory("saveFiles-target"); File actualTargetDir = BackupManager.saveFilesToDirectory(rootDir.toPath(), backupable.getFilesToBackup(), targetDir.getCanonicalPath(), BACKUP_ID).toFile(); // all files should have been saved in the target directory, with correct sub-path assertThat(new File(actualTargetDir, FILE_NAME_PREFIX+0)).exists(); assertThat(new File(actualTargetDir, FILE_NAME_PREFIX+1)).exists(); File subdir = new File(actualTargetDir, "subdir"); assertThat(new File(subdir, FILE_NAME_PREFIX+0)).exists(); assertThat(new File(subdir, FILE_NAME_PREFIX+1)).exists(); recursiveDelete(rootDir); recursiveDelete(targetDir); recursiveDelete(actualTargetDir); } private void cleanDirectories(Path sourceDirectory, String backupPath) { StaticUtils.recursiveDelete(sourceDirectory.toFile()); StaticUtils.recursiveDelete(new File(backupPath)); } private String getArchiveFileName(String backupId) { return "backup-" + BACKEND_ID + "-" + backupId; } private void assertAllFilesAreRestoredCorrectly(Backupable backupable) throws Exception { ListIterator<Path> files = backupable.getFilesToBackup(); while (files.hasNext()) { Path file = files.next(); assertThat(file.toFile()).exists(); assertThat(file.toFile()).hasContent(file.getFileName().toString()); } } private void removeBackedUpFiles(Backupable backupable) throws Exception { ListIterator<Path> it = backupable.getFilesToBackup(); while (it.hasNext()) { Path file = it.next(); Files.deleteIfExists(file); } } private BackupDirectory buildBackupDir(String label) throws Exception { File backupDirectory = TestCaseUtils.createTemporaryDirectory("backupDirectory-" + label + "-"); Reporter.log("Create backup directory:" + backupDirectory, true); BackupDirectory backupDir = new BackupDirectory(backupDirectory.getAbsolutePath(), DN.valueOf(ENTRY_DN)); return backupDir; } private Backupable buildBackupable(Path sourceDirectory, int numberOfFiles) throws Exception { List<Path> files = createFilesInDirectoryToBackup(sourceDirectory, numberOfFiles); Backupable backupable = mock(Backupable.class); when(backupable.getDirectory()).thenReturn(sourceDirectory.toFile()); when(backupable.getFilesToBackup()).thenReturn(files.listIterator()); when(backupable.isDirectRestore()).thenReturn(true); return backupable; } /** * Create files in source directory + additional files under a subdirectory of source directory */ private Backupable buildBackupableForMultipleDirectoriesCase(Path sourceDirectory, int numberOfFiles) throws Exception { List<Path> files = createFilesInDirectoryToBackup(sourceDirectory, numberOfFiles); // create an additional subdirectory with files Path subdir = sourceDirectory.resolve("subdir"); Files.createDirectory(subdir); List<Path> subdirFiles = createFilesInDirectoryToBackup(subdir, numberOfFiles); files.addAll(subdirFiles); Backupable backupable = mock(Backupable.class); 
when(backupable.getDirectory()).thenReturn(sourceDirectory.toFile()); when(backupable.getFilesToBackup()).thenReturn(files.listIterator()); return backupable; } private Path createSourceDirectory(String label) throws IOException { File sourceDirectory = TestCaseUtils.createTemporaryDirectory("dirToBackup-" + label + "-"); Reporter.log("Create directory to backup:" + sourceDirectory, true); return sourceDirectory.toPath(); } private List<Path> createFilesInDirectoryToBackup(Path directory, int numberOfFiles) throws Exception { List<Path> files = new ArrayList<>(); for (int i = 0; i < numberOfFiles; i++) { String filename = FILE_NAME_PREFIX + i; Path file = directory.resolve(filename); createFile(file, StaticUtils.getBytes(filename)); files.add(file); } return files; } private void createFile(Path file, byte[] content) throws Exception { OutputStream output = new FileOutputStream(file.toFile(), false); try { output.write(content); } finally { close(output); } } }
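
The sketch below is not part of the patch; it is a minimal illustration of how a storage or backend could plug into BackupManager through the Backupable interface. It assumes that Backupable declares beforeRestore() and afterRestore(Path, Path) with the signatures implied by the calls in BackupManager.restoreBackup(); the ExampleStorage class, the ".data" file filter, and the "exampleStorage" and "backup-1" identifiers are purely illustrative.

import java.io.File;
import java.io.FileFilter;
import java.nio.file.Path;
import java.util.List;
import java.util.ListIterator;

import org.opends.server.api.Backupable;
import org.opends.server.types.BackupConfig;
import org.opends.server.types.BackupDirectory;
import org.opends.server.types.DirectoryException;
import org.opends.server.types.RestoreConfig;
import org.opends.server.util.BackupManager;
import org.opends.server.util.StaticUtils;

/** Hypothetical storage that delegates its backup support to BackupManager. */
final class ExampleStorage implements Backupable
{
  private final File dbDirectory;

  ExampleStorage(File dbDirectory)
  {
    this.dbDirectory = dbDirectory;
  }

  /** The files to archive: every ".data" file directly under the root directory (filter is illustrative). */
  @Override
  public ListIterator<Path> getFilesToBackup() throws DirectoryException
  {
    FileFilter filter = new FileFilter()
    {
      @Override
      public boolean accept(File file)
      {
        return file.isFile() && file.getName().endsWith(".data");
      }
    };
    List<Path> files = BackupManager.getFiles(dbDirectory, filter, "exampleStorage");
    return files.listIterator();
  }

  @Override
  public File getDirectory()
  {
    return dbDirectory;
  }

  /** Restore directly into the root directory, so the current files must be saved aside first. */
  @Override
  public boolean isDirectRestore()
  {
    return true;
  }

  /** Assumed signature: move the current files to a save directory before the restore starts. */
  @Override
  public Path beforeRestore() throws DirectoryException
  {
    return BackupManager.saveCurrentFilesToDirectory(this, "exampleStorage");
  }

  /** Assumed signature: on a successful direct restore, discard the saved copy of the old files. */
  @Override
  public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException
  {
    StaticUtils.recursiveDelete(saveDirectory.toFile());
  }

  /** Illustrative end-to-end use; the backup directory and backup ID are placeholders. */
  static void backupAndRestore(ExampleStorage storage, BackupDirectory backupDir) throws DirectoryException
  {
    BackupManager backupManager = new BackupManager("exampleStorage");
    backupManager.createBackup(storage, new BackupConfig(backupDir, "backup-1", false));
    backupManager.restoreBackup(storage, new RestoreConfig(backupDir, "backup-1", false));
  }
}

With an indirect restore (isDirectRestore() returning false), BackupManager would instead restore into a temporary directory derived from the root directory and the backup ID, and afterRestore() would be responsible for swapping that directory in.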