mirror of https://github.com/OpenIdentityPlatform/OpenDJ.git

jvergara
13.20.2009 8140b0a19865e2754ad927906a607a86533f426c
Fix for issue 4106 (dsreplication should allow merging disjoint replication topologies)
dsreplication now allows merging disjoint topologies. The limitations on the topologies that can be merged are described in the issue report.

Extras:
- Fix a bug in the dsreplication code that prevented the cn=schema replication domain configurations from having an updated list of replication servers.
- Fix a regression that caused the code not to properly update the replication server list of the domains of previously configured servers.
- Fix a bug in dsreplication status that prevented the display of a message informing that no replication was configured.
- Update the disableAll code to clean up the contents of the truststore.
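
Illustration (not part of the patch): a condensed sketch of what the enable flow does when the two servers belong to different registries, using names from the ReplicationCliMain.java changes below. mergeRegistries is added by this patch and initializeAllSuffix is an existing private helper of ReplicationCliMain; the sketch assumes the two ADSContext/InitialLdapContext pairs are already connected.

  // Hypothetical outline only; the real logic lives in ReplicationCliMain.
  void mergeTopologies(ADSContext adsCtx1, InitialLdapContext ctx1,
                       ADSContext adsCtx2, InitialLdapContext ctx2) throws Exception
  {
    // mergeRegistries() keeps the registry with more replication servers as the
    // source, merges administrators, server groups and servers into it, and seeds
    // the other topology's ads-truststores.  It returns true when the first
    // registry is the source.
    boolean isFirstSource = mergeRegistries(adsCtx1, adsCtx2);
    InitialLdapContext ctxSource = isFirstSource ? ctx1 : ctx2;
    // Once replication is configured, the merged registry and the schema are
    // propagated to all servers of the resulting topology.
    initializeAllSuffix(ADSContext.getAdministrationSuffixDN(), ctxSource, false);
    initializeAllSuffix(Constants.SCHEMA_DN, ctxSource, false);
  }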
11 files modified, 1123 lines changed
opendj-sdk/opends/src/ads/org/opends/admin/ads/ADSContext.java  145
opendj-sdk/opends/src/ads/org/opends/admin/ads/ADSContextException.java  18
opendj-sdk/opends/src/ads/org/opends/admin/ads/ReplicaDescriptor.java  10
opendj-sdk/opends/src/ads/org/opends/admin/ads/ServerDescriptor.java  41
opendj-sdk/opends/src/messages/messages/admin.properties  6
opendj-sdk/opends/src/messages/messages/admin_tool.properties  62
opendj-sdk/opends/src/messages/messages/quicksetup.properties  7
opendj-sdk/opends/src/server/org/opends/server/admin/client/cli/DsFrameworkCliReturnCode.java  11
opendj-sdk/opends/src/server/org/opends/server/tools/dsreplication/ReplicationCliArgumentParser.java  2
opendj-sdk/opends/src/server/org/opends/server/tools/dsreplication/ReplicationCliMain.java  702
opendj-sdk/opends/src/server/org/opends/server/util/cli/ConsoleApplication.java  119
opendj-sdk/opends/src/ads/org/opends/admin/ads/ADSContext.java
@@ -27,6 +27,8 @@
package org.opends.admin.ads;
import static org.opends.messages.QuickSetupMessages.*;
import java.io.File;
import java.util.LinkedHashSet;
import java.util.LinkedList;
@@ -34,6 +36,8 @@
import java.util.HashSet;
import java.util.Map;
import java.util.HashMap;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -58,6 +62,11 @@
import javax.naming.ldap.Control;
import javax.naming.ldap.LdapContext;
import org.opends.admin.ads.util.ConnectionUtils;
import org.opends.messages.Message;
import org.opends.quicksetup.Constants;
import org.opends.quicksetup.util.Utils;
/**
 * Class used to update and read the contents of the Administration Data.
@@ -2408,4 +2417,140 @@
    }
    return certificateMap;
  }
  /**
   * Merge the contents of this ADSContext with the contents of the provided
   * ADSContext.  Note that only the contents of this ADSContext will be
   * updated.
   * @param adsCtx the other ADSContext to merge the contents with.
   * @throws ADSContextException if there was an error during the merge.
   */
  public void mergeWithRegistry(ADSContext adsCtx) throws ADSContextException
  {
    try
    {
      // Merge administrators.
      mergeAdministrators(adsCtx);
      // Merge groups.
      mergeServerGroups(adsCtx);
      // Merge servers.
      mergeServers(adsCtx);
    }
    catch (ADSContextException adce)
    {
      Message msg = ERR_ADS_MERGE.get(
          ConnectionUtils.getHostPort(getDirContext()),
          ConnectionUtils.getHostPort(adsCtx.getDirContext()),
          adce.getMessageObject());
      throw new ADSContextException(
          ADSContextException.ErrorType.ERROR_MERGING, msg, adce);
    }
  }
  /**
   * Merge the administrator contents of this ADSContext with the contents of
   * the provided ADSContext.  Note that only the contents of this ADSContext
   * will be updated.
   * @param adsCtx the other ADSContext to merge the contents with.
   * @throws ADSContextException if there was an error during the merge.
   */
  private void mergeAdministrators(ADSContext adsCtx) throws ADSContextException
  {
    Set<Map<AdministratorProperty, Object>> admins2 =
      adsCtx.readAdministratorRegistry();
    SortedSet<String> notDefinedAdmins = new TreeSet<String>();
    for (Map<AdministratorProperty, Object> admin2 : admins2)
    {
      if (!isAdministratorAlreadyRegistered(admin2))
      {
        String uid = (String)admin2.get(AdministratorProperty.UID);
        notDefinedAdmins.add(uid);
      }
    }
    if (!notDefinedAdmins.isEmpty())
    {
      Message msg = ERR_ADS_ADMINISTRATOR_MERGE.get(
          ConnectionUtils.getHostPort(adsCtx.getDirContext()),
          ConnectionUtils.getHostPort(getDirContext()),
          Utils.getStringFromCollection(notDefinedAdmins,
              Constants.LINE_SEPARATOR),
          ConnectionUtils.getHostPort(getDirContext()));
      throw new ADSContextException(ADSContextException.ErrorType.ERROR_MERGING,
          msg, null);
    }
  }
  /**
   * Merge the groups contents of this ADSContext with the contents of the
   * provided ADSContext.  Note that only the contents of this ADSContext will
   * be updated.
   * @param adsCtx the other ADSContext to merge the contents with.
   * @throws ADSContextException if there was an error during the merge.
   */
  private void mergeServerGroups(ADSContext adsCtx) throws ADSContextException
  {
    Set<Map<ServerGroupProperty, Object>> serverGroups1 =
      readServerGroupRegistry();
    Set<Map<ServerGroupProperty, Object>> serverGroups2 =
      adsCtx.readServerGroupRegistry();
    for (Map<ServerGroupProperty, Object> group2 : serverGroups2)
    {
      Map<ServerGroupProperty, Object> group1 = null;
      String uid2 = (String)group2.get(ServerGroupProperty.UID);
      for (Map<ServerGroupProperty, Object> gr : serverGroups1)
      {
        String uid1 = (String)gr.get(ServerGroupProperty.UID);
        if (uid1.equalsIgnoreCase(uid2))
        {
          group1 = gr;
          break;
        }
      }
      if (group1 != null)
      {
        // Merge the members, keep the description on this ADS.
        Set<String> member1List = getServerGroupMemberList(uid2);
        if (member1List == null)
        {
          member1List = new HashSet<String>();
        }
        Set<String> member2List = adsCtx.getServerGroupMemberList(uid2);
        if (member2List != null && !member2List.isEmpty())
        {
          member1List.addAll(member2List);
          Map<ServerGroupProperty, Object> newProperties =
            new HashMap<ServerGroupProperty, Object>();
          newProperties.put(ServerGroupProperty.MEMBERS, member1List);
          updateServerGroup(uid2, newProperties);
        }
      }
      else
      {
        createServerGroup(group2);
      }
    }
  }
  /**
   * Merge the server contents of this ADSContext with the contents of the
   * provided ADSContext.  Note that only the contents of this ADSContext will
   * be updated.
   * @param adsCtx the other ADSContext to merge the contents with.
   * @throws ADSContextException if there was an error during the merge.
   */
  private void mergeServers(ADSContext adsCtx) throws ADSContextException
  {
    Set<Map<ServerProperty, Object>> servers2 = adsCtx.readServerRegistry();
    for (Map<ServerProperty, Object> server2 : servers2)
    {
      if (!isServerAlreadyRegistered(server2))
      {
        registerServer(server2);
      }
    }
  }
}
opendj-sdk/opends/src/ads/org/opends/admin/ads/ADSContextException.java
@@ -108,6 +108,10 @@
     */
    UNEXPECTED_ADS_BACKEND_TYPE(),
    /**
     * Error merging with another ADSContext.
     */
    ERROR_MERGING,
    /**
     * Unexpected error (potential bug).
     */
    ERROR_UNEXPECTED();
@@ -133,7 +137,19 @@
   */
  public ADSContextException(ErrorType error, Throwable x)
  {
    super(getMessage(error, x), x);
    this(error, getMessage(error, x), x);
  }
  /**
   * Creates an ADSContextException of the given error type with the provided
   * error cause and message.
   * @param error the error type.
   * @param msg the message describing the error.
   * @param x the throwable that generated this exception.
   */
  public ADSContextException(ErrorType error, Message msg, Throwable x)
  {
    super(msg);
    this.error = error;
    this.embeddedException = x;
    toString = "ADSContextException: error type "+error+".";
opendj-sdk/opends/src/ads/org/opends/admin/ads/ReplicaDescriptor.java
@@ -22,7 +22,7 @@
 * CDDL HEADER END
 *
 *
 *      Copyright 2007-2008 Sun Microsystems, Inc.
 *      Copyright 2007-2009 Sun Microsystems, Inc.
 */
package org.opends.admin.ads;
@@ -143,9 +143,9 @@
  }
  /**
   * Returns the replication server id for the replication domain associated
   * Returns the replication id for the replication domain associated
   * with this replica.
   * @return the replication server id for the replication domain associated
   * @return the replication id for the replication domain associated
   * with this replica.
   */
  public int getReplicationId()
@@ -154,9 +154,9 @@
  }
  /**
   * Sets the replication server id for the replication domain associated
   * Sets the replication id for the replication domain associated
   * with this replica.
   * @param replicationId the replication server id for the replication domain
   * @param replicationId the replication id for the replication domain
   * associated with this replica.
   */
  public void setReplicationId(int replicationId)
opendj-sdk/opends/src/ads/org/opends/admin/ads/ServerDescriptor.java
@@ -60,6 +60,8 @@
    new HashMap<ServerProperty, Object>();
  private TopologyCacheException lastException;
  private static final String TRUSTSTORE_DN = "cn=ads-truststore";
  private static final Logger LOG =
    Logger.getLogger(ServerDescriptor.class.getName());
@@ -1337,7 +1339,6 @@
  {
    /* TODO: this DN is declared in some core constants file. Create a
       constants file for the installer and import it into the core. */
    final String truststoreDnStr = "cn=ads-truststore";
    final Attribute oc = new BasicAttribute("objectclass");
    oc.add("top");
    oc.add("ds-cfg-instance-key");
@@ -1353,7 +1354,7 @@
                      getAttributeName() + ";binary", keyEntry.getValue()));
      final LdapName keyDn = new LdapName((new StringBuilder(rdnAttr.getID()))
              .append("=").append(Rdn.escapeValue(rdnAttr.get())).append(",")
              .append(truststoreDnStr).toString());
              .append(TRUSTSTORE_DN).toString());
      try {
        ctx.createSubcontext(keyDn, keyAttrs).close();
      }
@@ -1365,6 +1366,42 @@
  }
  /**
   * Cleans up the contents of the ads truststore.
   *
   * @param ctx the bound instance.
   * @throws NamingException in case an error occurs while updating the
   * instance's ads-truststore via LDAP.
   */
  public static void cleanAdsTrustStore(InitialLdapContext ctx)
  throws NamingException
  {
    try
    {
      SearchControls sc = new SearchControls();
      sc.setSearchScope(SearchControls.ONELEVEL_SCOPE);
      String[] attList = {"dn"};
      sc.setReturningAttributes(attList);
      NamingEnumeration<SearchResult> ne = ctx.search(TRUSTSTORE_DN,
          "(objectclass=ds-cfg-instance-key)", sc);
      ArrayList<String> dnsToDelete = new ArrayList<String>();
      while (ne.hasMore())
      {
        SearchResult sr = ne.next();
        dnsToDelete.add(sr.getName()+","+TRUSTSTORE_DN);
      }
      for (String dn : dnsToDelete)
      {
        ctx.destroySubcontext(dn);
      }
    }
    catch (NameNotFoundException nnfe)
    {
      // Ignore
      LOG.log(Level.WARNING, "Error cleaning truststore: "+nnfe, nnfe);
    }
  }
  /**
   * Returns the values of the ds-base-dn-entry count attributes for the given
   * backend monitor entry using the provided InitialLdapContext.
   * @param ctx the InitialLdapContext to use to update the configuration.
opendj-sdk/opends/src/messages/messages/admin.properties
@@ -20,7 +20,7 @@
#
# CDDL HEADER END
#
#      Copyright 2006-2008 Sun Microsystems, Inc.
#      Copyright 2006-2009 Sun Microsystems, Inc.
@@ -279,7 +279,7 @@
SEVERE_ERR_CLASS_LOADER_CANNOT_READ_MANIFEST_FILE_120=An unexpected \
 error occurred while reading the manifest file: %s
SEVERE_ERR_CLASS_LOADER_CANNOT_LOAD_CLASS_121=An error occurred while \
 attempting to load class "%s": %s
 attempting to load class "%s": %s
SEVERE_ERR_CLASS_LOADER_CANNOT_FIND_GET_INSTANCE_METHOD_122=Unable to \
 to find the getInstance() method in the managed object definition \
 class "%s": %s
@@ -318,4 +318,6 @@
files are missing: %s
SEVERE_WARN_ADMIN_SET_PERMISSIONS_FAILED_137=Failed to set permissions \
 on file %s
FATAL_ERR_ADMIN_MERGING_138=The registry information of the servers could not \
 be merged
opendj-sdk/opends/src/messages/messages/admin_tool.properties
@@ -680,7 +680,7 @@
 also the replication server (changelog and replication port) to be disabled \
 you must also specify the '--%s' or '--%s' argument.
INFO_REPLICATION_DISABLE_ALL_SUFFIXES_DISABLE_REPLICATION_SERVER=You have \
 chosen to disable all the replicated base DNs on the server '%s'.  Do you \
 chosen to disable all the replicated base DN's on the server '%s'.  Do you \
 want to disable also the replication port '%d'?
INFO_DISABLE_REPLICATION_ONE_POINT_OF_FAILURE=You have decided to disable the \
 replication server (replication changelog).  After disabling the replication \
@@ -702,7 +702,8 @@
 server is required in a replication topology and this is the last replication \
 server for the following suffixes:%n%s%nReplication will be disabled for \
 these servers.%nDo you want to continue?
INFO_REPLICATION_DISABLE_ADS_CONTENTS=Removing registration information
INFO_REPLICATION_REMOVE_ADS_CONTENTS=Removing registration information
INFO_REPLICATION_REMOVE_TRUSTSTORE_CONTENTS=Removing truststore information
INFO_REPLICATION_INITIALIZE_ALL_SUFFIX_PROMPT=Initialize base DN %s?
INFO_REPLICATION_PRE_EXTERNAL_INITIALIZATION_SUFFIX_PROMPT=Are you going to \
 initialize with import-ldif or binary copy base DN %s?
@@ -773,21 +774,24 @@
 replication server in '%s' but the server already has a replication server \
 configured (with replication port '%d').  Do you want to continue?
INFO_REPLICATION_CONNECTING=Establishing connections
INFO_REPLICATION_ENABLE_UPDATING_ADS_CONTENTS=Checking Registration information
INFO_REPLICATION_ENABLE_UPDATING_ADS_CONTENTS=Checking registration information
INFO_REPLICATION_ENABLE_UPDATING_REPLICATION_SERVER=Updating remote references \
 on server %s
INFO_REPLICATION_ENABLE_CONFIGURING_REPLICATION_SERVER=Configuring Replication \
 port on server %s
INFO_REPLICATION_ENABLE_CONFIGURING_BASEDN=Updating replication configuration \
 for baseDN %s on server %s
INFO_REPLICATION_ENABLE_CONFIGURING_ADS=Updating Registration configuration \
INFO_REPLICATION_ENABLE_CONFIGURING_ADS=Updating registration configuration \
 on server %s
INFO_ENABLE_REPLICATION_INITIALIZING_ADS=Initializing Registration information \
INFO_ENABLE_REPLICATION_INITIALIZING_ADS=Initializing registration information \
 on server %s with the contents of server %s
INFO_ENABLE_REPLICATION_INITIALIZING_ADS_ALL=Initializing registration \
 information with the contents of server %s
INFO_ENABLE_REPLICATION_INITIALIZING_SCHEMA=Initializing schema on server %s \
 with the contents of server %s
SEVERE_ERR_REPLICATION_ENABLE_SEEDING_TRUSTSTORE=An unexpected error occurred \
 seeding the truststore contents.  Details: %s
 seeding the truststore contents of server %s with truststore of server %s.  \
 Details: %s
SEVERE_ERR_INITIALIZING_REPLICATIONID_NOT_FOUND=Error initializing.  Could not \
 find replication ID in the server %s for base DN %s.
SEVERE_ERR_REPLICATION_INITIALIZING_TRIES_COMPLETED=Error initializing.  Could \
@@ -798,13 +802,36 @@
 replication port on server %s.
SEVERE_ERR_REPLICATION_CONFIGURING_BASEDN=Error updating replication \
 configuration on base DN %s of server %s.
SEVERE_ERR_REPLICATION_UPDATING_ADS=Error updating Registration information.  \
SEVERE_ERR_REPLICATION_UPDATING_ADS=Error updating registration information.  \
 Details: %s
SEVERE_ERR_REPLICATION_READING_ADS=Error reading Registration information.  \
SEVERE_ERR_REPLICATION_READING_ADS=Error reading registration information.  \
 Details: %s
SEVERE_ERR_REPLICATION_ADS_MERGE_NOT_SUPPORTED=The registry information found \
 in servers %s and %s is different.  This tool does not allow to handle this \
 scenario.
 in servers %s and %s could not be merged.  Details: %s
SEVERE_ERR_REPLICATION_ENABLE_COMMON_DOMAIN_ID_ARG=Server %s (base DN '%s') \
 and server %s (base DN '%s') have the same domain ID: %d.
SEVERE_ERR_REPLICATION_ENABLE_COMMON_DOMAIN_ID=The following servers in the \
 two topologies have the same domain ID%n%s%n%nThe replication topologies \
 cannot be merged.  To fix this problem please refer to the documentation.
SEVERE_ERR_REPLICATION_ENABLE_COMMON_REPLICATION_SERVER_ID_ARG=Server %s \
 and server %s have the same replication server ID: %d.
SEVERE_ERR_REPLICATION_ENABLE_COMMON_REPLICATION_SERVER_ID=The following \
 servers in the two topologies have the same replication server ID%n%s%n%nThe \
 replication topologies cannot be merged.  To fix this problem please refer to \
 the documentation.
SEVERE_ERR_REPLICATION_CANNOT_MERGE_WITH_ERRORS=The errors reading the \
 registry information on %s do not allow to do the merge between the \
 replication topologies.  You will have to fix the following problems before \
 merging the topologies:%n%s
INFO_REPLICATION_MERGING_REGISTRIES_CONFIRMATION=To be able to configure \
 replication the registration information of servers %s and %s must be \
 merged.  If any conflict is detected, the information of server %s will be \
 kept and the information of server %s overridden.%nDo you want to continue?
INFO_REPLICATION_MERGING_REGISTRIES_DESCRIPTION=To be able to configure \
 replication the registration information of servers %s and %s must be \
 merged.  If any conflict is detected, the information of server %s will be \
 kept and the information of server %s overridden.
INFO_REPLICATION_MERGING_REGISTRIES_PROGRESS=Merging registration information
SEVERE_ERR_REPLICATION_ERROR_READING_CONFIGURATION=Error reading replication \
 configuration of server %s.%nDetails: %s
INFO_REPLICATION_REMOVING_REFERENCES_ON_REMOTE=Removing references on base DN \
@@ -813,7 +840,10 @@
 server %s
INFO_REPLICATION_DISABLING_REPLICATION_SERVER=Disabling replication port %s of \
 server %s
INFO_REPLICATION_STATUS_NO_BASEDNS=No base DN's found.
INFO_REPLICATION_STATUS_NO_BASEDNS=No replication information for the base \
 DN's found.
INFO_REPLICATION_STATUS_NO_REPLICATION_INFORMATION=No replication information \
 found.
INFO_REPLICATION_STATUS_BASEDN=Base DN
INFO_REPLICATION_STATUS_IS_REPLICATED=Replication
INFO_REPLICATION_STATUS_REPLICATED=%s - Replication Enabled
@@ -852,15 +882,15 @@
 have a replication server (with changelog and a replication port) but are not \
 linked to any server containing replicated data.
INFO_REPLICATION_ONLY_ONE_REPLICATION_SERVER_CONFIRM=Only one replication \
 server will be defined for the following base DNs:%n%s%nIt is recommended to \
 server will be defined for the following base DN's:%n%s%nIt is recommended to \
 have at least two replication servers (two changelogs) to avoid a single \
 point of failure in the replication topology.%nDo you want to continue?
INFO_REPLICATION_ONLY_ONE_REPLICATION_SERVER_WARNING=Only one replication \
 server will be defined for the following base DNs:%n%s%nIt is recommended to \
 server will be defined for the following base DN's:%n%s%nIt is recommended to \
 have at least two replication servers (two changelogs) to avoid a single \
 point of failure in the replication topology.
SEVERE_ERR_REPLICATION_NO_REPLICATION_SERVER=No replication server is defined \
 for the following base DNs:%n%s%nAt least one replication server (a \
 for the following base DN's:%n%s%nAt least one replication server (a \
 changelog) is required in the replication topology.  It is recommended to \
 have at least two replication servers (two changelogs) to avoid a single \
 point of failure in the replication topology.
@@ -1766,7 +1796,7 @@
INFO_CTRL_PANEL_SEPARATE_ATTRIBUTES_COMMA=Separate multiple attributes with a \
 comma (,)
MILD_ERR_CTRL_PANEL_NOT_A_DESCENDANT_OF_BASE_DN=The base DN '%s' is not a \
 descendant of any of the base DNs defined in backend '%s'.
 descendant of any of the base DN's defined in backend '%s'.
MILD_ERR_CTRL_PANEL_NOT_VALID_ATTRIBUTE_NAME=The attribute '%s' has not a \
 valid name.
MILD_ERR_CTRL_PANEL_INVALID_FILTER_DETAILS_WITH_VALUE=The provided value '%s' \
@@ -1922,7 +1952,7 @@
MILD_ERR_CTRL_PANEL_DELETING_SUBTREE_ERROR_SUMMARY=Error deleting subtree
MILD_ERR_CTRL_PANEL_DELETING_SUBTREE_ERROR_DETAILS=An error occurred deleting \
 subtree '%s'.
INFO_CTRL_PANEL_ALL_BASE_DNS=All Base DNs
INFO_CTRL_PANEL_ALL_BASE_DNS=All Base DN's
INFO_CTRL_PANEL_LDAP_FILTER=LDAP Filter:
INFO_CTRL_PANEL_USERS_FILTER=Users
INFO_CTRL_PANEL_GROUPS_FILTER=Groups
opendj-sdk/opends/src/messages/messages/quicksetup.properties
@@ -1354,4 +1354,9 @@
 '%s'.
INFO_ADS_CONTEXT_EXCEPTION_WITH_DETAILS_MSG=Registration information error.  \
 Error type: '%s'.  Details: %s
FATAL_ERR_ADS_MERGE=The registration information of server %s \
 and server %s could not be merged.  Reasons:%n%s
FATAL_ERR_ADS_ADMINISTRATOR_MERGE=The following administrators are defined in \
 server %s but not in server %s:%n%s%nThe merge can only be performed if these \
 administrators are defined in server %s.  Use the command-line dsframework \
 to do so.
opendj-sdk/opends/src/server/org/opends/server/admin/client/cli/DsFrameworkCliReturnCode.java
@@ -22,7 +22,7 @@
 * CDDL HEADER END
 *
 *
 *      Copyright 2006-2008 Sun Microsystems, Inc.
 *      Copyright 2006-2009 Sun Microsystems, Inc.
 */
package org.opends.server.admin.client.cli;
import org.opends.messages.Message;
@@ -143,7 +143,12 @@
    /**
     * The server entity is not yet registered.
     */
    SERVER_NOT_REGISTERED(19, ERR_ADMIN_SERVER_NOT_REGISTERED.get());
    SERVER_NOT_REGISTERED(19, ERR_ADMIN_SERVER_NOT_REGISTERED.get()),
    /**
     * The merge of the ADS cannot be performed.
     */
    ERROR_MERGING(20, ERR_ADMIN_MERGING.get());
    // The retunCodevalue of the value.
    private final int returnCode;
@@ -225,6 +230,8 @@
        MISSING_ADMIN_PASSWORD);
    adsErrorToReturnCode.put(ErrorType.ERROR_UNEXPECTED,
        ERROR_UNEXPECTED);
    adsErrorToReturnCode.put(ErrorType.ERROR_MERGING,
        ERROR_MERGING);
  }
  /**
opendj-sdk/opends/src/server/org/opends/server/tools/dsreplication/ReplicationCliArgumentParser.java
@@ -759,7 +759,7 @@
        "disablereplicationserver", null, "disableReplicationServer",
        INFO_DESCRIPTION_DISABLE_REPLICATION_SERVER.get());
    disableAllArg = new BooleanArgument(
        "disableall", null, "disableAll",
        "disableall", 'a', "disableAll",
        INFO_DESCRIPTION_DISABLE_ALL.get());
opendj-sdk/opends/src/server/org/opends/server/tools/dsreplication/ReplicationCliMain.java
@@ -2488,9 +2488,6 @@
        uData.setPwd1(pwd1);
      }
    }
    int replicationPort1 = getValue(argParser.getReplicationPort1(),
        argParser.getDefaultReplicationPort1());
    uData.setReplicationPort1(replicationPort1);
    uData.setSecureReplication1(argParser.isSecureReplication1());
    String host2Name = getValue(argParser.getHostName2(),
@@ -2526,9 +2523,6 @@
        uData.setPwd2(pwd2);
      }
    }
    int replicationPort2 = getValue(argParser.getReplicationPort2(),
        argParser.getDefaultReplicationPort2());
    uData.setReplicationPort2(replicationPort2);
    uData.setSecureReplication2(argParser.isSecureReplication2());
    uData.setReplicateSchema(!argParser.noSchemaReplication());
    uData.setConfigureReplicationDomain1(
@@ -2539,6 +2533,20 @@
        !argParser.noReplicationServer1Arg.isPresent());
    uData.setConfigureReplicationServer2(
        !argParser.noReplicationServer2Arg.isPresent());
    int replicationPort1 = getValue(argParser.getReplicationPort1(),
        argParser.getDefaultReplicationPort1());
    if (uData.configureReplicationServer1())
    {
      uData.setReplicationPort1(replicationPort1);
    }
    int replicationPort2 = getValue(argParser.getReplicationPort2(),
        argParser.getDefaultReplicationPort2());
    if (uData.configureReplicationServer2())
    {
      uData.setReplicationPort2(replicationPort2);
    }
  }
  /**
@@ -3286,12 +3294,21 @@
//    If we are not in interactive mode do some checks...
      if (!argParser.isInteractive())
      {
        boolean hasReplicationPort1 = hasReplicationPort(ctx1);
        boolean hasReplicationPort2 = hasReplicationPort(ctx2);
        int replPort1 = uData.getReplicationPort1();
        int replPort2 = uData.getReplicationPort2();
        if (!hasReplicationPort1)
        int replPort1 = getReplicationPort(ctx1);
        boolean hasReplicationPort1 = replPort1 > 0;
        if (replPort1 < 0 && uData.configureReplicationServer1())
        {
          replPort1 = uData.getReplicationPort1();
        }
        int replPort2 = getReplicationPort(ctx2);
        boolean hasReplicationPort2 = replPort2 > 0;
        if (replPort2 < 0 && uData.configureReplicationServer2())
        {
          replPort2 = uData.getReplicationPort2();
        }
        boolean checkReplicationPort1 = replPort1 > 0;
        boolean checkReplicationPort2 = replPort2 > 0;
        if (!hasReplicationPort1 && checkReplicationPort1)
        {
          if (!argParser.skipReplicationPortCheck() &&
              uData.configureReplicationServer1() &&
@@ -3301,7 +3318,7 @@
            errorMessages.add(getCannotBindToPortError(replPort1));
          }
        }
        if (!hasReplicationPort2)
        if (!hasReplicationPort2 && checkReplicationPort2)
        {
          if (!argParser.skipReplicationPortCheck() &&
              uData.configureReplicationServer2() &&
@@ -3311,7 +3328,7 @@
            errorMessages.add(getCannotBindToPortError(replPort2));
          }
        }
        if (!hasReplicationPort1 && !hasReplicationPort2 &&
        if (checkReplicationPort1 && checkReplicationPort2 &&
            (replPort1 == replPort2) &&
            (host1.equalsIgnoreCase(host2)))
        {
@@ -3323,14 +3340,14 @@
        {
          // This is something that we must do in any case... this test is
          // already included when we call SetupUtils.canUseAsPort
          if (replPort1 == port1)
          if (checkReplicationPort1 && replPort1 == port1)
          {
            errorMessages.add(
                ERR_REPLICATION_PORT_AND_REPLICATION_PORT_EQUAL.get(
                host1, String.valueOf(replPort1)));
          }
          if (replPort2 == port2)
          if (checkReplicationPort2 && replPort2 == port2)
          {
            errorMessages.add(
                ERR_REPLICATION_PORT_AND_REPLICATION_PORT_EQUAL.get(
@@ -4112,7 +4129,6 @@
    TreeSet<String> availableSuffixes;
    TreeSet<String> alreadyReplicatedSuffixes;
    if (uData.configureReplicationDomain1() &&
        uData.configureReplicationDomain2())
    {
@@ -4850,6 +4866,7 @@
    TopologyCacheFilter filter = new TopologyCacheFilter();
    filter.setSearchMonitoringInformation(false);
    filter.addBaseDNToSearch(ADSContext.getAdministrationSuffixDN());
    filter.addBaseDNToSearch(Constants.SCHEMA_DN);
    for (String dn : uData.getBaseDNs())
    {
      filter.addBaseDNToSearch(dn);
@@ -4994,6 +5011,7 @@
    ADSContext adsCtxSource = null;
    boolean adsAlreadyReplicated = false;
    boolean adsMergeDone = false;
    printProgress(formatter.getFormattedWithPoints(
        INFO_REPLICATION_ENABLE_UPDATING_ADS_CONTENTS.get()));
@@ -5059,13 +5077,19 @@
        }
        else if (!areEqual(registry1, registry2))
        {
          // TO COMPLETE: we may want to merge the ADS but for the moment
          // just say this is not supported.
          throw new ReplicationCliException(
              ERR_REPLICATION_ADS_MERGE_NOT_SUPPORTED.get(
                  ConnectionUtils.getHostPort(ctx1),
                  ConnectionUtils.getHostPort(ctx2)),
                  REPLICATION_ADS_MERGE_NOT_SUPPORTED, null);
          printProgress(formatter.getFormattedDone());
          printlnProgress();
          boolean isFirstSource = mergeRegistries(adsCtx1, adsCtx2);
          if (isFirstSource)
          {
            ctxSource = ctx1;
          }
          else
          {
            ctxSource = ctx2;
          }
          adsMergeDone = true;
        }
        else
        {
@@ -5142,7 +5166,7 @@
          ERR_REPLICATION_UPDATING_ADS.get(adce.getMessageObject()),
          ERROR_UPDATING_ADS, adce);
    }
    if (!adsAlreadyReplicated)
    if (!adsAlreadyReplicated && !adsMergeDone)
    {
      try
      {
@@ -5152,14 +5176,21 @@
      catch (Throwable t)
      {
        LOG.log(Level.SEVERE, "Error seeding truststores: "+t, t);
        String arg = (t instanceof OpenDsException) ?
            ((OpenDsException)t).getMessageObject().toString() : t.toString();
        throw new ReplicationCliException(
            ERR_REPLICATION_ENABLE_SEEDING_TRUSTSTORE.get(t.toString()),
            ERR_REPLICATION_ENABLE_SEEDING_TRUSTSTORE.get(
                ConnectionUtils.getHostPort(ctxDestination),
                ConnectionUtils.getHostPort(adsCtxSource.getDirContext()),
               arg),
            ERROR_SEEDING_TRUSTORE, t);
      }
    }
    printProgress(formatter.getFormattedDone());
    printlnProgress();
    if (!adsMergeDone)
    {
      printProgress(formatter.getFormattedDone());
      printlnProgress();
    }
    LinkedList<String> baseDNs = uData.getBaseDNs();
    if (!adsAlreadyReplicated)
    {
@@ -5187,6 +5218,7 @@
    }
    TopologyCache cache1 = null;
    TopologyCache cache2 = null;
    try
    {
      LinkedHashSet<PreferredConnection> cnx =
@@ -5435,7 +5467,27 @@
    // initialize the contents of one ADS with the other (in the case where
    // already both servers were replicating the same ADS there is nothing to be
    // done).
    if ((ctxSource != null) && (ctxDestination != null))
    if (adsMergeDone)
    {
      PointAdder pointAdder = new PointAdder();
      printProgress(
          INFO_ENABLE_REPLICATION_INITIALIZING_ADS_ALL.get(
              ConnectionUtils.getHostPort(ctxSource)));
      pointAdder.start();
      try
      {
        initializeAllSuffix(ADSContext.getAdministrationSuffixDN(),
          ctxSource, false);
      }
      finally
      {
        pointAdder.stop();
      }
      printProgress(formatter.getSpace());
      printProgress(formatter.getFormattedDone());
      printlnProgress();
    }
    else if ((ctxSource != null) && (ctxDestination != null))
    {
      printProgress(formatter.getFormattedWithPoints(
          INFO_ENABLE_REPLICATION_INITIALIZING_ADS.get(
@@ -5461,12 +5513,33 @@
        ctxSource = ctx1;
        ctxDestination = ctx2;
      }
      printProgress(formatter.getFormattedWithPoints(
          INFO_ENABLE_REPLICATION_INITIALIZING_SCHEMA.get(
              ConnectionUtils.getHostPort(ctxDestination),
              ConnectionUtils.getHostPort(ctxSource))));
      initializeSuffix(Constants.SCHEMA_DN, ctxSource,
      if (adsMergeDone)
      {
        PointAdder pointAdder = new PointAdder();
        printProgress(
            INFO_ENABLE_REPLICATION_INITIALIZING_SCHEMA.get(
                ConnectionUtils.getHostPort(ctxDestination),
                ConnectionUtils.getHostPort(ctxSource)));
        pointAdder.start();
        try
        {
          initializeAllSuffix(Constants.SCHEMA_DN, ctxSource, false);
        }
        finally
        {
          pointAdder.stop();
        }
        printProgress(formatter.getSpace());
      }
      else
      {
        printProgress(formatter.getFormattedWithPoints(
            INFO_ENABLE_REPLICATION_INITIALIZING_SCHEMA.get(
                ConnectionUtils.getHostPort(ctxDestination),
                ConnectionUtils.getHostPort(ctxSource))));
        initializeSuffix(Constants.SCHEMA_DN, ctxSource,
          ctxDestination, false);
      }
      printProgress(formatter.getFormattedDone());
      printlnProgress();
    }
@@ -5970,7 +6043,7 @@
      {
        // Delete all contents from ADSContext.
        printProgress(formatter.getFormattedWithPoints(
            INFO_REPLICATION_DISABLE_ADS_CONTENTS.get()));
            INFO_REPLICATION_REMOVE_ADS_CONTENTS.get()));
        adsCtx.removeAdminData();
        String adminBackendName = null;
        for (ReplicaDescriptor replica : server.getReplicas())
@@ -5988,11 +6061,27 @@
      }
      catch (ADSContextException adce)
      {
        LOG.log(Level.SEVERE, "Error resetting contents of cn=admin data: "+
        LOG.log(Level.SEVERE, "Error removing contents of cn=admin data: "+
            adce, adce);
        throw new ReplicationCliException(
            ERR_REPLICATION_UPDATING_ADS.get(adce.getMessageObject()),
            ERROR_READING_ADS, adce);
            ERROR_UPDATING_ADS, adce);
      }
      try
      {
        // Delete all contents from truststore.
        printProgress(formatter.getFormattedWithPoints(
            INFO_REPLICATION_REMOVE_TRUSTSTORE_CONTENTS.get()));
        ServerDescriptor.cleanAdsTrustStore(adsCtx.getDirContext());
        printProgress(formatter.getFormattedDone());
        printlnProgress();
      }
      catch (Throwable t)
      {
        LOG.log(Level.SEVERE, "Error removing contents of truststore: "+t, t);
        throw new ReplicationCliException(
            ERR_REPLICATION_UPDATING_ADS.get(t.toString()),
            ERROR_UPDATING_ADS, t);
      }
    }
  }
@@ -6010,6 +6099,7 @@
  {
    ADSContext adsCtx = new ADSContext(ctx);
    boolean somethingDisplayed = false;
    TopologyCache cache = null;
    try
    {
@@ -6142,13 +6232,9 @@
        {
          displayStatus(rServers, uData.isScriptFriendly(),
              getPreferredConnections(ctx));
          somethingDisplayed = true;
        }
      }
      else
      {
        printProgress(INFO_REPLICATION_STATUS_NO_BASEDNS.get());
        printlnProgress();
      }
    }
    if (!replicaLists.isEmpty())
@@ -6184,6 +6270,7 @@
        displayStatus(replicas, uData.isScriptFriendly(),
            getPreferredConnections(ctx), cache.getServers(),
            replicasWithNoReplicationServer, serversWithNoReplica);
        somethingDisplayed = true;
      }
      if (oneReplicated && !uData.isScriptFriendly())
      {
@@ -6202,6 +6289,20 @@
              INFO_REPLICATION_STATUS_NOT_A_REPLICATION_DOMAIN_LEGEND.get());
        }
        printlnProgress();
        somethingDisplayed = true;
      }
    }
    if (!somethingDisplayed)
    {
      if (displayAll)
      {
        printProgress(INFO_REPLICATION_STATUS_NO_REPLICATION_INFORMATION.get());
        printlnProgress();
      }
      else
      {
        printProgress(INFO_REPLICATION_STATUS_NO_BASEDNS.get());
        printlnProgress();
      }
    }
  }
@@ -6706,6 +6807,17 @@
        break;
      }
    }
    if (servers.isEmpty())
    {
      boolean found = false;
      for (ReplicaDescriptor replica : server.getReplicas())
      {
        if (Utils.areDnsEqual(replica.getSuffix().getDN(), baseDN))
        {
          found = true;
        }
      }
    }
    if (cache != null)
    {
      Set<SuffixDescriptor> suffixes = cache.getSuffixes();
@@ -6744,6 +6856,11 @@
      ServerDescriptor server)
  {
    SuffixDescriptor returnValue = null;
    String replicationServer = null;
    if (server.isReplicationServer())
    {
      replicationServer = server.getReplicationServerHostPort();
    }
    Set<String> servers = new LinkedHashSet<String>();
    for (ReplicaDescriptor replica : server.getReplicas())
    {
@@ -6771,6 +6888,18 @@
          returnValue = suffix;
          break;
        }
        else if (replicationServer != null)
        {
          // Check if the server is only a replication server.
          for (String repServer : s)
          {
            if (repServer.equalsIgnoreCase(replicationServer))
            {
              returnValue = suffix;
              break;
            }
          }
        }
      }
    }
    return returnValue;
@@ -7120,6 +7249,8 @@
      Set<String> alreadyConfiguredReplicationServers)
  throws ReplicationCliException
  {
    LOG.log(Level.INFO, "Configuring base DN '"+baseDN+
        "' the replication servers are "+repServers);
    Set<ServerDescriptor> serversToConfigureDomain =
      new HashSet<ServerDescriptor>();
    Set<ServerDescriptor> replicationServersToConfigure =
@@ -7140,7 +7271,7 @@
    for (ServerDescriptor s : cache.getServers())
    {
      if (s.isReplicationServer() &&
          !alreadyConfiguredReplicationServers.contains(server.getId()))
          !alreadyConfiguredReplicationServers.contains(s.getId()))
      {
        // Check if it is part of the replication topology
        boolean isInTopology = false;
@@ -7155,7 +7286,7 @@
        }
        if (isInTopology)
        {
          replicationServersToConfigure.add(server);
          replicationServersToConfigure.add(s);
        }
      }
    }
@@ -7166,40 +7297,29 @@
    for (ServerDescriptor s : allServers)
    {
      String dn = ConnectionUtils.getBindDN(
          cache.getAdsContext().getDirContext());
      String pwd = ConnectionUtils.getBindPassword(
          cache.getAdsContext().getDirContext());
      TopologyCacheFilter filter = new TopologyCacheFilter();
      filter.setSearchMonitoringInformation(false);
      filter.setSearchBaseDNInformation(false);
      ServerLoader loader = new ServerLoader(s.getAdsProperties(),
          dn, pwd, getTrustManager(), cache.getPreferredConnections(),
          filter);
      LOG.log(Level.INFO,"Configuring server "+server.getHostPort(true));
      InitialLdapContext ctx = null;
      try
      {
        ctx = loader.createContext();
        if (serversToConfigureDomain.contains(server))
        ctx = getDirContextForServer(cache, s);
        if (serversToConfigureDomain.contains(s))
        {
          configureToReplicateBaseDN(ctx, baseDN, repServers, usedIds);
        }
        if (replicationServersToConfigure.contains(server))
        if (replicationServersToConfigure.contains(s))
        {
          updateReplicationServer(ctx, allRepServers);
        }
      }
      catch (NamingException ne)
      {
        String hostPort = getHostPort(server,
            cache.getPreferredConnections());
        String hostPort = getHostPort(s, cache.getPreferredConnections());
        Message msg = getMessageForException(ne, hostPort);
        throw new ReplicationCliException(msg, ERROR_CONNECTING, ne);
      }
      catch (OpenDsException ode)
      {
        String hostPort = getHostPort(server,
            cache.getPreferredConnections());
        String hostPort = getHostPort(s, cache.getPreferredConnections());
        Message msg = getMessageForEnableException(ode, hostPort, baseDN);
        throw new ReplicationCliException(msg,
            ERROR_ENABLING_REPLICATION_ON_BASEDN, ode);
@@ -7607,7 +7727,7 @@
    attrs.put("ds-task-initialize-replica-server-id", "all");
    while (!taskCreated)
    {
      String id = "quicksetup-initialize"+i;
      String id = "dsreplication-initialize"+i;
      dn = "ds-task-id="+id+",cn=Scheduled Tasks,cn=Tasks";
      attrs.put("ds-task-id", id);
      try
@@ -9835,6 +9955,9 @@
        if (adsContext.hasAdminData())
        {
          cache1 = new TopologyCache(adsContext, getTrustManager());
          cache1.getFilter().setSearchMonitoringInformation(false);
          cache1.setPreferredConnections(getPreferredConnections(ctx1));
          cache1.reloadTopology();
        }
      }
      catch (Throwable t)
@@ -9852,6 +9975,9 @@
        if (adsContext.hasAdminData())
        {
          cache2 = new TopologyCache(adsContext, getTrustManager());
          cache2.getFilter().setSearchMonitoringInformation(false);
          cache2.setPreferredConnections(getPreferredConnections(ctx2));
          cache2.reloadTopology();
        }
      }
      catch (Throwable t)
@@ -9863,39 +9989,12 @@
    if (cache1 != null && cache2 != null)
    {
      // Check common suffixes
      Set<String> dns1 = new HashSet<String>();
      Set<String> dns2 = new HashSet<String>();
      Set<SuffixDescriptor> suffixes = cache1.getSuffixes();
      for (SuffixDescriptor suffix : suffixes)
      {
        for (String rServer : suffix.getReplicationServers())
        {
          if (rServer.equalsIgnoreCase(replicationServer1))
          {
            dns1.add(suffix.getDN());
          }
        }
      }
      suffixes = cache2.getSuffixes();
      for (SuffixDescriptor suffix : suffixes)
      {
        for (String rServer : suffix.getReplicationServers())
        {
          if (rServer.equalsIgnoreCase(replicationServer2))
          {
            dns2.add(suffix.getDN());
          }
        }
      }
      availableSuffixes.addAll(dns1);
      availableSuffixes.removeAll(dns2);
      alreadyReplicatedSuffixes.addAll(dns1);
      alreadyReplicatedSuffixes.retainAll(dns2);
      updateAvailableAndReplicatedSuffixesForNoDomainOneSense(cache1, cache2,
          replicationServer1, replicationServer2, availableSuffixes,
          alreadyReplicatedSuffixes);
      updateAvailableAndReplicatedSuffixesForNoDomainOneSense(cache2, cache1,
          replicationServer2, replicationServer1, availableSuffixes,
          alreadyReplicatedSuffixes);
    }
    else if (cache1 != null)
    {
@@ -9927,6 +10026,59 @@
    }
  }
  private void updateAvailableAndReplicatedSuffixesForNoDomainOneSense(
      TopologyCache cache1, TopologyCache cache2, String replicationServer1,
      String replicationServer2,
      Collection<String> availableSuffixes,
      Collection<String> alreadyReplicatedSuffixes)
  {
    Set<SuffixDescriptor> suffixes = cache1.getSuffixes();
    for (SuffixDescriptor suffix : suffixes)
    {
      for (String rServer : suffix.getReplicationServers())
      {
        if (rServer.equalsIgnoreCase(replicationServer1))
        {
          boolean isSecondReplicatedInSameTopology = false;
          boolean isSecondReplicated = false;
          boolean isFirstReplicated = false;
          for (SuffixDescriptor suffix2 : cache2.getSuffixes())
          {
            if (Utils.areDnsEqual(suffix.getDN(), suffix2.getDN()))
            {
              for (String rServer2 : suffix2.getReplicationServers())
              {
                if (rServer2.equalsIgnoreCase(replicationServer2))
                {
                  isSecondReplicated = true;
                }
                if (rServer.equalsIgnoreCase(replicationServer2))
                {
                  isFirstReplicated = true;
                }
                if (isFirstReplicated && isSecondReplicated)
                {
                  isSecondReplicatedInSameTopology = true;
                  break;
                }
              }
              break;
            }
          }
          if (!isSecondReplicatedInSameTopology)
          {
            availableSuffixes.add(suffix.getDN());
          }
          else
          {
            alreadyReplicatedSuffixes.add(suffix.getDN());
          }
          break;
        }
      }
    }
  }
  private void updateBaseDnsWithNotEnoughReplicationServer(ADSContext adsCtx1,
      ADSContext adsCtx2, EnableReplicationUserData uData,
      Set<String> baseDNsWithNoReplicationServer,
@@ -10054,9 +10206,367 @@
  {
    return server1.getId().equals(server2.getId());
  }
  /**
   * Merge the contents of the two registries but only does it partially.
   * Only one of the two ADSContext will be updated (in terms of data in
   * cn=admin data), while the other registry's replication servers will have
   * their truststore updated to be able to initialize all the contents.
   *
   * This method does NOT configure replication between topologies or initialize
   * replication.
   *
   * @param adsCtx1 the ADSContext of the first registry.
   * @param adsCtx2 the ADSContext of the second registry.
   * @return <CODE>true</CODE> if the registry containing all the data is
   * the first registry and <CODE>false</CODE> otherwise.
   * @throws ReplicationCliException if there is a problem reading or updating
   * the registries.
   */
  private boolean mergeRegistries(ADSContext adsCtx1, ADSContext adsCtx2)
  throws ReplicationCliException
  {
    PointAdder pointAdder = new PointAdder();
    try
    {
      LinkedHashSet<PreferredConnection> cnx =
        new LinkedHashSet<PreferredConnection>();
      cnx.addAll(getPreferredConnections(adsCtx1.getDirContext()));
      cnx.addAll(getPreferredConnections(adsCtx2.getDirContext()));
      // Check that there are no errors.  We do not allow to do the merge with
      // errors.
      TopologyCache cache1 = new TopologyCache(adsCtx1, getTrustManager());
      cache1.setPreferredConnections(cnx);
      cache1.getFilter().setSearchBaseDNInformation(false);
      try
      {
        cache1.reloadTopology();
      }
      catch (TopologyCacheException te)
      {
        LOG.log(Level.SEVERE, "Error reading topology cache of "+
            ConnectionUtils.getHostPort(adsCtx1.getDirContext())+ " "+te, te);
        throw new ReplicationCliException(
            ERR_REPLICATION_READING_ADS.get(te.getMessageObject()),
            ERROR_UPDATING_ADS, te);
      }
      TopologyCache cache2 = new TopologyCache(adsCtx2, getTrustManager());
      cache2.setPreferredConnections(cnx);
      cache2.getFilter().setSearchBaseDNInformation(false);
      try
      {
        cache2.reloadTopology();
      }
      catch (TopologyCacheException te)
      {
        LOG.log(Level.SEVERE, "Error reading topology cache of "+
            ConnectionUtils.getHostPort(adsCtx2.getDirContext())+ " "+te, te);
        throw new ReplicationCliException(
            ERR_REPLICATION_READING_ADS.get(te.getMessageObject()),
            ERROR_UPDATING_ADS, te);
      }
      // Look for the cache with biggest number of replication servers:
      // that one is going to be source.
      int nRepServers1 = 0;
      for (ServerDescriptor server : cache1.getServers())
      {
        if (server.isReplicationServer())
        {
          nRepServers1 ++;
        }
      }
      int nRepServers2 = 0;
      for (ServerDescriptor server : cache2.getServers())
      {
        if (server.isReplicationServer())
        {
          nRepServers2 ++;
        }
      }
      InitialLdapContext ctxSource;
      InitialLdapContext ctxDestination;
      if (nRepServers1 >= nRepServers2)
      {
        ctxSource = adsCtx1.getDirContext();
        ctxDestination = adsCtx2.getDirContext();
      }
      else
      {
        ctxSource = adsCtx2.getDirContext();
        ctxDestination = adsCtx1.getDirContext();
      }
      if (isInteractive())
      {
        Message msg = INFO_REPLICATION_MERGING_REGISTRIES_CONFIRMATION.get(
            ConnectionUtils.getHostPort(ctxSource),
            ConnectionUtils.getHostPort(ctxDestination),
            ConnectionUtils.getHostPort(ctxSource),
            ConnectionUtils.getHostPort(ctxDestination));
        try
        {
          if (!askConfirmation(msg, true, LOG))
          {
            throw new ReplicationCliException(
                ERR_REPLICATION_USER_CANCELLED.get(),
                ReplicationCliReturnCode.USER_CANCELLED, null);
          }
        }
        catch (CLIException ce)
        {
          println(ce.getMessageObject());
          throw new ReplicationCliException(
              ERR_REPLICATION_USER_CANCELLED.get(),
              ReplicationCliReturnCode.USER_CANCELLED, null);
        }
      }
      else
      {
        Message msg = INFO_REPLICATION_MERGING_REGISTRIES_DESCRIPTION.get(
            ConnectionUtils.getHostPort(ctxSource),
            ConnectionUtils.getHostPort(ctxDestination),
            ConnectionUtils.getHostPort(ctxSource),
            ConnectionUtils.getHostPort(ctxDestination));
      }
      printProgress(INFO_REPLICATION_MERGING_REGISTRIES_PROGRESS.get());
      pointAdder.start();
      Collection<Message> cache1Errors = getErrorMessages(cache1);
      if (!cache1Errors.isEmpty())
      {
        throw new ReplicationCliException(
            ERR_REPLICATION_CANNOT_MERGE_WITH_ERRORS.get(
                ConnectionUtils.getHostPort(adsCtx1.getDirContext()),
                Utils.getMessageFromCollection(cache1Errors,
                    Constants.LINE_SEPARATOR)),
                    ERROR_READING_ADS, null);
      }
      Collection<Message> cache2Errors = getErrorMessages(cache2);
      if (!cache2Errors.isEmpty())
      {
        throw new ReplicationCliException(
            ERR_REPLICATION_CANNOT_MERGE_WITH_ERRORS.get(
                ConnectionUtils.getHostPort(adsCtx2.getDirContext()),
                Utils.getMessageFromCollection(cache2Errors,
                    Constants.LINE_SEPARATOR)),
                    ERROR_READING_ADS, null);
      }
      Set<Message> commonRepServerIDErrors = new HashSet<Message>();
      for (ServerDescriptor server1 : cache1.getServers())
      {
        if (server1.isReplicationServer())
        {
          int replicationID1 = server1.getReplicationServerId();
          boolean found = false;
          for (ServerDescriptor server2 : cache2.getServers())
          {
            if (server2.isReplicationServer())
            {
              int replicationID2 = server2.getReplicationServerId();
              found = replicationID2 == replicationID1;
              if (found)
              {
                commonRepServerIDErrors.add(
                    ERR_REPLICATION_ENABLE_COMMON_REPLICATION_SERVER_ID_ARG.get(
                        server1.getHostPort(true),
                        server2.getHostPort(true),
                        replicationID1));
                found = true;
                break;
              }
            }
          }
          if (found)
          {
            break;
          }
        }
      }
      Set<Message> commonDomainIDErrors = new HashSet<Message>();
      for (SuffixDescriptor suffix1 : cache1.getSuffixes())
      {
        for (ReplicaDescriptor replica1 : suffix1.getReplicas())
        {
          if (replica1.isReplicated())
          {
            int domain1 = replica1.getReplicationId();
            boolean found = false;
            for (SuffixDescriptor suffix2 : cache2.getSuffixes())
            {
              if (!Utils.areDnsEqual(suffix2.getDN(),
                  replica1.getSuffix().getDN()))
              {
                // Conflicting domain names must apply to same suffix.
                continue;
              }
              for (ReplicaDescriptor replica2 : suffix2.getReplicas())
              {
                if (replica2.isReplicated())
                {
                  int domain2 = replica2.getReplicationId();
                  if (domain1 == domain2)
                  {
                    commonDomainIDErrors.add(
                        ERR_REPLICATION_ENABLE_COMMON_DOMAIN_ID_ARG.get(
                            replica1.getServer().getHostPort(true),
                            suffix1.getDN(),
                            replica2.getServer().getHostPort(true),
                            suffix2.getDN(),
                            domain1));
                    found = true;
                    break;
                  }
                }
              }
              if (found)
              {
                break;
              }
            }
          }
        }
      }
      if (!commonRepServerIDErrors.isEmpty() || !commonDomainIDErrors.isEmpty())
      {
        MessageBuilder mb = new MessageBuilder();
        if (!commonRepServerIDErrors.isEmpty())
        {
          mb.append(ERR_REPLICATION_ENABLE_COMMON_REPLICATION_SERVER_ID.get(
            Utils.getMessageFromCollection(commonRepServerIDErrors,
                Constants.LINE_SEPARATOR)));
        }
        if (!commonDomainIDErrors.isEmpty())
        {
          if (mb.length() > 0)
          {
            mb.append(Constants.LINE_SEPARATOR);
          }
          mb.append(ERR_REPLICATION_ENABLE_COMMON_DOMAIN_ID.get(
            Utils.getMessageFromCollection(commonDomainIDErrors,
                Constants.LINE_SEPARATOR)));
        }
        throw new ReplicationCliException(mb.toMessage(),
            ReplicationCliReturnCode.REPLICATION_ADS_MERGE_NOT_SUPPORTED,
            null);
      }
      ADSContext adsCtxSource;
      ADSContext adsCtxDestination;
      TopologyCache cacheDestination;
      if (nRepServers1 >= nRepServers2)
      {
        adsCtxSource = adsCtx1;
        adsCtxDestination = adsCtx2;
        cacheDestination = cache2;
      }
      else
      {
        adsCtxSource = adsCtx2;
        adsCtxDestination = adsCtx1;
        cacheDestination = cache1;
      }
      try
      {
        adsCtxSource.mergeWithRegistry(adsCtxDestination);
      }
      catch (ADSContextException adce)
      {
        LOG.log(Level.SEVERE, "Error merging registry of "+
            ConnectionUtils.getHostPort(adsCtxSource.getDirContext())+
            " with registry of "+
            ConnectionUtils.getHostPort(adsCtxDestination.getDirContext())+" "+
            adce, adce);
        if (adce.getError() == ADSContextException.ErrorType.ERROR_MERGING)
        {
          throw new ReplicationCliException(adce.getMessageObject(),
          REPLICATION_ADS_MERGE_NOT_SUPPORTED, adce);
        }
        else
        {
          throw new ReplicationCliException(
              ERR_REPLICATION_UPDATING_ADS.get(adce.getMessageObject()),
              ERROR_UPDATING_ADS, adce);
        }
      }
      try
      {
        for (ServerDescriptor server : cacheDestination.getServers())
        {
          if (server.isReplicationServer())
          {
            LOG.log(Level.INFO, "Seeding to replication server on "+
                server.getHostPort(true)+" with certificates of "+
                ConnectionUtils.getHostPort(adsCtxSource.getDirContext()));
            InitialLdapContext ctx = null;
            try
            {
              ctx = getDirContextForServer(cacheDestination, server);
              ServerDescriptor.seedAdsTrustStore(ctx,
                  adsCtxSource.getTrustedCertificates());
            }
            finally
            {
              if (ctx != null)
              {
                ctx.close();
              }
            }
          }
        }
      }
      catch (Throwable t)
      {
        LOG.log(Level.SEVERE, "Error seeding truststore: "+t, t);
        String arg = (t instanceof OpenDsException) ?
            ((OpenDsException)t).getMessageObject().toString() : t.toString();
            throw new ReplicationCliException(
                ERR_REPLICATION_ENABLE_SEEDING_TRUSTSTORE.get(
                    ConnectionUtils.getHostPort(adsCtx2.getDirContext()),
                    ConnectionUtils.getHostPort(adsCtx1.getDirContext()),
                    arg),
                    ERROR_SEEDING_TRUSTORE, t);
      }
      pointAdder.stop();
      printProgress(formatter.getSpace());
      printProgress(formatter.getFormattedDone());
      printlnProgress();
      return adsCtxSource == adsCtx1;
    }
    finally
    {
      pointAdder.stop();
    }
  }
  private InitialLdapContext getDirContextForServer(TopologyCache cache,
      ServerDescriptor server) throws NamingException
  {
    String dn = ConnectionUtils.getBindDN(
        cache.getAdsContext().getDirContext());
    String pwd = ConnectionUtils.getBindPassword(
        cache.getAdsContext().getDirContext());
    TopologyCacheFilter filter = new TopologyCacheFilter();
    filter.setSearchMonitoringInformation(false);
    filter.setSearchBaseDNInformation(false);
    ServerLoader loader = new ServerLoader(server.getAdsProperties(),
        dn, pwd, getTrustManager(), cache.getPreferredConnections(),
        filter);
    return loader.createContext();
  }
}
/**
 * Class used to compare replication servers.
 *
opendj-sdk/opends/src/server/org/opends/server/util/cli/ConsoleApplication.java
@@ -22,7 +22,7 @@
 * CDDL HEADER END
 *
 *
 *      Copyright 2008 Sun Microsystems, Inc.
 *      Copyright 2008-2009 Sun Microsystems, Inc.
 */
package org.opends.server.util.cli;
@@ -61,6 +61,9 @@
import org.opends.admin.ads.util.ConnectionUtils;
import org.opends.admin.ads.util.OpendsCertificateException;
import org.opends.messages.Message;
import org.opends.messages.MessageBuilder;
import org.opends.quicksetup.util.PlainTextProgressMessageFormatter;
import org.opends.quicksetup.util.ProgressMessageFormatter;
import org.opends.quicksetup.util.Utils;
import org.opends.server.protocols.ldap.LDAPResultCode;
import org.opends.server.tools.ClientException;
@@ -1057,4 +1060,118 @@
    }
    return timeStr;
  }
  /**
   * The default period time used to write points in the output.
   */
  protected static final long DEFAULT_PERIOD_TIME = 3000;
  /**
   * Class used to add points periodically to the end of the output.
   *
   */
  protected class PointAdder implements Runnable
  {
    private Thread t;
    private boolean stopPointAdder;
    private boolean pointAdderStopped;
    private long periodTime = DEFAULT_PERIOD_TIME;
    private boolean isError;
    private ProgressMessageFormatter formatter;
    /**
     * Default constructor.
     * Creates a PointAdder that writes to the standard output with the default
     * period time.
     */
    public PointAdder()
    {
      this(DEFAULT_PERIOD_TIME, false, new PlainTextProgressMessageFormatter());
    }
    /**
     * Default constructor.
     * @param periodTime the time between printing two points.
     * @param isError whether the points must be printed in error stream
     * or output stream.
     * @param formatter the text formatter.
     */
    public PointAdder(long periodTime, boolean isError,
        ProgressMessageFormatter formatter)
    {
      this.periodTime = periodTime;
      this.isError = isError;
      this.formatter = formatter;
    }
    /**
     * Starts the PointAdder: points are added at the end of the logs
     * periodically.
     */
    public void start()
    {
      MessageBuilder mb = new MessageBuilder();
      mb.append(formatter.getSpace());
      for (int i=0; i< 5; i++)
      {
        mb.append(formatter.getFormattedPoint());
      }
      if (isError)
      {
        print(mb.toMessage());
      }
      else
      {
        printProgress(mb.toMessage());
      }
      t = new Thread(this);
      t.start();
    }
    /**
     * Stops the PointAdder: points are no longer added at the end of the logs
     * periodically.
     */
    public synchronized void stop()
    {
      stopPointAdder = true;
      while (!pointAdderStopped)
      {
        try
        {
          t.interrupt();
          // To allow the thread to set the boolean.
          Thread.sleep(100);
        }
        catch (Throwable t)
        {
        }
      }
    }
    /**
     * {@inheritDoc}
     */
    public void run()
    {
      while (!stopPointAdder)
      {
        try
        {
          Thread.sleep(periodTime);
          if (isError)
          {
            print(formatter.getFormattedPoint());
          }
          else
          {
            printProgress(formatter.getFormattedPoint());
          }
        }
        catch (Throwable t)
        {
        }
      }
      pointAdderStopped = true;
    }
  }
}
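
Illustration (not part of the patch): the PointAdder above is consumed by the new merge code in ReplicationCliMain roughly as follows; the surrounding printProgress/formatter calls are the same ones visible in the ReplicationCliMain.java hunks.

  // Hypothetical caller running inside a ConsoleApplication subclass.
  PointAdder pointAdder = new PointAdder();
  printProgress(INFO_REPLICATION_MERGING_REGISTRIES_PROGRESS.get());
  pointAdder.start();   // prints an initial run of points, then one every DEFAULT_PERIOD_TIME ms
  try
  {
    // ... long-running work, e.g. adsCtxSource.mergeWithRegistry(adsCtxDestination) ...
  }
  finally
  {
    pointAdder.stop();  // waits until the background thread has acknowledged the stop
  }
  printProgress(formatter.getSpace());
  printProgress(formatter.getFormattedDone());
  printlnProgress();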