opends/src/server/org/opends/server/replication/server/DataServerHandler.java
@@ -23,7 +23,7 @@
  *
  *
  *      Copyright 2006-2010 Sun Microsystems, Inc.
- *      Portions copyright 2011-2012 ForgeRock AS
+ *      Portions copyright 2011-2013 ForgeRock AS
  */
 package org.opends.server.replication.server;

@@ -61,7 +61,7 @@
  */
 public class DataServerHandler extends ServerHandler
 {
-  // Temporay generationId received in handshake/phase1,
+  // Temporary generationId received in handshake/phase1,
   // and used after handshake/phase2
   long tmpGenerationId;

@@ -204,7 +204,7 @@
     if (newStatus == ServerStatus.INVALID_STATUS)
     {
-      Message msg = ERR_RS_CANNOT_CHANGE_STATUS.get(getServiceId().toString(),
+      Message msg = ERR_RS_CANNOT_CHANGE_STATUS.get(getServiceId(),
           Integer.toString(serverId), status.toString(), event.toString());
       logError(msg);
       return;
@@ -393,7 +393,7 @@
     ServerStatus newStatus = StatusMachine.computeNewStatus(status, event);
     if (newStatus == ServerStatus.INVALID_STATUS)
     {
-      Message msg = ERR_RS_CANNOT_CHANGE_STATUS.get(getServiceId().toString(),
+      Message msg = ERR_RS_CANNOT_CHANGE_STATUS.get(getServiceId(),
          Integer.toString(serverId), status.toString(), event.toString());
       logError(msg);
       return ServerStatus.INVALID_STATUS;
@@ -483,8 +483,21 @@
      * let the reader thread see the closure and cleanup any reference
      * to old connection. This must be done before taking the domain lock so
      * that the reader thread has a chance to stop the handler.
+     *
+     * TODO: This hack should be removed and disconnection/reconnection
+     * properly dealt with.
      */
-    replicationServerDomain.waitDisconnection(inServerStartMsg.getServerId());
+    if (replicationServerDomain.getConnectedDSs()
+        .containsKey(inServerStartMsg.getServerId()))
+    {
+      try
+      {
+        Thread.sleep(100);
+      } catch(Exception e){
+        abortStart(null);
+        return;
+      }
+    }

     // lock with no timeout
     lockDomain(false);
@@ -649,11 +662,10 @@
    */
   public DSInfo toDSInfo()
   {
-    DSInfo dsInfo = new DSInfo(serverId, replicationServerId, generationId,
+    return new DSInfo(serverId, replicationServerId, generationId,
       status, assuredFlag, assuredMode, safeDataLevel, groupId,
       refUrls, eclIncludes, eclIncludesForDeletes, protocolVersion);
-
-    return dsInfo;
   }

   /**

opends/src/server/org/opends/server/replication/server/ReplicationServerDomain.java
@@ -23,7 +23,7 @@
  *
  *
  *      Copyright 2006-2010 Sun Microsystems, Inc.
- *      Portions copyright 2011-2012 ForgeRock AS
+ *      Portions copyright 2011-2013 ForgeRock AS
  */
 package org.opends.server.replication.server;

@@ -492,10 +492,7 @@
     }

     // Push the message to the other subscribing handlers
-    Iterator<MessageHandler> otherIter = otherHandlers.iterator();
-    while (otherIter.hasNext())
-    {
-      MessageHandler handler = otherIter.next();
+    for (MessageHandler handler : otherHandlers) {
       handler.add(update, sourceHandler);
     }
   }
@@ -905,8 +902,7 @@
       }

       // Increment assured counters
       boolean safeRead =
-          (expectedAcksInfo instanceof SafeReadExpectedAcksInfo) ? true : false;
+          (expectedAcksInfo instanceof SafeReadExpectedAcksInfo);
       if (safeRead)
       {
         origServer.incrementAssuredSrReceivedUpdatesTimeout();
@@ -963,24 +959,6 @@
     }
   }

-  /**
-   * Wait a short while for ServerId disconnection.
-   *
-   * @param serverId the serverId to be checked.
-   */
-  public void waitDisconnection(int serverId)
-  {
-    if (directoryServers.containsKey(serverId))
-    {
-      // try again
-      try
-      {
-        Thread.sleep(100);
-      } catch (InterruptedException e)
-      {
-      }
-    }
-  }
-
   /**
    * Stop operations with a list of replication servers.
@@ -1026,14 +1004,13 @@
    */
   public boolean checkForDuplicateDS(DataServerHandler handler)
   {
-    DataServerHandler oldHandler = directoryServers.get(handler.getServerId());
     if (directoryServers.containsKey(handler.getServerId()))
     {
-      // looks like two LDAP servers have the same serverId
+      // looks like two connected LDAP servers have the same serverId
       Message message = ERR_DUPLICATE_SERVER_ID.get(
-          replicationServer.getMonitorInstanceName(), oldHandler.toString(),
-          handler.toString(), handler.getServerId());
+          replicationServer.getMonitorInstanceName(),
+          directoryServers.get(handler.getServerId()).toString(),
+          handler.toString(), handler.getServerId());
       logError(message);
       return false;
     }
@@ -1096,7 +1073,7 @@
     if (handler.isReplicationServer())
     {
-      if (replicationServers.containsValue(handler))
+      if (replicationServers.containsKey(handler.getServerId()))
       {
         unregisterServerHandler(handler);
         handler.shutdown();
@@ -1110,7 +1087,7 @@
           buildAndSendTopoInfoToDSs(null);
         }
       }
-    } else if (directoryServers.containsValue(handler))
+    } else if (directoryServers.containsKey(handler.getServerId()))
     {
       // If this is the last DS for the domain,
       // shutdown the status analyzer
@@ -1434,7 +1411,7 @@
       return null;
     }

-    if (it.next() == false)
+    if (!it.next())
     {
       it.releaseCursor();
       return null;
@@ -1459,8 +1436,7 @@
     if (handler == null)
       return 0;

-    int count = handler.getCount(from, to);
-    return count;
+    return handler.getCount(from, to);
   }

   /**
@@ -1655,9 +1631,8 @@
         MessageBuilder mb1 = new MessageBuilder();
         mb1.append(
             NOTE_ERR_ROUTING_TO_SERVER.get(msg.getClass().getCanonicalName()));
-        mb1.append("serverID:" + msg.getDestination());
-        ErrorMsg errMsg = new ErrorMsg(
-            msg.getSenderID(), mb1.toMessage());
+        mb1.append("serverID:").append(msg.getDestination());
+        ErrorMsg errMsg = new ErrorMsg(msg.getSenderID(), mb1.toMessage());
         try
         {
           senderHandler.send(errMsg);
@@ -1678,9 +1653,9 @@
       MessageBuilder mb = new MessageBuilder();
       mb.append(ERR_NO_REACHABLE_PEER_IN_THE_DOMAIN.get(
           this.baseDn, Integer.toString(msg.getDestination())));
-      mb.append(" In Replication Server=" +
+      mb.append(" In Replication Server=").append(
           this.replicationServer.getMonitorInstanceName());
-      mb.append(" unroutable message =" + msg.getClass().getSimpleName());
+      mb.append(" unroutable message =").append(msg.getClass().getSimpleName());
       mb.append(" Details:routing table is empty");
       ErrorMsg errMsg = new ErrorMsg(
           this.replicationServer.getServerId(),
@@ -1915,13 +1890,12 @@
   {
     for (DataServerHandler handler : directoryServers.values())
     {
-      if ((notThisOne == null) || // All DSs requested
-          ((notThisOne != null) && (handler != notThisOne)))
+      if ((notThisOne == null) || ((handler != notThisOne))) // All except passed one
       {
-        for (int i=1; i<2; i++)
+        for (int i=1; i<=2; i++)
         {
-          if (handler.shuttingDown()==false)
+          if (!handler.shuttingDown())
           {
             if (handler.getStatus() != ServerStatus.NOT_CONNECTED_STATUS)
             {
@@ -1960,9 +1934,9 @@
     TopologyMsg topoMsg = createTopologyMsgForRS();
     for (ReplicationServerHandler handler : replicationServers.values())
     {
-      for (int i=1; i<2; i++)
+      for (int i=1; i<=2; i++)
       {
-        if (handler.shuttingDown()==false)
+        if (!handler.shuttingDown())
         {
           if (handler.getStatus() != ServerStatus.NOT_CONNECTED_STATUS)
           {
@@ -2685,42 +2659,38 @@
   /**
    * Start collecting global monitoring information for this
    * ReplicationServerDomain.
    *
    * @throws DirectoryException
    *           In case the monitoring information could not be collected.
    */
   private void initializePendingMonitorData()
   {
-    // Let's process our directly connected LSes
-    // - in the ServerHandler for a given LS1, the stored state contains :
-    //   - the max CN produced by LS1
-    //   - the last CN consumed by LS1 from LS2..n
+    // Let's process our directly connected DS
+    // - in the ServerHandler for a given DS1, the stored state contains :
+    //   - the max CN produced by DS1
+    //   - the last CN consumed by DS1 from DS2..n
     // - in the RSdomain/dbHandler, the built-in state contains :
     //   - the max CN produced by each server
-    // So for a given LS connected we can take the state and the max from
-    // the LS/state.
+    // So for a given DS connected we can take the state and the max from
+    // the DS/state.

-    for (ServerHandler directlsh : directoryServers.values())
+    for (ServerHandler ds : directoryServers.values())
     {
-      int serverID = directlsh.getServerId();
+      int serverID = ds.getServerId();

       // the state comes from the state stored in the SH
-      ServerState directlshState = directlsh.getServerState()
+      ServerState dsState = ds.getServerState()
           .duplicate();

       // the max CN sent by that LS also comes from the SH
-      ChangeNumber maxcn = directlshState
-          .getMaxChangeNumber(serverID);
+      ChangeNumber maxcn = dsState.getMaxChangeNumber(serverID);
       if (maxcn == null)
       {
         // This directly connected LS has never produced any change
         maxcn = new ChangeNumber(0, 0, serverID);
       }
       pendingMonitorData.setMaxCN(serverID, maxcn);
-      pendingMonitorData.setLDAPServerState(serverID, directlshState);
+      pendingMonitorData.setLDAPServerState(serverID, dsState);
       pendingMonitorData.setFirstMissingDate(serverID,
-          directlsh.getApproxFirstMissingDate());
+          ds.getApproxFirstMissingDate());
     }

     // Then initialize the max CN for the LS that produced something
@@ -2729,10 +2699,7 @@
     ServerState dbServerState = getDbServerState();
     pendingMonitorData.setRSState(replicationServer.getServerId(),
         dbServerState);
-    Iterator<Integer> it = dbServerState.iterator();
-    while (it.hasNext())
-    {
-      int sid = it.next();
+    for (int sid : dbServerState) {
       ChangeNumber storedCN = dbServerState.getMaxChangeNumber(sid);
       pendingMonitorData.setMaxCN(sid, storedCN);
     }
@@ -2746,7 +2713,7 @@
    *
    * @param msg
    *          The message to be processed.
-   * @param globalServerHandlerId
+   * @param serverId
    *          server handler that is receiving the message.
    */
   private void receivesMonitorDataResponse(MonitorMsg msg,
@@ -2833,7 +2800,7 @@
     finally
     {
       // Decreases the number of expected responses and potentially
-      // wakes up the waiting requestor thread.
+      // wakes up the waiting requester thread.
       if (pendingMonitorDataServerIDs.remove(serverId))
       {
         pendingMonitorDataLatch.countDown();
@@ -2986,7 +2953,7 @@
   {
     if (statusAnalyzer != null)
     {
-      statusAnalyzer.setDeradedStatusThreshold(degradedStatusThreshold);
+      statusAnalyzer.setDegradedStatusThreshold(degradedStatusThreshold);
     }
   }
@@ -3160,65 +3127,46 @@
    */
   public ServerState getEligibleState(ChangeNumber eligibleCN)
   {
-    ServerState result = new ServerState();
     ServerState dbState = this.getDbServerState();

     // The result is initialized from the dbState.
     // From it, we don't want to keep the changes newer than eligibleCN.
-    result = dbState.duplicate();
+    ServerState result = dbState.duplicate();

     if (eligibleCN != null)
     {
-      Iterator<Integer> it = dbState.iterator();
-      while (it.hasNext())
-      {
-        int sid = it.next();
+      for (int sid : dbState) {
         DbHandler h = sourceDbHandlers.get(sid);
         ChangeNumber mostRecentDbCN = dbState.getMaxChangeNumber(sid);
-        try
-        {
+        try {
          // Is the most recent change in the Db newer than eligible CN ?
          // if yes (like cn15 in the example above, then we have to go back
          // to the Db and look for the change older than eligible CN (cn14)
-          if (eligibleCN.olderOrEqual(mostRecentDbCN))
-          {
+          if (eligibleCN.olderOrEqual(mostRecentDbCN)) {
            // let's try to seek the first change <= eligibleCN
            ReplicationIterator ri = null;
-            try
-            {
+            try {
              ri = h.generateIterator(eligibleCN);
-              if ((ri != null) && (ri.getChange()!=null))
-              {
+              if ((ri != null) && (ri.getChange() != null)) {
                ChangeNumber newCN = ri.getChange().getChangeNumber();
                result.update(newCN);
              }
-            }
-            catch(Exception e)
-            {
+            } catch (Exception e) {
              // there's no change older than eligibleCN (case of s3/cn31)
-              result.update(new ChangeNumber(0,0,sid));
-            }
-            finally
-            {
-              if (ri != null)
-              {
+              result.update(new ChangeNumber(0, 0, sid));
+            } finally {
+              if (ri != null) {
                ri.releaseCursor();
                ri = null;
              }
            }
-          }
-          else
-          {
-            // for this serverid, all changes in the ChangelogDb are holder
+          } else {
+            // for this serverId, all changes in the ChangelogDb are holder
            // than eligibleCN , the most recent in the db is our guy.
            result.update(mostRecentDbCN);
          }
-        }
-        catch(Exception e)
-        {
+        } catch (Exception e) {
          Message errMessage = ERR_WRITER_UNEXPECTED_EXCEPTION.get(
-            " " + stackTraceToSingleLineString(e));
+              " " + stackTraceToSingleLineString(e));
          logError(errMessage);
          TRACER.debugCaught(DebugLogLevel.ERROR, e);
        }
@@ -3234,7 +3182,7 @@
   /**
    * Returns the start state of the domain, made of the first (oldest)
    * change stored for each serverId.
-   * Note: Because the replication changelogdb triming always keep one change
+   * Note: Because the replication changelogdb trimming always keep one change
    * whatever its date, the change contained in the returned state can be very
    * old.
    * @return the start state of the domain.
@@ -3433,21 +3381,18 @@
     // Parses the dbState of the domain , server by server
     ServerState dbState = this.getDbServerState();
-    Iterator<Integer> serverIDIterator = dbState.iterator();
-    while (serverIDIterator.hasNext())
-    {
+    for (int sid : dbState) {
       // process one sid
-      int sid = serverIDIterator.next();
       ChangeNumber startCN = null;
       if (startState.getMaxChangeNumber(sid) != null)
         startCN = startState.getMaxChangeNumber(sid);

       long sidRes = getCount(sid, startCN, endCN);

       // The startPoint is excluded when counting the ECL eligible changes
-      if ((startCN!=null)&&(sidRes>0))
+      if ((startCN != null) && (sidRes > 0))
         sidRes--;

-      res+=sidRes;
+      res += sidRes;
     }
     return res;
   }
@@ -3466,14 +3411,11 @@
     // Parses the dbState of the domain , server by server
     ServerState dbState = this.getDbServerState();
-    Iterator<Integer> serverIDIterator = dbState.iterator();
-    while (serverIDIterator.hasNext())
-    {
+    for (int sid : dbState) {
       // process one sid
-      int sid = serverIDIterator.next();
       ChangeNumber lStartCN =
-        new ChangeNumber(startCN.getTime(), startCN.getSeqnum(), sid);
-      res+=getCount(sid, lStartCN, endCN);
+          new ChangeNumber(startCN.getTime(), startCN.getSeqnum(), sid);
+      res += getCount(sid, lStartCN, endCN);
     }
     return res;
   }

opends/src/server/org/opends/server/replication/server/ReplicationServerHandler.java
@@ -23,7 +23,7 @@
  *
  *
  *      Copyright 2006-2010 Sun Microsystems, Inc.
- *      Portions copyright 2011-2012 ForgeRock AS
+ *      Portions copyright 2011-2013 ForgeRock AS
  */
 package org.opends.server.replication.server;

@@ -213,9 +213,11 @@
         return;
       }

-      // Since we are going to send the topology message before having received
-      // one, we need to set the generation ID as soon as possible if it is
-      // currently uninitialized. See OpenDJ-121.
+      /*
+      Since we are going to send the topology message before having received
+      one, we need to set the generation ID as soon as possible if it is
+      currently uninitialized. See OpenDJ-121.
+      */
       if (localGenerationId < 0 && generationId > 0)
       {
         oldGenerationId = replicationServerDomain.changeGenerationId(
@@ -232,12 +234,13 @@
       if (protocolVersion > ProtocolVersion.REPLICATION_PROTOCOL_V1)
       {
-        // Only protocol version above V1 has a phase 2 handshake
+        /*
+        Only protocol version above V1 has a phase 2 handshake
+        NOW PROCEDE WITH SECOND PHASE OF HANDSHAKE:
+        TopologyMsg then TopologyMsg (with a RS)

-        // NOW PROCEDE WITH SECOND PHASE OF HANDSHAKE:
-        // TopologyMsg then TopologyMsg (with a RS)
-
-        // Send our own TopologyMsg to remote RS
+        Send our own TopologyMsg to remote RS
+        */
         TopologyMsg outTopoMsg = sendTopoToRemoteRS();

         // wait and process Topo from remote RS
@@ -254,12 +257,16 @@
         // Create the monitoring publisher for the domain if not already started
         createMonitoringPublisher();

-        // FIXME: i think this should be done for all protocol version !!
-        // not only those > V1
+        /*
+        FIXME: i think this should be done for all protocol version !!
+        not only those > V1
+        */
         registerIntoDomain();

-        // Process TopologyMsg sent by remote RS: store matching new info
-        // (this will also warn our connected DSs of the new received info)
+        /*
+        Process TopologyMsg sent by remote RS: store matching new info
+        (this will also warn our connected DSs of the new received info)
+        */
         replicationServerDomain.receiveTopoInfoFromRS(inTopoMsg, this, false);
       }
@@ -339,19 +346,22 @@
       // log
       logStartHandshakeRCVandSND(inReplServerStartMsg, outReplServerStartMsg);

-      // until here session is encrypted then it depends on the negotiation
-      // The session initiator decides whether to use SSL.
+      /*
+      until here session is encrypted then it depends on the negotiation
+      The session initiator decides whether to use SSL.
+      */
       if (!sslEncryption)
        session.stopEncryption();

      TopologyMsg inTopoMsg = null;
      if (protocolVersion > ProtocolVersion.REPLICATION_PROTOCOL_V1)
      {
-        // Only protocol version above V1 has a phase 2 handshake
-        // NOW PROCEDE WITH SECOND PHASE OF HANDSHAKE:
-        // TopologyMsg then TopologyMsg (with a RS)
-        // wait and process Topo from remote RS
+        /*
+        Only protocol version above V1 has a phase 2 handshake
+        NOW PROCEDE WITH SECOND PHASE OF HANDSHAKE:
+        TopologyMsg then TopologyMsg (with a RS)
+        wait and process Topo from remote RS
+        */
        inTopoMsg = waitAndProcessTopoFromRemoteRS();
        if (inTopoMsg == null)
        {
@@ -384,76 +394,17 @@
        }
      }
      else
      {
-        if (localGenerationId > 0)
-        { // if the local RS is initialized
-          if (generationId > 0)
-          { // if the remote RS is initialized
-            if (generationId != localGenerationId)
-            { // if the 2 RS have different generationID
-              if (replicationServerDomain.getGenerationIdSavedStatus())
-              {
-                // if the present RS has received changes regarding its
-                // gen ID and so won't change without a reset
-                // then we are just degrading the peer.
-                Message message = WARN_BAD_GENERATION_ID_FROM_RS
-                    .get(serverId, session.getReadableRemoteAddress(),
-                        generationId, getServiceId(),
-                        getReplicationServerId(), localGenerationId);
-                logError(message);
-              }
-              else
-              {
-                // The present RS has never received changes regarding its
-                // gen ID.
-                //
-                // Example case:
-                // - we are in RS1
-                // - RS2 has genId2 from LS2 (genId2 <=> no data in LS2)
-                // - RS1 has genId1 from LS1 /genId1 comes from data in
-                //   suffix
-                // - we are in RS1 and we receive a START msg from RS2
-                // - Each RS keeps its genID / is degraded and when LS2
-                //   will be populated from LS1 everything will become ok.
-                //
-                // Issue:
-                // FIXME : Would it be a good idea in some cases to just
-                //         set the gen ID received from the peer RS
-                //         specially if the peer has a non null state and
-                //         we have a nul state ?
-                // replicationServerDomain.
-                // setGenerationId(generationId, false);
-                Message message = WARN_BAD_GENERATION_ID_FROM_RS
-                    .get(serverId, session.getReadableRemoteAddress(),
-                        generationId, getServiceId(),
-                        getReplicationServerId(), localGenerationId);
-                logError(message);
-              }
-            }
-          }
-          else
-          {
-            // The remote RS has no genId. We don't change anything for the
-            // current RS.
-          }
-        }
-        else
-        {
-          // The local RS is not initialized - take the one received
-          oldGenerationId =
-              replicationServerDomain.changeGenerationId(generationId, false);
-        }
+        checkGenerationId();
      }

-      // Note: the supported scenario for V1->V2 upgrade is to upgrade 1 by 1
-      // all the servers of the topology. We prefer not not send a TopologyMsg
-      // for giving partial/false information to the V2 servers as for
-      // instance we don't have the connected DS of the V1 RS...When the V1
-      // RS will be upgraded in his turn, topo info will be sent and accurate.
-      // That way, there is no risk to have false/incomplete information in
-      // other servers.
+      /*
+      Note: the supported scenario for V1->V2 upgrade is to upgrade 1 by 1
+      all the servers of the topology. We prefer not not send a TopologyMsg
+      for giving partial/false information to the V2 servers as for
+      instance we don't have the connected DS of the V1 RS...When the V1
+      RS will be upgraded in his turn, topo info will be sent and accurate.
+      That way, there is no risk to have false/incomplete information in
+      other servers.
+      */
    }
@@ -543,7 +494,7 @@
  private TopologyMsg waitAndProcessTopoFromRemoteRS()
  throws DirectoryException
  {
-    ReplicationMsg msg = null;
+    ReplicationMsg msg;
    try
    {
      msg = session.receive();
@@ -582,12 +533,16 @@
    }
    else
    {
-      // Remote RS uses protocol version prior to 4 : use default value for
-      // weight: 1
+      /*
+      Remote RS uses protocol version prior to 4 : use default value for
+      weight: 1
+      */
    }

-    // if the remote RS and the local RS have the same genID
-    // then it's ok and nothing else to do
+    /*
+    if the remote RS and the local RS have the same genID
+    then it's ok and nothing else to do
+    */
    if (generationId == localGenerationId)
    {
      if (debugEnabled())
@@ -601,75 +556,91 @@
    }
    else
    {
-      if (localGenerationId > 0)
-      { // if the local RS is initialized
-        if (generationId > 0)
-        { // if the remote RS is initialized
-          if (generationId != localGenerationId)
-          { // if the 2 RS have different generationID
-            if (replicationServerDomain.getGenerationIdSavedStatus())
-            {
-              // if the present RS has received changes regarding its
-              // gen ID and so won't change without a reset
-              // then we are just degrading the peer.
-              Message message = WARN_BAD_GENERATION_ID_FROM_RS.get(
-                  serverId, session.getReadableRemoteAddress(),
-                  generationId, getServiceId(), getReplicationServerId(),
-                  localGenerationId);
-              logError(message);
-            }
-            else
-            {
-              // The present RS has never received changes regarding its
-              // gen ID.
-              //
-              // Example case:
-              // - we are in RS1
-              // - RS2 has genId2 from LS2 (genId2 <=> no data in LS2)
-              // - RS1 has genId1 from LS1 /genId1 comes from data in
-              //   suffix
-              // - we are in RS1 and we receive a START msg from RS2
-              // - Each RS keeps its genID / is degraded and when LS2
-              //   will be populated from LS1 everything will become ok.
-              //
-              // Issue:
-              // FIXME : Would it be a good idea in some cases to just
-              //         set the gen ID received from the peer RS
-              //         specially if the peer has a non null state and
-              //         we have a nul state ?
-              // replicationServerDomain.
-              // setGenerationId(generationId, false);
-              Message message = WARN_BAD_GENERATION_ID_FROM_RS.get(
-                  serverId, session.getReadableRemoteAddress(),
-                  generationId, getServiceId(), getReplicationServerId(),
-                  localGenerationId);
-              logError(message);
-            }
-          }
-        }
-        else
-        {
-          // The remote RS has no genId. We don't change anything for the
-          // current RS.
-        }
-      }
-      else
-      {
-        // The local RS is not initialized - take the one received
-        // WARNING: Must be done before computing topo message to send
-        // to peer server as topo message must embed valid generation id
-        // for our server
-        oldGenerationId =
-            replicationServerDomain.changeGenerationId(generationId, false);
-      }
+      checkGenerationId();
    }

    return inTopoMsg;
  }

+  /**
+   * Checks local generation ID against the remote RS one,
+   * and logs Warning messages if needed.
+   */
+  private void checkGenerationId()
+  {
+    if (localGenerationId > 0)
+    { // if the local RS is initialized
+      if (generationId > 0)
+      { // if the remote RS is initialized
+        if (generationId != localGenerationId)
+        { // if the 2 RS have different generationID
+          if (replicationServerDomain.getGenerationIdSavedStatus())
+          {
+            /*
+            if the present RS has received changes regarding its
+            gen ID and so won't change without a reset
+            then we are just degrading the peer.
+            */
+            Message message = WARN_BAD_GENERATION_ID_FROM_RS.get(
+                serverId, session.getReadableRemoteAddress(),
+                generationId, getServiceId(), getReplicationServerId(),
+                localGenerationId);
+            logError(message);
+          }
+          else
+          {
+            /*
+            The present RS has never received changes regarding its
+            gen ID.
+
+            Example case:
+            - we are in RS1
+            - RS2 has genId2 from LS2 (genId2 <=> no data in LS2)
+            - RS1 has genId1 from LS1 /genId1 comes from data in
+              suffix
+            - we are in RS1 and we receive a START msg from RS2
+            - Each RS keeps its genID / is degraded and when LS2
+              will be populated from LS1 everything will become ok.
+
+            Issue:
+            FIXME : Would it be a good idea in some cases to just
+                    set the gen ID received from the peer RS
+                    specially if the peer has a non null state and
+                    we have a null state ?
+            replicationServerDomain.
+              setGenerationId(generationId, false);
+            */
+            Message message = WARN_BAD_GENERATION_ID_FROM_RS.get(
+                serverId, session.getReadableRemoteAddress(),
+                generationId, getServiceId(), getReplicationServerId(),
+                localGenerationId);
+            logError(message);
+          }
+        }
+      }
+      else
+      {
+        /*
+        The remote RS has no genId. We don't change anything for the
+        current RS.
+        */
+      }
+    }
+    else
+    {
+      /*
+      The local RS is not initialized - take the one received
+      WARNING: Must be done before computing topo message to send
+      to peer server as topo message must embed valid generation id
+      for our server
+      */
+      oldGenerationId =
+          replicationServerDomain.changeGenerationId(generationId, false);
+    }
+  }
+
  /**
   * {@inheritDoc}

opends/src/server/org/opends/server/replication/server/StatusAnalyzer.java
@@ -23,7 +23,7 @@
  *
  *
  *      Copyright 2008-2009 Sun Microsystems, Inc.
- *      Portions Copyright 2011-2012 ForgeRock AS
+ *      Portions Copyright 2011-2013 ForgeRock AS
  */
 package org.opends.server.replication.server;

@@ -133,7 +133,7 @@
           if (debugEnabled())
           {
             TRACER.debugInfo("Status analyzer for dn " +
-              replicationServerDomain.getBaseDn().toString() + " DS " +
+              replicationServerDomain.getBaseDn() + " DS " +
               Integer.toString(serverHandler.getServerId()) + " has " +
               nChanges + " message(s) in writer queue. This is in RS " +
               replicationServerDomain.getReplicationServer().getServerId());
@@ -162,7 +162,7 @@
         {
           // Finish job and let thread die
           TRACER.debugInfo("Status analyzer for dn " +
-            replicationServerDomain.getBaseDn().toString() +
+            replicationServerDomain.getBaseDn() +
             " has been interrupted and will die. This is in RS " +
             replicationServerDomain.getReplicationServer().getServerId());
           break;
@@ -180,7 +180,7 @@
         {
           // Finish job and let thread die
           TRACER.debugInfo("Status analyzer for dn " +
-            replicationServerDomain.getBaseDn().toString() +
+            replicationServerDomain.getBaseDn() +
             " has been interrupted and will die. This is in RS " +
             replicationServerDomain.getReplicationServer().getServerId());
           break;
@@ -193,7 +193,7 @@
       done = true;

       TRACER.debugInfo("Status analyzer for dn " +
-        replicationServerDomain.getBaseDn().toString() + " is terminated."
+        replicationServerDomain.getBaseDn() + " is terminated."
         + " This is in RS " +
         replicationServerDomain.getReplicationServer().getServerId());
     }
@@ -227,7 +227,7 @@
   {
     int FACTOR = 40; // Wait for 2 seconds before interrupting the thread
     int n = 0;
-    while ((done == false) && (this.isAlive()))
+    while (!done && this.isAlive())
     {
       Thread.sleep(50);
       n++;
@@ -249,7 +249,7 @@
    * Sets the threshold value.
    * @param degradedStatusThreshold The new threshold value.
    */
-  public void setDeradedStatusThreshold(int degradedStatusThreshold)
+  public void setDegradedStatusThreshold(int degradedStatusThreshold)
   {
     if (debugEnabled())
     {