mirror of https://github.com/OpenIdentityPlatform/OpenDJ.git

586704a82f420874b18219bf80599bc4bc2d67e6..54c54fda42e49d077b1784f4940d20317e701d25
19 hours ago Prashant
Migrate to caffeine 3 (#594)
54c54f
23 hours ago Prashant
Bump logback to 1.5.32 (#595)
79f0cf
5 files modified
347 lines changed
opendj-embedded/pom.xml                                                                   |   4
opendj-server-legacy/pom.xml                                                              |  13
opendj-server-legacy/src/main/java/org/opends/server/backends/cassandra/Storage.java     | 123
opendj-server-legacy/src/main/java/org/opends/server/backends/jdbc/CachedConnection.java |  72
opendj-server-legacy/src/main/java/org/opends/server/backends/jdbc/Storage.java          | 135
opendj-embedded/pom.xml
@@ -38,7 +38,7 @@
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-core</artifactId>
            <version>1.5.29</version>
            <version>1.5.32</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
@@ -49,7 +49,7 @@
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.5.29</version>
            <version>1.5.32</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-api</artifactId>
opendj-server-legacy/pom.xml
@@ -193,17 +193,18 @@
      <artifactId>handler-jdbc</artifactId>
    </dependency>
    <!-- slf4j libraries -->
<!--     <dependency> -->
<!--       <groupId>org.slf4j</groupId> -->
<!--       <artifactId>slf4j-jdk14</artifactId> -->
<!--     </dependency> -->
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>jul-to-slf4j</artifactId>
    </dependency>
    <!-- Source: https://mvnrepository.com/artifact/com.github.ben-manes.caffeine/caffeine -->
    <dependency>
      <groupId>com.github.ben-manes.caffeine</groupId>
      <artifactId>caffeine</artifactId>
      <version>3.2.3</version>
      <scope>compile</scope>
    </dependency>
    <!-- mail -->
    <dependency>
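The hunk above adds Caffeine 3.2.3 as a direct dependency of opendj-server-legacy; the backends below stop importing com.google.common.cache and use com.github.benmanes.caffeine.cache instead. As a rough, self-contained sketch (not part of the diff) of the builder API those backends now rely on, assuming only the caffeine artifact on the classpath:

import java.time.Duration;

import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;

public class CaffeineSmokeTest {
    public static void main(String[] args) {
        // Same builder shape the migrated backends use: bounded size,
        // access-based expiry, and a lambda in place of Guava's CacheLoader.
        LoadingCache<String, String> cache = Caffeine.newBuilder()
                .maximumSize(1_000)
                .expireAfterAccess(Duration.ofMinutes(10))
                .build(key -> key.toUpperCase());   // loader runs on first access per key

        System.out.println(cache.get("opendj"));    // computed and cached
        System.out.println(cache.get("opendj"));    // served from the cache
    }
}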
opendj-server-legacy/src/main/java/org/opends/server/backends/cassandra/Storage.java
@@ -62,21 +62,19 @@
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
public class Storage implements org.opends.server.backends.pluggable.spi.Storage, ConfigurationChangeListener<CASBackendCfg>{
    
    private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
    //private final ServerContext serverContext;
    private CASBackendCfg config;
    public Storage(CASBackendCfg cfg, ServerContext serverContext) {
        //this.serverContext = serverContext;
        this.config = cfg;
        cfg.addCASChangeListener(this);
    }
    //config
@@ -88,37 +86,32 @@
    @Override
    public ConfigChangeResult applyConfigurationChange(CASBackendCfg cfg) {
        final ConfigChangeResult ccr = new ConfigChangeResult();
        try
        {
            this.config = cfg;
        }
        catch (Exception e)
        {
            addErrorMessage(ccr, LocalizableMessage.raw(stackTraceToSingleLineString(e)));
        }
        return ccr;
    }
    CqlSession session=null;
    final LoadingCache<String,PreparedStatement> prepared=CacheBuilder.newBuilder()
            .expireAfterAccess(Duration.ofMinutes(10))
            .maximumSize(4096)
            .build(new CacheLoader<String,PreparedStatement>(){
                @Override
                public PreparedStatement load(String query) throws Exception {
                    return session.prepare(query);
                }
            });
    final LoadingCache<String,PreparedStatement> prepared = Caffeine.newBuilder()
        .expireAfterAccess(Duration.ofMinutes(10))
        .maximumSize(4096)
        .build(query -> session.prepare(query));
    ResultSet execute(Statement<?> statement) {
        if (logger.isTraceEnabled()) {
            final ResultSet res=session.execute(statement.setTracing(true));
            logger.trace(LocalizableMessage.raw(
                    "cassandra: %s"
                    ,res.getExecutionInfo().getQueryTrace().getParameters()
                )
            );
            return res;
        }
        return session.execute(statement);
@@ -129,11 +122,11 @@
    public void open(AccessMode accessMode) throws Exception {
        this.accessMode=accessMode;
        session=CqlSession.builder()
            .withApplicationName("OpenDJ "+getKeyspaceName()+"."+config.getBackendId())
            .withConfigLoader(DriverConfigLoader.fromDefaults(Storage.class.getClassLoader()))
            .build();
        if (AccessMode.READ_WRITE.equals(accessMode)) {
            execute(prepared.getUnchecked("CREATE KEYSPACE IF NOT EXISTS "+getKeyspaceName()+" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};").bind().setExecutionProfileName(profile));
            execute(prepared.get("CREATE KEYSPACE IF NOT EXISTS "+getKeyspaceName()+" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};").bind().setExecutionProfileName(profile));
        }
        storageStatus = StorageStatus.working();
    }
@@ -163,7 +156,7 @@
    
    @Override
    public void removeStorageFiles() throws StorageRuntimeException {
        final Boolean isOpen=getStorageStatus().isWorking();
        final boolean isOpen=getStorageStatus().isWorking();
        if (!isOpen) {
            try {
                open(AccessMode.READ_WRITE);
@@ -172,7 +165,7 @@
            }
        }
        try {
            execute(prepared.getUnchecked("TRUNCATE TABLE "+getTableName()+";").bind().setExecutionProfileName(profile));
            execute(prepared.get("TRUNCATE TABLE "+getTableName()+";").bind().setExecutionProfileName(profile));
        }catch (Throwable e) {}
        if (!isOpen) {
            close();
@@ -210,7 +203,7 @@
        @Override
        public void openTree(TreeName name, boolean createOnDemand) {
            if (createOnDemand) {
                execute(prepared.getUnchecked("CREATE TABLE IF NOT EXISTS "+getTableName()+" (baseDN text,indexId text,key blob,value blob,PRIMARY KEY ((baseDN,indexId),key));").bind().setExecutionProfileName(profile));
                execute(prepared.get("CREATE TABLE IF NOT EXISTS "+getTableName()+" (baseDN text,indexId text,key blob,value blob,PRIMARY KEY ((baseDN,indexId),key));").bind().setExecutionProfileName(profile));
            }
        }
        
@@ -222,10 +215,10 @@
        @Override
        public ByteString read(TreeName treeName, ByteSequence key) {
            final Row row=execute(
                    prepared.getUnchecked("SELECT value FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId and key=:key").bind()
                        .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                        .setByteBuffer("key", ByteBuffer.wrap(key.toByteArray()))
                    ).one();
                prepared.get("SELECT value FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId and key=:key").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                    .setByteBuffer("key", ByteBuffer.wrap(key.toByteArray()))
            ).one();
            return row==null?null:ByteString.wrap(row.getByteBuffer("value").array());
        }
@@ -237,9 +230,9 @@
        @Override
        public long getRecordCount(TreeName treeName) {
            return execute(
                    prepared.getUnchecked("SELECT count(*) FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId").bind()
                        .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                    ).one().getLong(0);
                prepared.get("SELECT count(*) FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
            ).one().getLong(0);
        }
        @Override
@@ -247,20 +240,20 @@
            checkReadOnly();
            openTree(treeName,true);
            execute(
                    prepared.getUnchecked("DELETE FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId").bind()
                        .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                    );
                prepared.get("DELETE FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
            );
        }
        @Override
        public void put(TreeName treeName, ByteSequence key, ByteSequence value) {
            checkReadOnly();
            execute(
                prepared.getUnchecked("INSERT INTO "+getTableName()+" (baseDN,indexId,key,value) VALUES (:baseDN,:indexId,:key,:value)").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                prepared.get("INSERT INTO "+getTableName()+" (baseDN,indexId,key,value) VALUES (:baseDN,:indexId,:key,:value)").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                    .setByteBuffer("key", ByteBuffer.wrap(key.toByteArray()))
                    .setByteBuffer("value",ByteBuffer.wrap(value.toByteArray()))
                );
            );
        }
        @Override
@@ -269,15 +262,15 @@
            final ByteString oldValue=read(treeName,key);
            final ByteSequence newValue=f.computeNewValue(oldValue);
            if (Objects.equals(newValue, oldValue))
            {
                return false;
            }
            if (newValue == null)
            {
                delete(treeName, key);
                return true;
            }
            put(treeName,key,newValue);
            return true;
        }
@@ -285,10 +278,10 @@
        public boolean delete(TreeName treeName, ByteSequence key) {
            checkReadOnly();
            execute(
                    prepared.getUnchecked("DELETE FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId and key=:key").bind()
                        .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                        .setByteBuffer("key", ByteBuffer.wrap(key.toByteArray()))
                    );
                prepared.get("DELETE FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId and key=:key").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                    .setByteBuffer("key", ByteBuffer.wrap(key.toByteArray()))
            );
            return true;
        }
        
@@ -316,9 +309,9 @@
        ResultSet full(){
            return execute(
                        prepared.getUnchecked("SELECT key,value FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId ORDER BY key").bind()
                            .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
                        );
                prepared.get("SELECT key,value FROM "+getTableName()+" WHERE baseDN=:baseDN and indexId=:indexId ORDER BY key").bind()
                    .setString("baseDN", treeName.getBaseDN()).setString("indexId", treeName.getIndexId())
            );
        }
        
        @Override
@@ -424,7 +417,7 @@
                    return true;
                }
                ct++;
            }
            current=null;
            return false;
        }
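A note on the repeated prepared.getUnchecked(...) to prepared.get(...) substitutions in this file: Guava's LoadingCache.get(K) declares the checked ExecutionException, so the old code used getUnchecked, while Caffeine's LoadingCache.get(K) throws no checked exception and loader failures surface as unchecked exceptions. A minimal standalone sketch of the prepared-statement cache pattern, with a hypothetical Session stand-in for the Cassandra driver types:

import java.time.Duration;

import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;

class PreparedStatementCacheSketch {
    // Hypothetical stand-in for CqlSession/PreparedStatement.
    interface Session { String prepare(String query); }

    static LoadingCache<String, String> preparedCache(Session session) {
        return Caffeine.newBuilder()
                .expireAfterAccess(Duration.ofMinutes(10))
                .maximumSize(4096)
                .build(session::prepare);   // method reference replaces the anonymous CacheLoader
    }

    public static void main(String[] args) {
        LoadingCache<String, String> prepared = preparedCache(q -> "prepared[" + q + "]");
        // get(key) is Caffeine's unchecked lookup, i.e. what getUnchecked was for Guava.
        System.out.println(prepared.get("SELECT value FROM opendj WHERE key=:key"));
    }
}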
opendj-server-legacy/src/main/java/org/opends/server/backends/jdbc/CachedConnection.java
@@ -15,72 +15,66 @@
 */
package org.opends.server.backends.jdbc;
import com.google.common.cache.*;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.github.benmanes.caffeine.cache.RemovalCause;
import java.sql.*;
import java.util.LinkedList;
import java.time.Duration;
import java.util.Map;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.*;
public class CachedConnection implements Connection {
    final Connection parent;
    static LoadingCache<String, BlockingQueue<CachedConnection>> cached= CacheBuilder.newBuilder()
            .expireAfterAccess(Long.parseLong(System.getProperty("org.openidentityplatform.opendj.jdbc.ttl","15000")), TimeUnit.MILLISECONDS)
            .removalListener(new RemovalListener<String, BlockingQueue<CachedConnection>>() {
                @Override
                public void onRemoval(RemovalNotification<String, BlockingQueue<CachedConnection>> notification) {
                    assert notification.getValue() != null;
                    for (CachedConnection con: notification.getValue()) {
                            try {
                                if (!con.isClosed()) {
                                    con.parent.close();
                                }
                            } catch (SQLException e) {
                            }
                        }
    static LoadingCache<String, BlockingQueue<CachedConnection>> cached = Caffeine.newBuilder()
        .expireAfterAccess(Duration.ofMillis(Long.parseLong(System.getProperty("org.openidentityplatform.opendj.jdbc.ttl","15000"))))
        .removalListener((String key, BlockingQueue<CachedConnection> value, RemovalCause cause) -> {
            for (CachedConnection con : value) {
                try {
                    if (!con.isClosed()) {
                        con.parent.close();
                    }
                } catch (SQLException e) {
                    // ignore
                }
            })
            .build(new CacheLoader<String, BlockingQueue<CachedConnection>>() {
                @Override
                public BlockingQueue<CachedConnection> load(String connectionString) throws Exception {
                    return new LinkedBlockingQueue<>();
                }
            });
            }
        })
        .build(conStr -> new LinkedBlockingQueue<>());
    final String connectionString;
    public CachedConnection(String connectionString,Connection parent) {
        this.connectionString=connectionString;
    public CachedConnection(String connectionString, Connection parent) {
        this.connectionString = connectionString;
        this.parent = parent;
    }
    static Connection getConnection(String connectionString) throws Exception {
        return getConnection(connectionString,0);
        return getConnection(connectionString, 0);
    }
    static Connection getConnection(String connectionString, final int waitTime) throws Exception {
        CachedConnection con=cached.get(connectionString).poll(waitTime,TimeUnit.MILLISECONDS);
        while(con!=null) {
        CachedConnection con = cached.get(connectionString).poll(waitTime, TimeUnit.MILLISECONDS);
        while (con != null) {
            if (!con.isValid(0)) {
                try {
                    con.parent.close();
                } catch (SQLException e) {
                    con=null;
                    con = null;
                }
                con=cached.get(connectionString).poll();
            }else{
                con = cached.get(connectionString).poll();
            } else {
                return con;
            }
        }
        try {
            final Connection conNew= DriverManager.getConnection(connectionString);
            final Connection conNew = DriverManager.getConnection(connectionString);
            conNew.setAutoCommit(false);
            conNew.setTransactionIsolation(TRANSACTION_READ_COMMITTED);
            return new CachedConnection(connectionString, conNew);
        }catch (SQLException e) { //max_connection server error: try recursion for reuse connection
            return getConnection(connectionString,(waitTime==0)?1:waitTime*2);
        } catch (SQLException e) { // max_connection server error: try recursion for reuse connection
            return getConnection(connectionString, (waitTime == 0) ? 1 : waitTime * 2);
        }
    }
@@ -127,11 +121,7 @@
    @Override
    public void close() throws SQLException {
        rollback();
        try {
            cached.get(connectionString).add(this);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
        cached.get(connectionString).add(this);
    }
    @Override
@@ -196,7 +186,7 @@
    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
        return parent.prepareCall(sql, resultSetType, resultSetConcurrency) ;
        return parent.prepareCall(sql, resultSetType, resultSetConcurrency);
    }
    @Override
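For context on the cached pool field above: Caffeine's removal listener is a three-argument lambda (key, value, cause) instead of Guava's RemovalListener/RemovalNotification pair, and expireAfterAccess takes a java.time.Duration instead of a (long, TimeUnit) pair. A standalone sketch of that pattern outside the OpenDJ classes, with a plain Queue<String> standing in for the connection queue and an illustrative JDBC URL:

import java.time.Duration;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.github.benmanes.caffeine.cache.RemovalCause;

class PoolCacheSketch {
    public static void main(String[] args) {
        LoadingCache<String, Queue<String>> pools = Caffeine.newBuilder()
                // Duration replaces the (long, TimeUnit) overload used with Guava.
                .expireAfterAccess(Duration.ofMillis(15_000))
                // (key, value, cause) lambda replaces RemovalNotification.
                .removalListener((String url, Queue<String> pool, RemovalCause cause) ->
                        System.out.println("evicting pool for " + url + " (" + cause + ")"))
                .build(url -> new ConcurrentLinkedQueue<>());

        pools.get("jdbc:postgresql://localhost/opendj").add("connection-1");
    }
}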
opendj-server-legacy/src/main/java/org/opends/server/backends/jdbc/Storage.java
@@ -15,9 +15,9 @@
 */
package org.opends.server.backends.jdbc;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import org.forgerock.i18n.LocalizableMessage;
import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.config.server.ConfigChangeResult;
@@ -36,9 +36,9 @@
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.sql.*;
import java.util.*;
import java.util.concurrent.ExecutionException;
import static org.opends.server.backends.pluggable.spi.StorageUtils.addErrorMessage;
import static org.opends.server.util.StaticUtils.stackTraceToSingleLineString;
@@ -47,11 +47,11 @@
    
    private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
    private JDBCBackendCfg config;
    public Storage(JDBCBackendCfg cfg, ServerContext serverContext) {
        this.config = cfg;
        cfg.addJDBCChangeListener(this);
    }
    //config
@@ -63,15 +63,15 @@
    @Override
    public ConfigChangeResult applyConfigurationChange(JDBCBackendCfg cfg) {
        final ConfigChangeResult ccr = new ConfigChangeResult();
        try
        {
            this.config = cfg;
        }
        catch (Exception e)
        {
            addErrorMessage(ccr, LocalizableMessage.raw(stackTraceToSingleLineString(e)));
        }
        return ccr;
    }
    ResultSet executeResultSet(PreparedStatement statement) throws SQLException {
@@ -88,7 +88,7 @@
        return statement.executeUpdate();
    }
    Connection getConnection() throws Exception {
        return CachedConnection.getConnection(config.getDBDirectory());
    }
@@ -113,29 +113,26 @@
        storageStatus = StorageStatus.lockedDown(LocalizableMessage.raw("closed"));
    }
    final LoadingCache<TreeName,String> tree2table= CacheBuilder.newBuilder()
            .build(new CacheLoader<TreeName, String>() {
                @Override
                public String load(TreeName treeName) throws Exception {
                    final MessageDigest md = MessageDigest.getInstance("SHA-224");
                    final byte[] messageDigest = md.digest(treeName.toString().getBytes());
                    final StringBuilder hashtext = new StringBuilder(56);
                    for (byte b : messageDigest) {
                        String hex = Integer.toHexString(0xff & b);
                        if (hex.length() == 1) hashtext.append('0');
                        hashtext.append(hex);
                    }
                    return "opendj_"+hashtext;
    final LoadingCache<TreeName,String> tree2table = Caffeine.newBuilder()
        .build(treeName -> {
            try {
                final MessageDigest md = MessageDigest.getInstance("SHA-224");
                final byte[] messageDigest = md.digest(treeName.toString().getBytes());
                final StringBuilder hashtext = new StringBuilder(56);
                for (byte b : messageDigest) {
                    String hex = Integer.toHexString(0xff & b);
                    if (hex.length() == 1) hashtext.append('0');
                    hashtext.append(hex);
                }
            });
                return "opendj_" + hashtext;
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        });
    String getTableName(TreeName treeName) {
        try {
            return tree2table.get(treeName);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
        return tree2table.get(treeName);
    }
    @Override
    public void removeStorageFiles() throws StorageRuntimeException {
@@ -195,7 +192,7 @@
        }
    }
    final static byte[] NULL=new byte[]{(byte)0};
    static final byte[] NULL=new byte[]{(byte)0};
    static byte[] real2db(byte[] real) {
        return real.length==0?NULL:real;
@@ -204,22 +201,24 @@
        return Arrays.equals(NULL,db)?new byte[0]:db;
    }
    final LoadingCache<ByteBuffer,String> key2hash= CacheBuilder.newBuilder()
            .softValues()
            .build(new CacheLoader<ByteBuffer, String>() {
                @Override
                public String load(ByteBuffer key) throws Exception {
                    final MessageDigest md = MessageDigest.getInstance("SHA-512");
                    final byte[] messageDigest = md.digest(key.array());
                    final StringBuilder hashtext = new StringBuilder(128);
                    for (byte b : messageDigest) {
                        String hex = Integer.toHexString(0xff & b);
                        if (hex.length() == 1) hashtext.append('0');
                        hashtext.append(hex);
                    }
                    return hashtext.toString();
    final LoadingCache<ByteBuffer,String> key2hash = Caffeine.newBuilder()
        .softValues()
        .build(key -> {
            try {
                final MessageDigest md = MessageDigest.getInstance("SHA-512");
                final byte[] messageDigest = md.digest(key.array());
                final StringBuilder hashtext = new StringBuilder(128);
                for (byte b : messageDigest) {
                    String hex = Integer.toHexString(0xff & b);
                    if (hex.length() == 1) hashtext.append('0');
                    hashtext.append(hex);
                }
            });
                return hashtext.toString();
            } catch (NoSuchAlgorithmException e) {
                throw new RuntimeException(e);
            }
        });
    private class ReadableTransactionImpl implements ReadableTransaction {
        final Connection con;
        boolean isReadOnly=true;
@@ -236,7 +235,7 @@
                try(ResultSet rc=executeResultSet(statement)) {
                    return rc.next() ? ByteString.wrap(rc.getBytes("v")) : null;
                }
            }catch (SQLException|ExecutionException e) {
            }catch (SQLException e) {
                throw new StorageRuntimeException(e);
            }
        }
@@ -327,12 +326,12 @@
        public void put(TreeName treeName, ByteSequence key, ByteSequence value) {
            try {
                upsert(treeName, key, value);
            } catch (SQLException|ExecutionException e) {
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        }
        boolean upsert(TreeName treeName, ByteSequence key, ByteSequence value) throws SQLException, ExecutionException {
        boolean upsert(TreeName treeName, ByteSequence key, ByteSequence value) throws SQLException {
            final String driverName=((CachedConnection) con).parent.getClass().getName();
            if (driverName.contains("postgres")) { //postgres upsert
                try (final PreparedStatement statement = con.prepareStatement("insert into " + getTableName(treeName) + " (h,k,v) values (?,?,?) ON CONFLICT (h, k) DO UPDATE set v=excluded.v")) {
@@ -367,7 +366,7 @@
            }
        }
        boolean insert(TreeName treeName, ByteSequence key, ByteSequence value) throws SQLException, ExecutionException {
        boolean insert(TreeName treeName, ByteSequence key, ByteSequence value) throws SQLException {
            try (final PreparedStatement statement = con.prepareStatement("insert into " + getTableName(treeName) + " (h,k,v) select ?,?,? where not exists (select 1 from "+getTableName(treeName)+" where  h=? and k=? )")) {
                statement.setString(1, key2hash.get(ByteBuffer.wrap(key.toByteArray())));
                statement.setBytes(2, real2db(key.toByteArray()));
@@ -378,7 +377,7 @@
            }
        }
        boolean update(TreeName treeName, ByteSequence key, ByteSequence value) throws SQLException, ExecutionException {
        boolean update(TreeName treeName, ByteSequence key, ByteSequence value) throws SQLException {
            try (final PreparedStatement statement=con.prepareStatement("update "+getTableName(treeName)+" set v=? where h=? and k=?")){
                statement.setBytes(1,value.toByteArray());
                statement.setString(2,key2hash.get(ByteBuffer.wrap(key.toByteArray())));
@@ -392,14 +391,14 @@
            final ByteString oldValue=read(treeName,key);
            final ByteSequence newValue=f.computeNewValue(oldValue);
            if (Objects.equals(newValue, oldValue))
            {
                return false;
            }
            if (newValue == null)
            {
                return delete(treeName, key);
            }
            put(treeName,key,newValue);
            return true;
        }
@@ -409,7 +408,7 @@
                statement.setString(1,key2hash.get(ByteBuffer.wrap(key.toByteArray())));
                statement.setBytes(2,real2db(key.toByteArray()));
                return (execute(statement)==1 && statement.getUpdateCount()>0);
            }catch (SQLException|ExecutionException e) {
            }catch (SQLException e) {
                throw new StorageRuntimeException(e);
            }
        }
@@ -426,8 +425,8 @@
            this.isReadOnly=isReadOnly;
            try {
                statement=con.prepareStatement("select h,k,v from "+getTableName(treeName)+" order by k",
                    isReadOnly?ResultSet.TYPE_SCROLL_INSENSITIVE:ResultSet.TYPE_SCROLL_SENSITIVE,
                    isReadOnly?ResultSet.CONCUR_READ_ONLY:ResultSet.CONCUR_UPDATABLE);
                rc=executeResultSet(statement);
            }catch (SQLException e) {
                throw new StorageRuntimeException(e);
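The ExecutionException cleanups in this file follow from the same API difference noted for the Cassandra backend: Caffeine's LoadingCache.get(K) throws no checked exception, so getTableName no longer needs its try/catch and upsert/insert/update drop ExecutionException from their throws clauses. A minimal sketch of that call-site difference, loosely mirroring the soft-valued key2hash cache above (HexFormat, Java 17+, stands in for the manual hex loop):

import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.HexFormat;

import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;

class KeyHashCacheSketch {
    // Soft-valued cache of SHA-512 hex digests, as in the key2hash field above.
    static final LoadingCache<ByteBuffer, String> key2hash = Caffeine.newBuilder()
            .softValues()
            .build(key -> HexFormat.of().formatHex(
                    MessageDigest.getInstance("SHA-512").digest(key.array())));

    public static void main(String[] args) {
        // With Guava this lookup needed a try/catch for ExecutionException;
        // Caffeine computes, caches and returns without a checked exception.
        System.out.println(key2hash.get(ByteBuffer.wrap("cn=example".getBytes())));
    }
}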