*/
package mir.storage;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.GregorianCalendar;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-import java.util.Vector;
-
import mir.config.MirPropertiesConfiguration;
-import mir.config.MirPropertiesConfiguration.PropertiesConfigExc;
import mir.entity.Entity;
import mir.entity.EntityList;
import mir.entity.StorableObjectEntity;
import mir.log.LoggerWrapper;
import mir.misc.StringUtil;
-import mir.storage.store.ObjectStore;
-import mir.storage.store.StorableObject;
-import mir.storage.store.StoreContainerType;
-import mir.storage.store.StoreIdentifier;
-import mir.storage.store.StoreUtil;
+import mir.storage.store.*;
import mir.util.JDBCStringRoutines;
+import mircoders.global.MirGlobal;
-import com.codestudio.util.SQLManager;
-
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.sql.*;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.*;
/**
- * Diese Klasse implementiert die Zugriffsschicht auf die Datenbank.
- * Alle Projektspezifischen Datenbankklassen erben von dieser Klasse.
- * In den Unterklassen wird im Minimalfall nur die Tabelle angegeben.
- * Im Konfigurationsfile findet sich eine Verweis auf den verwendeten
- * Treiber, Host, User und Passwort, ueber den der Zugriff auf die
- * Datenbank erfolgt.
+ * Implements database access.
*
- * @version $Id: Database.java,v 1.44.2.22 2004/02/08 21:05:01 zapata Exp $
+ * @version $Id: Database.java,v 1.44.2.23 2004/11/21 22:07:13 zapata Exp $
* @author rk
*
*/
private static final int _millisPerHour = 60 * 60 * 1000;
protected LoggerWrapper logger;
+
protected MirPropertiesConfiguration configuration;
protected String mainTable;
protected String primaryKeySequence = null;
protected String primaryKeyField = "id";
- protected boolean evaluatedMetaData = false;
- protected ArrayList metadataFields;
- protected ArrayList metadataLabels;
- protected ArrayList metadataNotNullFields;
- protected int[] metadataTypes;
+ protected List fieldNames;
+ protected int[] fieldTypes;
+
protected Class theEntityClass;
protected boolean hasTimestamp = true;
private int defaultLimit;
* erzeugt.
*/
public Database() throws StorageObjectFailure {
- try {
- configuration = MirPropertiesConfiguration.instance();
- }
- catch (PropertiesConfigExc e) {
- throw new StorageObjectFailure(e);
- }
+ configuration = MirPropertiesConfiguration.instance();
logger = new LoggerWrapper("Database");
timezone = TimeZone.getTimeZone(configuration.getString("Mir.DefaultTimezone"));
internalDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
}
/**
- * Liefert Feldtypen der Felder der Tabelle zurueck (s.a. java.sql.Types)
- * @return int-Array mit den Typen der Felder
- * @exception StorageObjectFailure
- */
- public int[] getTypes() throws StorageObjectFailure {
- if (metadataTypes == null) {
- get_meta_data();
- }
-
- return metadataTypes;
- }
-
- /**
- * Liefert eine Liste der Labels der Tabellenfelder
+ * {@inheritDoc}
*/
- public List getLabels() throws StorageObjectFailure {
- if (metadataLabels == null) {
- get_meta_data();
+ public List getFieldNames() throws StorageObjectFailure {
+ if (fieldNames == null) {
+ retrieveMetaData();
}
- return metadataLabels;
- }
-
- /**
- * Liefert eine Liste der Felder der Tabelle
- * @return ArrayList mit Feldern
- */
- public List getFields() throws StorageObjectFailure {
- if (metadataFields == null) {
- get_meta_data();
- }
-
- return metadataFields;
+ return fieldNames;
}
/**
}
Statement stmt = null;
- Connection con = getPooledCon();
+ Connection con = obtainConnection();
Entity returnEntity = null;
try {
rs = executeSql(stmt, selectSql);
if (rs != null) {
- if (evaluatedMetaData == false) {
- evalMetaData(rs.getMetaData());
- }
-
if (rs.next()) {
returnEntity = makeEntityFromResultSet(rs);
}
return selectByWhereClause( mainTablePrefix, extraTables, aWhereClause, "", 0, defaultLimit);
}
- /**
- * select-Operator um Datensaetze zu bekommen, die key = value erfuellen.
- * @param aField Datenbankfeld der Bedingung.
- * @param aValue Wert die der key anehmen muss.
- * @return EntityList mit den gematchten Entities
- */
public EntityList selectByFieldValue(String aField, String aValue) throws StorageObjectFailure {
return selectByFieldValue(aField, aValue, 0);
}
- /**
- * select-Operator um Datensaetze zu bekommen, die key = value erfuellen.
- * @param aField Datenbankfeld der Bedingung.
- * @param aValue Wert die der key anehmen muss.
- * @param offset Gibt an ab welchem Datensatz angezeigt werden soll.
- * @return EntityList mit den gematchten Entities
- */
public EntityList selectByFieldValue(String aField, String aValue, int offset) throws StorageObjectFailure {
- return selectByWhereClause(aField + "=" + aValue, offset);
+ return selectByWhereClause(aField + "='" + JDBCStringRoutines.escapeStringLiteral(aValue)+"'", offset);
}
/**
* select-Operator returns EntityList with matching rows in Database.
* @param aWhereClause where-Clause
* @param anOrderByClause orderBy-Clause
- * @param offset ab welchem Datensatz
- * @param limit wieviele Datens?tze
+ * @param anOffset ab welchem Datensatz
+ * @param aLimit wieviele Datens?tze
* @return EntityList mit den gematchten Entities
* @exception StorageObjectFailure
*/
- public EntityList selectByWhereClause(String mainTablePrefix, List extraTables,
+ public EntityList selectByWhereClause(
+ String aMainTablePrefix, List anExtraTables,
String aWhereClause, String anOrderByClause,
- int offset, int limit) throws StorageObjectFailure {
+ int anOffset, int aLimit) throws StorageObjectFailure {
- // TODO get rid of emtpy Strings in extraTables
- // make extraTables null, if single empty String in it
+ // TODO get rid of emtpy Strings in anExtraTables
+ // make anExtraTables null, if single empty String in it
// cause StringUtil.splitString puts in emptyString
- if (extraTables != null && ((String)extraTables.get(0)).trim().equals(""))
- {
- logger.debug("+++ made extraTables to null!");
- extraTables=null;
- }
- String useTable = mainTable;
- String selectStar = "*";
- if (mainTablePrefix!=null && mainTablePrefix.trim().length()>0) {
- useTable+=" "+mainTablePrefix;
- selectStar=mainTablePrefix.trim() + ".*";
- }
+ if (anExtraTables!=null && ((String) anExtraTables.get(0)).trim().equals("")){
+ logger.debug("+++ made anExtraTables to null!");
+ anExtraTables=null;
+ }
+
+ String useTable = mainTable;
+ String selection = "*";
+
+ if (aMainTablePrefix != null && aMainTablePrefix.trim().length() > 0) {
+ useTable += " " + aMainTablePrefix;
+ selection = aMainTablePrefix.trim() + ".*";
+ }
// check o_store for entitylist
// only if no relational select
- if (extraTables==null) {
+ if (anExtraTables==null) {
if (StoreUtil.extendsStorableEntity(theEntityClass)) {
StoreIdentifier searchSid = new StoreIdentifier(theEntityClass,
StoreContainerType.STOC_TYPE_ENTITYLIST,
StoreUtil.getEntityListUniqueIdentifierFor(mainTable,
- aWhereClause, anOrderByClause, offset, limit));
+ aWhereClause, anOrderByClause, anOffset, aLimit));
EntityList hit = (EntityList) o_store.use(searchSid);
if (hit != null) {
// local
EntityList theReturnList = null;
- Connection con = null;
- Statement stmt = null;
- ResultSet rs;
+ Connection connection = null;
+ Statement statement = null;
+ ResultSet resultSet;
+
int offsetCount = 0;
int count = 0;
StringBuffer countSql =
new StringBuffer("select count(*) from ").append(useTable);
StringBuffer selectSql =
- new StringBuffer("select "+selectStar+" from ").append(useTable);
+ new StringBuffer("select "+selection+" from ").append(useTable);
// append extratables, if necessary
- if (extraTables!=null) {
- for (int i=0;i < extraTables.size();i++) {
- if (!extraTables.get(i).equals("")) {
- countSql.append( ", " + extraTables.get(i));
- selectSql.append( ", " + extraTables.get(i));
+ if (anExtraTables!=null) {
+ for (int i=0;i < anExtraTables.size();i++) {
+ if (!anExtraTables.get(i).equals("")) {
+ countSql.append( ", " + anExtraTables.get(i));
+ selectSql.append( ", " + anExtraTables.get(i));
}
}
}
selectSql.append(" order by ").append(anOrderByClause);
}
- if ((limit > -1) && (offset > -1)) {
- selectSql.append(" LIMIT ").append(limit).append(" OFFSET ").append(offset);
+ if ((aLimit > -1) && (anOffset > -1)) {
+ selectSql.append(" LIMIT ").append(aLimit).append(" OFFSET ").append(anOffset);
}
// execute sql
try {
- con = getPooledCon();
- stmt = con.createStatement();
+ connection = obtainConnection();
+ statement = connection.createStatement();
// selecting...
- rs = executeSql(stmt, selectSql.toString());
-
- if (rs != null) {
- if (!evaluatedMetaData) {
- evalMetaData(rs.getMetaData());
- }
+ resultSet = executeSql(statement, selectSql.toString());
+ if (resultSet != null) {
theReturnList = new EntityList();
Entity theResultEntity;
- while (rs.next()) {
- theResultEntity = makeEntityFromResultSet(rs);
+ while (resultSet.next()) {
+ theResultEntity = makeEntityFromResultSet(resultSet);
theReturnList.add(theResultEntity);
offsetCount++;
}
- rs.close();
+ resultSet.close();
}
// making entitylist infos
// now we decide if we have to know an overall count...
count = offsetCount;
- if ((limit > -1) && (offset > -1)) {
- if (offsetCount == limit) {
- rs = executeSql(stmt, countSql.toString());
+ if ((aLimit > -1) && (anOffset > -1)) {
+ if (offsetCount == aLimit) {
+ resultSet = executeSql(statement, countSql.toString());
- if (rs != null) {
- if (rs.next()) {
- count = rs.getInt(1);
+ if (resultSet != null) {
+ if (resultSet.next()) {
+ count = resultSet.getInt(1);
}
- rs.close();
+ resultSet.close();
}
else {
logger.error("Could not count: " + countSql);
}
theReturnList.setCount(count);
- theReturnList.setOffset(offset);
+ theReturnList.setOffset(anOffset);
theReturnList.setWhere(aWhereClause);
theReturnList.setOrder(anOrderByClause);
theReturnList.setStorage(this);
- theReturnList.setLimit(limit);
+ theReturnList.setLimit(aLimit);
- if (offset >= limit) {
- theReturnList.setPrevBatch(offset - limit);
+ if (anOffset >= aLimit) {
+ theReturnList.setPrevBatch(anOffset - aLimit);
}
- if ((offset + offsetCount) < count) {
- theReturnList.setNextBatch(offset + limit);
+ if ((anOffset + offsetCount) < count) {
+ theReturnList.setNextBatch(anOffset + aLimit);
}
- if (extraTables==null && StoreUtil.extendsStorableEntity(theEntityClass)) {
+ if (anExtraTables==null && StoreUtil.extendsStorableEntity(theEntityClass)) {
StoreIdentifier sid = theReturnList.getStoreIdentifier();
logger.debug("CACHE (add): " + sid.toString());
o_store.add(sid);
}
finally {
try {
- if (con != null) {
- freeConnection(con, stmt);
+ if (connection != null) {
+ freeConnection(connection, statement);
}
} catch (Throwable t) {
}
throws StorageObjectFailure {
Map theResultHash = new HashMap();
String theResult = null;
- int theType;
+ int type;
Entity returnEntity = null;
try {
if (hit != null) return hit;
}
-
- int size = metadataFields.size();
-
- for (int i = 0; i < size; i++) {
+ for (int i = 0; i < getFieldNames().size(); i++) {
// alle durchlaufen bis nix mehr da
- theType = metadataTypes[i];
+ type = fieldTypes[i];
- if (theType == java.sql.Types.LONGVARBINARY) {
+ if (type == java.sql.Types.LONGVARBINARY) {
InputStreamReader is =
(InputStreamReader) rs.getCharacterStream(i + 1);
is.close();
theResult = theResultString.toString();
- } else {
+ }
+ else {
theResult = null;
}
- } else {
- theResult = getValueAsString(rs, (i + 1), theType);
+ }
+ else {
+ theResult = getValueAsString(rs, (i + 1), type);
}
if (theResult != null) {
- theResultHash.put(metadataFields.get(i), theResult);
+ theResultHash.put(getFieldNames().get(i), theResult);
}
}
* @return der Wert des Primary-keys der eingef?gten Entity
*/
public String insert(Entity theEntity) throws StorageObjectFailure {
- //cache
- invalidatePopupCache();
-
- // invalidating all EntityLists corresponding with theEntityClass
- if (StoreUtil.extendsStorableEntity(theEntityClass)) {
- StoreContainerType stoc_type =
- StoreContainerType.valueOf(theEntityClass,
- StoreContainerType.STOC_TYPE_ENTITYLIST);
- o_store.invalidate(stoc_type);
- }
+ invalidateStore();
String returnId = null;
Connection con = null;
boolean firstField = true;
// make sql-string
- for (int i = 0; i < getFields().size(); i++) {
- aField = (String) getFields().get(i);
+ for (int i = 0; i < getFieldNames().size(); i++) {
+ aField = (String) getFieldNames().get(i);
if (!aField.equals(primaryKeyField)) {
aValue = null;
String sql = sqlBuf.toString();
logger.info("INSERT: " + sql);
- con = getPooledCon();
+ con = obtainConnection();
con.setAutoCommit(false);
pstmt = con.prepareStatement(sql);
* that chooses to either insert or update depending if we
* have a primary key in the entity. i don't know if we
* still need the streamed input fields. // rk */
+
/** todo extension: check if Entity did change, otherwise we don't need
* the roundtrip to the database */
/** invalidating corresponding entitylists in o_store*/
- if (StoreUtil.extendsStorableEntity(theEntityClass)) {
- StoreContainerType stoc_type =
- StoreContainerType.valueOf(theEntityClass,
- StoreContainerType.STOC_TYPE_ENTITYLIST);
- o_store.invalidate(stoc_type);
- }
+
+ invalidateStore();
String id = theEntity.getId();
String aField;
StringBuffer fv = new StringBuffer();
boolean firstField = true;
- //cache
- invalidatePopupCache();
-
// build sql statement
- for (int i = 0; i < getFields().size(); i++) {
- aField = (String) metadataFields.get(i);
+ for (int i = 0; i < getFieldNames().size(); i++) {
+ aField = (String) getFieldNames().get(i);
// only normal cases
// todo if entity.hasFieldValue returns false, then the value should be stored as null
new StringBuffer("update ").append(mainTable).append(" set ").append(fv);
// exceptions
- if (metadataFields.contains("webdb_lastchange")) {
+ if (getFieldNames().contains("webdb_lastchange")) {
sql.append(",webdb_lastchange=NOW()");
}
// special case: the webdb_create requires the field in yyyy-mm-dd HH:mm
// format so anything extra will be ignored. -mh
- if (metadataFields.contains("webdb_create") &&
+ if (getFieldNames().contains("webdb_create") &&
theEntity.hasFieldValue("webdb_create")) {
// minimum of 10 (yyyy-mm-dd)...
if (theEntity.getFieldValue("webdb_create").length() >= 10) {
logger.info("UPDATE: " + sql);
try {
- con = getPooledCon();
+ con = obtainConnection();
con.setAutoCommit(false);
pstmt = con.prepareStatement(sql.toString());
* @return boolean liefert true zurueck, wenn loeschen erfolgreich war.
*/
public boolean delete(String id) throws StorageObjectFailure {
- invalidatePopupCache();
-
// ostore send notification
if (StoreUtil.extendsStorableEntity(theEntityClass)) {
String uniqueId = id;
String sql =
"delete from " + mainTable + " where " + primaryKeyField + "='" + id + "'";
- //theLog.printInfo("DELETE " + sql);
+ logger.debug("DELETE " + sql);
try {
- con = getPooledCon();
+ con = obtainConnection();
stmt = con.createStatement();
res = stmt.executeUpdate(sql);
- } catch (SQLException sqe) {
+ }
+ catch (SQLException sqe) {
throwSQLException(sqe, "delete");
- } finally {
+ }
+ finally {
freeConnection(con, stmt);
}
+ invalidateStore();
+
return (res > 0) ? true : false;
}
* @throws StorageObjectFailure
*/
public int deleteByWhereClause(String aWhereClause) throws StorageObjectFailure {
- invalidatePopupCache();
- if (StoreUtil.extendsStorableEntity(theEntityClass)) {
- StoreContainerType stoc_type = StoreContainerType.valueOf(theEntityClass, StoreContainerType.STOC_TYPE_ENTITYLIST);
- o_store.invalidate(stoc_type);
- }
+ invalidateStore();
Statement stmt = null;
Connection con = null;
//theLog.printInfo("DELETE " + sql);
try {
- con = getPooledCon();
+ con = obtainConnection();
stmt = con.createStatement();
res = stmt.executeUpdate(sql);
}
* @return immer false
*/
public boolean delete(EntityList theEntityList) {
- invalidatePopupCache();
-
return false;
}
- protected void invalidatePopupCache() {
- /** todo invalidates toooo much */
- }
-
/**
* Diese Methode fuehrt den Sqlstring <i>sql</i> aus und timed im Logfile.
* @param stmt Statemnt
Connection connection = null;
Statement statement = null;
try {
- List result = new Vector();
- connection = getPooledCon();
+ List result = new ArrayList();
+ connection = obtainConnection();
statement = connection.createStatement();
ResultSet resultset = executeSql(statement, sql);
try {
String useTable = mainTable;
if (mainTablePrefix!=null && mainTablePrefix.trim().length()>0) {
- useTable+=" "+mainTablePrefix;
+ useTable+=" "+mainTablePrefix;
}
StringBuffer countSql =
new StringBuffer("select count(*) from ").append(useTable);
int result = 0;
try {
- con = getPooledCon();
+ con = obtainConnection();
stmt = con.createStatement();
ResultSet rs = executeSql(stmt, countSql.toString());
PreparedStatement pstmt = null;
try {
- con = getPooledCon();
+ con = obtainConnection();
pstmt = con.prepareStatement(sql);
result = pstmt.executeUpdate();
}
}
/**
- * Wertet ResultSetMetaData aus und setzt interne Daten entsprechend
- * @param md ResultSetMetaData
+ * Processes the metadata for the table this Database object is responsible for.
*/
- private void evalMetaData(ResultSetMetaData md) throws StorageObjectFailure {
- this.evaluatedMetaData = true;
- this.metadataFields = new ArrayList();
- this.metadataLabels = new ArrayList();
- this.metadataNotNullFields = new ArrayList();
+ private void processMetaData(ResultSetMetaData aMetaData) throws StorageObjectFailure {
+ fieldNames = new ArrayList();
try {
- int numFields = md.getColumnCount();
- this.metadataTypes = new int[numFields];
-
- String aField;
- int aType;
+ int numFields = aMetaData.getColumnCount();
+ fieldTypes = new int[numFields];
for (int i = 1; i <= numFields; i++) {
- aField = md.getColumnName(i);
- metadataFields.add(aField);
- metadataLabels.add(md.getColumnLabel(i));
- aType = md.getColumnType(i);
- metadataTypes[i - 1] = aType;
-
- if (aField.equals(primaryKeyField)) {
- }
-
- if (md.isNullable(i) == ResultSetMetaData.columnNullable) {
- metadataNotNullFields.add(aField);
- }
+ fieldNames.add(aMetaData.getColumnName(i));
+ fieldTypes[i - 1] = aMetaData.getColumnType(i);
}
}
catch (SQLException e) {
- throwSQLException(e, "evalMetaData");
+ throwSQLException(e, "processMetaData");
}
}
/**
- * Wertet die Metadaten eines Resultsets fuer eine Tabelle aus,
- * um die alle Columns und Typen einer Tabelle zu ermitteln.
+ * Retrieves metadata from the table this Database object represents
*/
- private void get_meta_data() throws StorageObjectFailure {
- Connection con = null;
- PreparedStatement pstmt = null;
+ private void retrieveMetaData() throws StorageObjectFailure {
+ Connection connection = null;
+ PreparedStatement statement = null;
String sql = "select * from " + mainTable + " where 0=1";
try {
- con = getPooledCon();
- pstmt = con.prepareStatement(sql);
+ connection = obtainConnection();
+ statement = connection.prepareStatement(sql);
logger.debug("METADATA: " + sql);
- ResultSet rs = pstmt.executeQuery();
- evalMetaData(rs.getMetaData());
- rs.close();
+ ResultSet resultSet = statement.executeQuery();
+ try {
+ processMetaData(resultSet.getMetaData());
+ }
+ finally {
+ resultSet.close();
+ }
}
catch (SQLException e) {
- throwSQLException(e, "get_meta_data");
+ throwSQLException(e, "retrieveMetaData");
}
finally {
- freeConnection(con, pstmt);
+ freeConnection(connection, statement);
}
}
- public Connection getPooledCon() throws StorageObjectFailure {
- Connection con = null;
-
+ public Connection obtainConnection() throws StorageObjectFailure {
try {
- con = SQLManager.getInstance().requestConnection();
+ return MirGlobal.getDatabaseEngine().obtainConnection();
}
- catch (SQLException e) {
- logger.error("could not connect to the database " + e.getMessage());
-
- throw new StorageObjectFailure("Could not connect to the database", e);
+ catch (Exception e) {
+ throw new StorageObjectFailure(e);
}
-
- return con;
}
- public void freeConnection(Connection con, Statement stmt)
- throws StorageObjectFailure {
- SQLManager.closeStatement(stmt);
- SQLManager.getInstance().returnConnection(con);
+ /**
+ * Closes a statement and returns its connection to the database engine.
+ * Failures are logged but never propagated, so this method is safe to
+ * call from finally blocks.
+ */
+ public void freeConnection(Connection aConnection, Statement aStatement) throws StorageObjectFailure {
+ try {
+ aStatement.close();
+ }
+ catch (Throwable t) {
+ // best-effort close: a failed close must not mask the original error
+ logger.warn("Can't close statement: " + t.toString());
+ }
+
+ try {
+ MirGlobal.getDatabaseEngine().releaseConnection(aConnection);
+ }
+ catch (Throwable t) {
+ logger.warn("Can't release connection: " + t.toString());
+ }
}
/**
logger.error(aMessage);
throw new StorageObjectFailure(aMessage, null);
}
+
+ /**
+ * Invalidates any entity lists for this Database's entity class that are
+ * cached in the object store (o_store), forcing subsequent selects to
+ * re-read from the database.
+ */
+ private void invalidateStore() {
+ // invalidating all EntityLists corresponding with theEntityClass;
+ // only storable entity classes participate in the cache at all
+ if (StoreUtil.extendsStorableEntity(theEntityClass)) {
+ StoreContainerType stoc_type =
+ StoreContainerType.valueOf(theEntityClass, StoreContainerType.STOC_TYPE_ENTITYLIST);
+ o_store.invalidate(stoc_type);
+ }
+ }
+
+ /**
+ * Retrieves a binary value using the supplied query. The query is expected
+ * to select a single blob column; the blob of the first result row is
+ * returned as a stream. The returned stream owns the connection and
+ * statement and releases them when it is closed. Returns null when the
+ * query yields no rows.
+ *
+ * NOTE(review): callers must close the returned stream, otherwise the
+ * connection stays checked out in non-auto-commit mode.
+ */
+ public InputStream getBinaryField(String aQuery) throws StorageObjectFailure, SQLException {
+ Connection connection=null;
+ Statement statement=null;
+ InputStream imageInputStream = null;
+
+ try {
+ connection = obtainConnection();
+ // blob streaming requires the transaction (and thus the connection)
+ // to stay open until the caller closes the returned stream
+ connection.setAutoCommit(false);
+ statement = connection.createStatement();
+ ResultSet resultSet = executeSql(statement, aQuery);
+
+ if(resultSet!=null) {
+ if (resultSet.next()) {
+ InputStream inputStream = resultSet.getBlob(1).getBinaryStream();
+ // wrap the stream so that closing it restores auto-commit and
+ // frees the connection/statement (see BinaryFieldInputStream)
+ imageInputStream = new BinaryFieldInputStream(inputStream, connection, statement);
+ }
+ resultSet.close();
+ }
+
+ if (imageInputStream == null) {
+ // no row matched: nothing references the connection anymore, so
+ // release it here (previously this path leaked the connection and
+ // left it in non-auto-commit mode)
+ connection.setAutoCommit(true);
+ freeConnection(connection, statement);
+ }
+ }
+ catch (Throwable t) {
+ logger.error("EntityImages.getImage failed: " + t.toString());
+ t.printStackTrace(logger.asPrintWriter(LoggerWrapper.DEBUG_MESSAGE));
+
+ try {
+ connection.setAutoCommit(true);
+ }
+ catch (Throwable e) {
+ logger.error("EntityImages.getImage resetting transaction mode failed: " + e.toString());
+ e.printStackTrace(logger.asPrintWriter(LoggerWrapper.DEBUG_MESSAGE));
+ }
+
+ try {
+ freeConnection(connection, statement);
+ }
+ catch (Throwable e) {
+ logger.error("EntityImages.getImage freeing connection failed: " +e.toString());
+ }
+
+ throw new StorageObjectFailure(t);
+ }
+
+ return imageInputStream;
+ }
+
+ /**
+ * Sets a binary (blob) value. The query is supposed to contain exactly one
+ * <code>?</code> placeholder denoting where the binary value should be
+ * inserted, e.g. <code>update images set image_data = ? where id= 22</code>.
+ *
+ * @param aQuery update/insert statement with a single binary placeholder
+ * @param aData the bytes to store
+ * @throws StorageObjectFailure if a connection cannot be obtained
+ * @throws SQLException if preparing or executing the statement fails
+ */
+ public void setBinaryField(String aQuery, byte aData[]) throws StorageObjectFailure, SQLException {
+ PreparedStatement statement = null;
+ Connection connection = obtainConnection();
+ try {
+ // write the blob inside an explicit transaction
+ connection.setAutoCommit(false);
+ try {
+ statement = connection.prepareStatement(aQuery);
+ statement.setBinaryStream(1, new ByteArrayInputStream(aData), aData.length);
+ statement.execute();
+ connection.commit();
+ }
+ finally {
+ // always restore auto-commit, even when the statement fails
+ connection.setAutoCommit(true);
+ }
+ }
+ finally {
+ // NOTE(review): statement may still be null here when prepareStatement
+ // threw; freeConnection logs (and swallows) the resulting failure
+ freeConnection(connection, statement);
+ }
+ }
+
+ /**
+ * A small InputStream wrapper that holds the DB connection resources
+ * backing a blob stream and frees them when the stream is closed.
+ */
+ private class BinaryFieldInputStream extends InputStream {
+ InputStream inputStream;
+ Connection connection;
+ Statement statement;
+
+ public BinaryFieldInputStream(InputStream aBlobInputStream, Connection aConnection, Statement aStatement ) {
+ inputStream = aBlobInputStream;
+ connection = aConnection;
+ statement = aStatement;
+ }
+
+ /**
+ * Closes the underlying blob stream, restores auto-commit mode and
+ * returns the connection/statement to the database engine.
+ */
+ public void close () throws IOException {
+ inputStream.close();
+ try {
+ connection.setAutoCommit(true);
+ freeConnection(connection, statement);
+ }
+ catch (Exception e) {
+ throw new IOException("close(): "+e.toString());
+ }
+ }
+
+ // NOTE(review): only the single-byte read() is overridden, so bulk reads
+ // fall back to InputStream's default implementation — correct but slow
+ public int read() throws IOException {
+ return inputStream.read();
+ }
+ }
}