Mercurial > dive4elements > river
diff artifacts/src/main/java/org/dive4elements/river/artifacts/model/minfo/SedimentLoadDataFactory.java @ 8033:5e3f4b4fcb28
New way to load sediment loads from the database and cache them. The data structure is much more straightforward. TODO: Implement calculation on this basis.
author | Sascha L. Teichmann <teichmann@intevation.de> |
---|---|
date | Tue, 15 Jul 2014 12:47:52 +0200 |
parents | |
children | b6e7cfcabf2c |
line wrap: on
line diff
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/artifacts/src/main/java/org/dive4elements/river/artifacts/model/minfo/SedimentLoadDataFactory.java Tue Jul 15 12:47:52 2014 +0200 @@ -0,0 +1,169 @@ +/* Copyright (C) 2014 by Bundesanstalt für Gewässerkunde + * Software engineering by Intevation GmbH + * + * This file is Free Software under the GNU AGPL (>=v3) + * and comes with ABSOLUTELY NO WARRANTY! Check out the + * documentation coming with Dive4Elements River for details. + */ +package org.dive4elements.river.artifacts.model.minfo; + +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.Iterator; + +import net.sf.ehcache.Cache; +import net.sf.ehcache.Element; + +import org.apache.log4j.Logger; +import org.dive4elements.river.artifacts.cache.CacheFactory; +import org.dive4elements.river.backend.SessionHolder; +import org.hibernate.SQLQuery; +import org.hibernate.Session; +import org.hibernate.type.StandardBasicTypes; + +public class SedimentLoadDataFactory +{ + private static Logger log = Logger.getLogger(SedimentLoadDataFactory.class); + + public static final String CACHE_NAME = "sediment-load-data"; + + public static final String SUSPENDED_STRING = "Schwebstoff"; + + public static final String SQL_LOAD_RIVER_SEDIMENT_LOADS = + "SELECT " + + "sl.id AS sl_id, " + + "sl.description AS sl_description, " + + "ti.start_time AS ti_start_time, " + + "ti.stop_time AS ti_stoptime, " + + "slv.value AS slv_value, " + + "gf.name AS gf_name, " + + "ms.id AS ms_id, " + + "ms.station AS ms_station, " + + "ms.measurement_type AS ms_type " + + "FROM sediment_load_values slv " + + "JOIN sediment_load sl ON slv.sediment_load_id = sl.id " + + "JOIN time_intervals ti ON sl.time_interval_id = ti.id " + + "JOIN grain_fraction gf ON sl.grain_fraction_id = gf.id " + + "JOIN measurement_station ms ON slv.measurement_station_id = ms.id " + + "JOIN rivers r ON ms.river_id = r.id " + + "WHERE r.name = :river " + + "ORDER BY sl.id"; + + private SedimentLoadDataFactory() 
{ + } + + public SedimentLoadData getSedimentLoadData(String river) { + boolean debug = log.isDebugEnabled(); + + if (debug) { + log.debug( + "Looking for sediment load data for river '" + river + "'"); + } + + Cache cache = CacheFactory.getCache(CACHE_NAME); + + if (cache == null) { + if (debug) { + log.debug("Cache not configured."); + } + return getUncached(river); + } + + String key = "sediment-load-" + river; + + Element element = cache.get(key); + + if (element != null) { + if (debug) { + log.debug("Sediment load data found in cache"); + } + return (SedimentLoadData)element.getValue(); + } + + SedimentLoadData sedimentLoad = getUncached(river); + + if (sedimentLoad != null) { + if (debug) { + log.debug("Store sediment load data in cache."); + } + cache.put(new Element(key, sedimentLoad)); + } + + return sedimentLoad; + } + + public SedimentLoadData getUncached(String river) { + + Session session = SessionHolder.HOLDER.get(); + + SQLQuery sqlQuery = session.createSQLQuery(SQL_LOAD_RIVER_SEDIMENT_LOADS) + .addScalar("sl_id", StandardBasicTypes.INTEGER) + .addScalar("sl_description", StandardBasicTypes.STRING) + .addScalar("ti_start_time", StandardBasicTypes.TIMESTAMP) + .addScalar("ti_stop_time", StandardBasicTypes.TIMESTAMP) + .addScalar("slv_value", StandardBasicTypes.DOUBLE) + .addScalar("gf_name", StandardBasicTypes.STRING) + .addScalar("ms_id", StandardBasicTypes.INTEGER) + .addScalar("ms_station", StandardBasicTypes.DOUBLE) + .addScalar("ms_type", StandardBasicTypes.STRING); + + sqlQuery.setString("river", river); + + SedimentLoadData.Load load = null; + int grainFractionIndex = -1; + + HashMap<Integer, SedimentLoadData.Station> id2station + = new HashMap<Integer, SedimentLoadData.Station>(); + + for (Iterator iter = sqlQuery.iterate(); iter.hasNext();) { + Object [] row = (Object [])iter.next(); + + Integer sl_id = (Integer)row[0]; + String sl_description = (String)row[1]; + Timestamp ti_start_time = (Timestamp)row[2]; + Timestamp ti_stop_time = 
(Timestamp)row[3]; + Double slv_value = (Double)row[4]; + String gf_name = (String)row[5]; + Integer ms_id = (Integer)row[6]; + Double ms_station = (Double)row[7]; + String ms_type = (String)row[8]; + + if (load == null || load.getId() != sl_id) { + load = new SedimentLoadData.Load( + sl_id, sl_description, ti_start_time, ti_stop_time); + + // Grain fractions only change when a new sediment load starts. + grainFractionIndex = + SedimentLoadData.grainFractionIndex(gf_name); + + if (grainFractionIndex == -1) { + log.error("Unknown grain fraction type: " + gf_name); + break; + } + } + + SedimentLoadData.Station station = id2station.get(ms_id); + if (station == null) { + int type = ms_type.equalsIgnoreCase(SUSPENDED_STRING) + ? SedimentLoadData.Station.SUSPENDED + : SedimentLoadData.Station.BED_LOAD; + + station = new SedimentLoadData.Station(type, ms_station); + id2station.put(ms_id, station); + } + + station.addValue( + grainFractionIndex, + new SedimentLoadData.Value(load, slv_value)); + } + + SedimentLoadData sld = new SedimentLoadData(); + + for (SedimentLoadData.Station station: id2station.values()) { + sld.addStation(station); + } + + return sld; + } +} +// vim:set ts=4 sw=4 si et sta sts=4 fenc=utf8 :