comparison artifacts/src/main/java/org/dive4elements/river/artifacts/model/minfo/SedimentLoadDataFactory.java @ 8033:5e3f4b4fcb28

New way to load sediment loads from the database and cache them. The data structure is much more straightforward. TODO: Implement calculation on this basis.
author Sascha L. Teichmann <teichmann@intevation.de>
date Tue, 15 Jul 2014 12:47:52 +0200
parents
children b6e7cfcabf2c
comparison
equal deleted inserted replaced
8032:fd3a24336e6a 8033:5e3f4b4fcb28
1 /* Copyright (C) 2014 by Bundesanstalt für Gewässerkunde
2 * Software engineering by Intevation GmbH
3 *
4 * This file is Free Software under the GNU AGPL (>=v3)
5 * and comes with ABSOLUTELY NO WARRANTY! Check out the
6 * documentation coming with Dive4Elements River for details.
7 */
8 package org.dive4elements.river.artifacts.model.minfo;
9
10 import java.sql.Timestamp;
11 import java.util.HashMap;
12 import java.util.Iterator;
13
14 import net.sf.ehcache.Cache;
15 import net.sf.ehcache.Element;
16
17 import org.apache.log4j.Logger;
18 import org.dive4elements.river.artifacts.cache.CacheFactory;
19 import org.dive4elements.river.backend.SessionHolder;
20 import org.hibernate.SQLQuery;
21 import org.hibernate.Session;
22 import org.hibernate.type.StandardBasicTypes;
23
24 public class SedimentLoadDataFactory
25 {
26 private static Logger log = Logger.getLogger(SedimentLoadDataFactory.class);
27
28 public static final String CACHE_NAME = "sediment-load-data";
29
30 public static final String SUSPENDED_STRING = "Schwebstoff";
31
32 public static final String SQL_LOAD_RIVER_SEDIMENT_LOADS =
33 "SELECT " +
34 "sl.id AS sl_id, " +
35 "sl.description AS sl_description, " +
36 "ti.start_time AS ti_start_time, " +
37 "ti.stop_time AS ti_stoptime, " +
38 "slv.value AS slv_value, " +
39 "gf.name AS gf_name, " +
40 "ms.id AS ms_id, " +
41 "ms.station AS ms_station, " +
42 "ms.measurement_type AS ms_type " +
43 "FROM sediment_load_values slv " +
44 "JOIN sediment_load sl ON slv.sediment_load_id = sl.id " +
45 "JOIN time_intervals ti ON sl.time_interval_id = ti.id " +
46 "JOIN grain_fraction gf ON sl.grain_fraction_id = gf.id " +
47 "JOIN measurement_station ms ON slv.measurement_station_id = ms.id " +
48 "JOIN rivers r ON ms.river_id = r.id " +
49 "WHERE r.name = :river " +
50 "ORDER BY sl.id";
51
52 private SedimentLoadDataFactory() {
53 }
54
55 public SedimentLoadData getSedimentLoadData(String river) {
56 boolean debug = log.isDebugEnabled();
57
58 if (debug) {
59 log.debug(
60 "Looking for sediment load data for river '" + river + "'");
61 }
62
63 Cache cache = CacheFactory.getCache(CACHE_NAME);
64
65 if (cache == null) {
66 if (debug) {
67 log.debug("Cache not configured.");
68 }
69 return getUncached(river);
70 }
71
72 String key = "sediment-load-" + river;
73
74 Element element = cache.get(key);
75
76 if (element != null) {
77 if (debug) {
78 log.debug("Sediment load data found in cache");
79 }
80 return (SedimentLoadData)element.getValue();
81 }
82
83 SedimentLoadData sedimentLoad = getUncached(river);
84
85 if (sedimentLoad != null) {
86 if (debug) {
87 log.debug("Store sediment load data in cache.");
88 }
89 cache.put(new Element(key, sedimentLoad));
90 }
91
92 return sedimentLoad;
93 }
94
95 public SedimentLoadData getUncached(String river) {
96
97 Session session = SessionHolder.HOLDER.get();
98
99 SQLQuery sqlQuery = session.createSQLQuery(SQL_LOAD_RIVER_SEDIMENT_LOADS)
100 .addScalar("sl_id", StandardBasicTypes.INTEGER)
101 .addScalar("sl_description", StandardBasicTypes.STRING)
102 .addScalar("ti_start_time", StandardBasicTypes.TIMESTAMP)
103 .addScalar("ti_stop_time", StandardBasicTypes.TIMESTAMP)
104 .addScalar("slv_value", StandardBasicTypes.DOUBLE)
105 .addScalar("gf_name", StandardBasicTypes.STRING)
106 .addScalar("ms_id", StandardBasicTypes.INTEGER)
107 .addScalar("ms_station", StandardBasicTypes.DOUBLE)
108 .addScalar("ms_type", StandardBasicTypes.STRING);
109
110 sqlQuery.setString("river", river);
111
112 SedimentLoadData.Load load = null;
113 int grainFractionIndex = -1;
114
115 HashMap<Integer, SedimentLoadData.Station> id2station
116 = new HashMap<Integer, SedimentLoadData.Station>();
117
118 for (Iterator iter = sqlQuery.iterate(); iter.hasNext();) {
119 Object [] row = (Object [])iter.next();
120
121 Integer sl_id = (Integer)row[0];
122 String sl_description = (String)row[1];
123 Timestamp ti_start_time = (Timestamp)row[2];
124 Timestamp ti_stop_time = (Timestamp)row[3];
125 Double slv_value = (Double)row[4];
126 String gf_name = (String)row[5];
127 Integer ms_id = (Integer)row[6];
128 Double ms_station = (Double)row[7];
129 String ms_type = (String)row[8];
130
131 if (load == null || load.getId() != sl_id) {
132 load = new SedimentLoadData.Load(
133 sl_id, sl_description, ti_start_time, ti_stop_time);
134
135 // Grain fractions only change when a new sediment load starts.
136 grainFractionIndex =
137 SedimentLoadData.grainFractionIndex(gf_name);
138
139 if (grainFractionIndex == -1) {
140 log.error("Unknown grain fraction type: " + gf_name);
141 break;
142 }
143 }
144
145 SedimentLoadData.Station station = id2station.get(ms_id);
146 if (station == null) {
147 int type = ms_type.equalsIgnoreCase(SUSPENDED_STRING)
148 ? SedimentLoadData.Station.SUSPENDED
149 : SedimentLoadData.Station.BED_LOAD;
150
151 station = new SedimentLoadData.Station(type, ms_station);
152 id2station.put(ms_id, station);
153 }
154
155 station.addValue(
156 grainFractionIndex,
157 new SedimentLoadData.Value(load, slv_value));
158 }
159
160 SedimentLoadData sld = new SedimentLoadData();
161
162 for (SedimentLoadData.Station station: id2station.values()) {
163 sld.addStation(station);
164 }
165
166 return sld;
167 }
168 }
169 // vim:set ts=4 sw=4 si et sta sts=4 fenc=utf8 :

http://dive4elements.wald.intevation.org