# HG changeset patch # User Magnus Schieder # Date 1542887840 -3600 # Node ID e2864dabdb8ccefd16206871edf8074d10c9c401 # Parent cdab667c6abbb65957d036fee415c1604fa013c0 fixes a logical error in the filtering of columns. * The columns are stored in the pure order as they appear in the config. * display_issues.py has been renamed to roundup_cc_display.py. diff -r cdab667c6abb -r e2864dabdb8c collect_issues.py --- a/collect_issues.py Tue Nov 13 21:04:22 2018 +0100 +++ b/collect_issues.py Thu Nov 22 12:57:20 2018 +0100 @@ -24,20 +24,17 @@ import sys -# Getting all priority in their order. -CHECK_ROUNDUP_ORDER_PRIO = "priority?@action=export_csv&@columns=id,order" -# Getting all statuses in their order. -CHECK_ROUNDUP_ORDER_STATUS = "status?@action=export_csv&@columns=id,order" - # Getting keywords and their ids. CHECK_KEYWORD_VALUES = "keyword?@action=export_csv&@columns=id,name" # Getting states and their ids. CHECK_STATUS_VALUES = "status?@action=export_csv&@columns=id,name" +# Getting priority and their ids. 
+CHECK_PRIO_VALUES = "priority?@action=export_csv&@columns=id,name" -# Getting the priority of each issue with the filter status and keywords -SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword&@pagesize=500&@startwith=0&status={search_values}&keyword={keyword_values}" -# Getting the status of each issue with the filter keywords -SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=keyword&@pagesize=500&@startwith=0&keyword={keyword_values}" +# Getting the priority of each issue with the filter status, keywords, priority +SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword,priority&@pagesize=500&@startwith=0&status={status_values}&keyword={keyword_values}&priority={priority_values}" +# Getting the status of each issue with the filter keywords, status +SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=keyword&@pagesize=500&@startwith=0&keyword={keyword_values}&status={status_values}" def connect_to_server(params, baseurl): @@ -76,15 +73,18 @@ if con: con.close() -def get_ids(opener, baseurl, parameter, url): +def get_ids(opener, baseurl, parameter, url, include_no_prio=False): if parameter == [""]: - return "" + return ("", []) parameter_csv = get_csv_from_server(opener, baseurl, url) parameter_dict = {} for x in parameter_csv: parameter_dict[x["name"]] = x["id"] + if include_no_prio: + parameter_dict["None"] = "-1" + parameter_ids = [] for x in parameter: if x not in parameter_dict: @@ -93,32 +93,33 @@ parameter_ids.append(parameter_dict[x]) - return ",".join(parameter_ids) + return (",".join(parameter_ids), parameter_ids) -def issues_to_quantities(issue_csv, columns, orders_csv): +def issues_to_quantities(issue_csv, columns_ids): """Count issues per priority. 
Returns: a list of ints, containing how often a prio occurred [:-1] in order of the priorities, with the last being the "None" prio """ - quantities = [0] * (len(columns) +1) order_dict = {} + z = 0 + for x in columns_ids: + order_dict[x] = z + z += 1 - #convert the csv-dict reader to real dict - for row in orders_csv: - order_dict[row["id"]] = int(float(row["order"])) # int(float()) because the order-value is indeed "1.0, 2.0" etc + quantities = [0] * z for issue in issue_csv: priority = issue[issue_csv.fieldnames[0]] - if priority.isdigit() == True : - quantities[order_dict[priority] -1 ] += 1 - else: # no priority set - quantities[-1] += 1 - - # print("quantities : " + str(quantities)) + if priority in order_dict: + if priority.isdigit() == True : + quantities[order_dict[priority]] += 1 + else: + quantities[-1] += 1 + #print("quantities : " + str(quantities)) return quantities @@ -145,28 +146,29 @@ con.close() -def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, status, include_no_prio=False): +def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, status, include_no_prio): try: opener = connect_to_server(login_parmeters, baseurl) - keywords_ids_url = get_ids(opener, baseurl, keywords, CHECK_KEYWORD_VALUES) + keywords_ids_url, _ = get_ids(opener, baseurl, keywords, CHECK_KEYWORD_VALUES) if search == "prio": - order_csv = get_csv_from_server(opener, baseurl, CHECK_ROUNDUP_ORDER_PRIO) - status_ids_url = get_ids(opener, baseurl, status, CHECK_STATUS_VALUES) - formated_search_url = SEARCH_ROUNDUP_PRIO.format(search_values=status_ids_url, keyword_values=keywords_ids_url) + status_ids_url, _ = get_ids(opener, baseurl, status, CHECK_STATUS_VALUES, include_no_prio) + prio_ids_url, columns_ids = get_ids(opener, baseurl, columns, CHECK_PRIO_VALUES, include_no_prio) + formated_search_url = SEARCH_ROUNDUP_PRIO.format(status_values=status_ids_url, + 
keyword_values=keywords_ids_url, priority_values=prio_ids_url) elif search == "status": - order_csv = get_csv_from_server(opener, baseurl, CHECK_ROUNDUP_ORDER_STATUS) - formated_search_url = SEARCH_ROUNDUP_STATUS.format(keyword_values=keywords_ids_url) + status_ids_url, columns_ids = get_ids(opener, baseurl, columns, CHECK_STATUS_VALUES) + formated_search_url = SEARCH_ROUNDUP_STATUS.format(keyword_values=keywords_ids_url, + status_values=status_ids_url) current_issues_csv = get_csv_from_server(opener, baseurl, formated_search_url) opener.close() + #print(baseurl + formated_search_url) - quantities = issues_to_quantities(current_issues_csv, columns, order_csv) - if not include_no_prio: - quantities = quantities[:-1] + quantities = issues_to_quantities(current_issues_csv, columns_ids) save_issues_to_db(quantities, db_file, sql_create_db, sql_insert_in_db) diff -r cdab667c6abb -r e2864dabdb8c display_issues.py --- a/display_issues.py Tue Nov 13 21:04:22 2018 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,109 +0,0 @@ -#!/usr/bin/env python3 - -""" Display previously saved issues from a database on webpage via CGI. - -author: Sascha L. Teichmann -author: Bernhard Reiter -author: Sean Engelhardt - -(c) 2010,2015 by Intevation GmbH - -This is Free Software unter the terms of the -GNU GENERAL PUBLIC LICENSE Version 3 or later. -See http://www.gnu.org/licenses/gpl-3.0.txt for details - - -##Usage Example: ## -see display_issues_demo.py or __main__ section below. 
-""" - -import sqlite3 as db -import cgitb -import roundup_content_data as rcd -import os - - -def make_js_object_string(array): - formated = [] - - for item in array: - formated.append("{points: " + str(item) + "}") - - return ",".join(formated) - - -def make_js_object_date(array): - formated = [] - - for item in array: - formated.append("{date : new Date('" + str(item) + "')}") - - return ", ".join(formated) - -def get_webpage(data_dict, columns, status, keywords, graph=None): - - if graph is None: - graph = os.path.dirname(os.path.realpath(__file__)) + '/graph.html' - - with open(graph, "r") as html_chart_file: - base_html_data = html_chart_file.read() - - if "None" not in columns: - data_dict["None"] = [0] - - - js_data_dickt ="{" - for col in columns.split(", "): - js_data_dickt += col + ":[" + make_js_object_string(data_dict[col]) + "]," - js_data_dickt += "}" - - base_html_data = (base_html_data - .replace("status", status) - .replace("keywords", keywords) - .replace("js_data_dickt", js_data_dickt) - .replace("var timestamp=[];", "var timestamp=[" + make_js_object_date(data_dict["date"]) + "]")) - - return base_html_data - -def compile_db_stats_html(db_file, sql_select, columns, status="", keywords="", graph=None): - - data_dict = {"date": []} - status_list = columns.split(", ") - for x in status_list: - data_dict[x] = [] - - con = None - cur = None - - try: - con = db.connect(db_file) - cur = con.cursor() - cur.execute(sql_select) - - for row in cur.fetchall(): - data_dict["date"].append(row[0]) - for x in range(len(status_list)): - data_dict[status_list[x]].append(row[x+1]) - - finally: - if cur: - cur.close() - if con: - con.close() - - return get_webpage(data_dict, columns, status, keywords, graph) - -def render_webpage(content): - for line in content.split("\n"): - print(line) - -def render_db_stats_as_html(db_file, sql_select, columns, status="", keywords=""): - render_webpage(compile_db_stats_html(db_file, sql_select, columns, status, keywords)) - -if 
__name__ == '__main__': - cgitb.enable() - #spit out HTML file directly, thus no need to give headers to the server - #print("Content-Type: text/html") - #print() - - render_db_stats_as_html("./demo3.db", rcd.SELECT_ALL) diff -r cdab667c6abb -r e2864dabdb8c display_issues_demo.py --- a/display_issues_demo.py Tue Nov 13 21:04:22 2018 +0100 +++ b/display_issues_demo.py Thu Nov 22 12:57:20 2018 +0100 @@ -47,6 +47,8 @@ print("Incorrect [SEARCH]Search parameter. (prio, status)") return + # roundup uses a "-" in its search parameters. SQL can't handle it. + columns = columns.replace("-", "_") cgitb.enable() # (optional) HTML traceback to browser #render_db_stats_as_html("./demo1.db", rcd.SELECT_ALL) diff -r cdab667c6abb -r e2864dabdb8c roundup_cc.py --- a/roundup_cc.py Tue Nov 13 21:04:22 2018 +0100 +++ b/roundup_cc.py Thu Nov 22 12:57:20 2018 +0100 @@ -11,7 +11,7 @@ import roundup_content_data as rcd PRIO = "critical, urgent, bug, feature, wish" -STATES = "unread, deferred, chatting, need_eg, in_progress, testing, done_cbb, resolved" +STATES = "unread, deferred, chatting, need-eg, in-progress, testing, done-cbb, resolved" def main(): @@ -48,16 +48,22 @@ elif search == "status": list_of_columns = config.get("SEARCH", "Status", fallback=STATES).split(", ") status = [""] + include_no_prio = False else: print("Incorrect [SEARCH]Search parameter. (prio, status)") return + # roundup uses a "-" in its search parameters. SQL can't handle it. 
+ sql_list_of_columns = [] + for column in list_of_columns: + sql_list_of_columns.append(column.replace("-", "_")) + select_all, select_where, create_db, insert_new = \ - rcd.build_sql_commands(list_of_columns) + rcd.build_sql_commands(sql_list_of_columns) save_stats_in_db(search, login_parameters, base_url, database_file, - list_of_columns, create_db, insert_new, keywords, status) + list_of_columns, create_db, insert_new, keywords, status, include_no_prio) if __name__ == '__main__': main() diff -r cdab667c6abb -r e2864dabdb8c roundup_cc_display.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/roundup_cc_display.py Thu Nov 22 12:57:20 2018 +0100 @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 + +""" Display previously saved issues from a database on webpage via CGI. + +author: Sascha L. Teichmann +author: Bernhard Reiter +author: Sean Engelhardt + +(c) 2010,2015 by Intevation GmbH + +This is Free Software under the terms of the +GNU GENERAL PUBLIC LICENSE Version 3 or later. +See http://www.gnu.org/licenses/gpl-3.0.txt for details + + +##Usage Example: ## +see display_issues_demo.py or __main__ section below. 
+""" + +import sqlite3 as db +import cgitb +import roundup_content_data as rcd +import os + + +def make_js_object_string(array): + formated = [] + + for item in array: + formated.append("{points: " + str(item) + "}") + + return ",".join(formated) + + +def make_js_object_date(array): + formated = [] + + for item in array: + formated.append("{date : new Date('" + str(item) + "')}") + + return ", ".join(formated) + +def get_webpage(data_dict, columns, status, keywords, graph=None): + + if graph is None: + graph = os.path.dirname(os.path.realpath(__file__)) + '/graph.html' + + with open(graph, "r") as html_chart_file: + base_html_data = html_chart_file.read() + + if "None" not in columns: + data_dict["None"] = [0] + + + js_data_dickt ="{" + for col in columns.split(", "): + js_data_dickt += col + ":[" + make_js_object_string(data_dict[col]) + "]," + js_data_dickt += "}" + + base_html_data = (base_html_data + .replace("status", status) + .replace("keywords", keywords) + .replace("js_data_dickt", js_data_dickt) + .replace("var timestamp=[];", "var timestamp=[" + make_js_object_date(data_dict["date"]) + "]")) + + return base_html_data + +def compile_db_stats_html(db_file, sql_select, columns, status="", keywords="", graph=None): + + data_dict = {"date": []} + status_list = columns.split(", ") + for x in status_list: + data_dict[x] = [] + + con = None + cur = None + + try: + con = db.connect(db_file) + cur = con.cursor() + cur.execute(sql_select) + + for row in cur.fetchall(): + data_dict["date"].append(row[0]) + for x in range(len(status_list)): + data_dict[status_list[x]].append(row[x+1]) + + finally: + if cur: + cur.close() + if con: + con.close() + + return get_webpage(data_dict, columns, status, keywords, graph) + +def render_webpage(content): + for line in content.split("\n"): + print(line) + +def render_db_stats_as_html(db_file, sql_select, columns, status="", keywords=""): + render_webpage(compile_db_stats_html(db_file, sql_select, columns, status, keywords)) + +if 
__name__ == '__main__': + cgitb.enable() + #spit out HTML file directly, thus no need to give headers to the server + #print("Content-Type: text/html") + #print() + + render_db_stats_as_html("./demo3.db", rcd.SELECT_ALL)