roundup-cc: changeset 35:b07588ac28b6 (Merge)

author:    Magnus Schieder <mschieder@intevation.de>
date:      Mon, 26 Nov 2018 16:37:20 +0100
parents:   916fa83b4144 (diff), 10167e40a657 (current diff)
children:  59e1659a0a0b
files:     collect_issues.py
diffstat:  17 files changed, 496 insertions(+), 300 deletions(-)
--- a/README.creole	Fri Nov 02 17:15:14 2018 +0100
+++ b/README.creole	Mon Nov 26 16:37:20 2018 +0100
@@ -5,35 +5,23 @@
 It is Free Software, check out the file headers.
 
 === Example
-Run ./demo.py from a roundup-tracker to have a running tracker.
+Run ./roundup_cc.py from a roundup-tracker to have a running tracker.
 
 {{{
-cp examples/collect_demo3.py c3.py
-cp examples/config3.ini .
-# change config3.ini
-./c3.py config3.ini
+# Edit config.ini
+# An example can be found under /examples/config.ini
+./roundup_cc.py config.ini
 # create or change some issues
-./c3.py config3.ini
+./roundup_cc.py config.ini
+
 # to inspect the database contents
-sqlite3 demo3.db 'select * from issues;'
-
-./display_issues_demo.py config3.ini >demo3.html
-chromium demo3.html
+sqlite3 demo.db 'select * from issues;'
+./roundup_cc_display.py config.ini > demo.html
+chromium demo.html
 }}}
 
-=== Notes
-examples/collect_demo2.py builds database column names dynamically
-and tracks issues with priorities. The display part is still missing.
-
-When migrating to 3:99c68ebfb3b9, Nov 30 17:46:22 2015
-you need to add the print statements for the content-type header
-to all of your cgi scripts.
-
-Bottledash may have a fork
-https://wald.intevation.org/hg/bottledash/file/tip/modules/web_view
-
 === Prerequisites
 
 Python v3, with build-in sqlite3 module.
--- a/collect_issues.py	Fri Nov 02 17:15:14 2018 +0100
+++ b/collect_issues.py	Mon Nov 26 16:37:20 2018 +0100
@@ -4,6 +4,7 @@
 author: Sascha L. Teichmann <sascha.teichmann@intevation.de>
 author: Bernhard Reiter <bernhard@intevation.de>
 author: Sean Engelhardt <sean.engelhardt@intevation.de>
+author: Magnus Schieder <magnus.schieder@intevation.de>
 
 (c) 2010, 2015, 2018 by Intevation GmbH
 
@@ -21,12 +22,20 @@
 import io
 import sqlite3 as db
 import os
+import sys
 
-CHECK_ROUNDUP_ORDER = "priority?@action=export_csv&@columns=id,order"
-CHECK_KEYWORD_ORDER = "keyword?@action=export_csv&@columns=id,name"
-CHECK_ROUNDUP_SEARCH_VALUES = "status?@action=export_csv&@columns=id,name&@filter=open&open=1"
-SEARCH_ROUNDUP = "issue?@action=export_csv&@columns=priority&@filter=status,keyword&@pagesize=500&@startwith=0&status={search_values}&keyword={keyword_values}"
+# Getting keywords and their ids.
+CHECK_KEYWORD_VALUES = "keyword?@action=export_csv&@columns=id,name"
+# Getting states and their ids.
+CHECK_STATUS_VALUES = "status?@action=export_csv&@columns=id,name"
+# Getting priorities and their ids.
+CHECK_PRIO_VALUES = "priority?@action=export_csv&@columns=id,name"
+
+# Getting the priority of each issue with the filter status, keyword ,priority
+SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword,priority&@pagesize=500&@startwith=0&status={status_values}&keyword={keyword_values}&priority={priority_values}"
+# Getting the status of each issue with the filter keyword, priority, status
+SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=priority,keyword,status&@pagesize=500&@startwith=0&priority={priority_values}&keyword={keyword_values}&status={status_values}"
 
 
 def connect_to_server(params, baseurl):
@@ -65,62 +74,55 @@
         if con:
             con.close()
 
-def get_keyword_ids(opener, baseurl, keywords):
-    if keywords == [""]:
-        return ""
-
-    keywords_csv = get_csv_from_server(opener, baseurl, CHECK_KEYWORD_ORDER)
-    keywords_dict = {}
-    for x in keywords_csv:
-        keywords_dict[x["name"]] = x["id"]
-
-    keywords_ids = []
-    for x in keywords:
-        keywords_ids.append(keywords_dict[x])
-
-    return ",".join(keywords_ids)
+def get_ids(opener, baseurl, parameter, url, include_no_prio=False):
+    """Returns the IDs of the respective search parameters as string and list.
+    """
 
-def get_status_ids(opener, baseurl, status):
-
-    status_csv = get_csv_from_server(opener, baseurl, CHECK_ROUNDUP_SEARCH_VALUES)
-
-    if status == [""]:
-        return ""
+    if parameter == [""]:
+        return ("", [])
 
-    status_dict = {}
-    for x in status_csv:
-        status_dict[x["name"]] = x["id"]
+    parameter_csv = get_csv_from_server(opener, baseurl, url)
+    parameter_dict = {}
+    for x in parameter_csv:
+        parameter_dict[x["name"]] = x["id"]
 
-    staus_ids = ["-1"]
-    for x in status:
-        staus_ids.append(status_dict[x])
+    if include_no_prio:
+        parameter_dict["None"] = "-1"
 
-    return ",".join(staus_ids)
+    parameter_ids = []
+    for x in parameter:
+        if x not in parameter_dict:
+            print('The parameter "%s" does not exist in the tracker.' % x)
+            sys.exit(0)
+
+        parameter_ids.append(parameter_dict[x])
+
+    return (",".join(parameter_ids), parameter_ids)
 
-def issues_to_quantities(issue_csv, columns, orders_csv):
+def issues_to_quantities(issue_csv, columns_ids):
     """Count issues per priority.
 
-    Returns: a list of ints, containing how often a prio occurred [:-1]
-    in order of the priorities, with the last being the "None" prio
+    Returns: a list of ints, containing how often a prio/status occurred
+    in the order in which they are specified in the config.
     """
 
-    quantities = [0] * (len(columns) +1)
     order_dict = {}
+    z = 0
+    for x in columns_ids:
+        order_dict[x] = z
+        z += 1
 
-    # convert the csv-dict reader to real dict
-    for row in orders_csv:
-        order_dict[row["id"]] = int(float(row["order"])) # int(float()) because the order-value is indeed "1.0, 2.0" etc
+    quantities = [0] * z
 
     for issue in issue_csv:
-        priority = issue["priority"]
+        priority = issue[issue_csv.fieldnames[0]]
 
         if priority.isdigit() == True :
-            quantities[order_dict[priority] -1 ] += 1
-        else: # no priority set
+            quantities[order_dict[priority]] += 1
+        else:  # no priority set
             quantities[-1] += 1
-
-    # print("quantities : " + str(quantities))
+    #print("quantities : " + str(quantities))
 
     return quantities
 
@@ -147,25 +149,43 @@
             con.close()
 
-def save_stats_in_db(login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, status, include_no_prio=False):
+def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, search_parameters, include_no_prio):
     try:
         opener = connect_to_server(login_parmeters, baseurl)
-        order_csv = get_csv_from_server(opener, baseurl, CHECK_ROUNDUP_ORDER)
-
-        keywords_ids_url = get_keyword_ids(opener, baseurl, keywords)
+        keywords_ids_url, _ = get_ids(opener, baseurl, keywords,
+                CHECK_KEYWORD_VALUES)
 
-        status_ids_url = get_status_ids(opener, baseurl, status)
-        formated_search_url = SEARCH_ROUNDUP.format(search_values=status_ids_url, keyword_values=keywords_ids_url)
+        if search == "prio":
+            # search_parameters are states.
+            status_ids_url, _ = get_ids(opener, baseurl,search_parameters ,
+                    CHECK_STATUS_VALUES, include_no_prio)
+            prio_ids_url, columns_ids = get_ids(opener, baseurl, columns,
+                    CHECK_PRIO_VALUES, include_no_prio)
+            formated_search_url = SEARCH_ROUNDUP_PRIO.format(
+                    status_values=status_ids_url,
+                    keyword_values=keywords_ids_url,
+                    priority_values=prio_ids_url)
 
-        current_issues_csv = get_csv_from_server(opener, baseurl, formated_search_url)
+        elif search == "status":
+            # search_parameters are priorities.
+            prio_ids_url, _ = get_ids(opener, baseurl, search_parameters,
+                    CHECK_PRIO_VALUES, include_no_prio)
+            status_ids_url, columns_ids = get_ids(opener, baseurl, columns,
+                    CHECK_STATUS_VALUES)
+            formated_search_url = SEARCH_ROUNDUP_STATUS.format(
+                    priority_values=prio_ids_url,
+                    keyword_values=keywords_ids_url,
+                    status_values=status_ids_url)
+
+        current_issues_csv = get_csv_from_server(opener, baseurl,
+                formated_search_url)
 
         opener.close()
+        #print(baseurl + formated_search_url)
 
-        quantities = issues_to_quantities(current_issues_csv, columns, order_csv)
-        if not include_no_prio:
-            quantities = quantities[:-1]
+        quantities = issues_to_quantities(current_issues_csv, columns_ids)
 
         save_issues_to_db(quantities, db_file, sql_create_db, sql_insert_in_db)
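Review note: a minimal usage sketch of the new get_ids() helper and the SEARCH_ROUNDUP_PRIO template introduced above. The opener/baseurl objects and the keyword, status, and priority names are assumptions for illustration, not part of this changeset.

{{{
# Hypothetical sketch: resolve configured names to tracker ids and build the
# export_csv search URL, as save_stats_in_db() now does internally.
# `opener` and `baseurl` would come from connect_to_server().
from collect_issues import (get_ids, CHECK_KEYWORD_VALUES, CHECK_STATUS_VALUES,
                            CHECK_PRIO_VALUES, SEARCH_ROUNDUP_PRIO)

def build_prio_search_url(opener, baseurl):
    # get_ids() returns a comma-separated id string plus the id list.
    keyword_ids, _ = get_ids(opener, baseurl, ["backlog"], CHECK_KEYWORD_VALUES)
    status_ids, _ = get_ids(opener, baseurl, ["unread", "chatting"],
                            CHECK_STATUS_VALUES)
    prio_ids, prio_id_list = get_ids(opener, baseurl, ["critical", "urgent"],
                                     CHECK_PRIO_VALUES)
    # The id strings are substituted into the export_csv search template.
    url = SEARCH_ROUNDUP_PRIO.format(status_values=status_ids,
                                     keyword_values=keyword_ids,
                                     priority_values=prio_ids)
    return url, prio_id_list
}}}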
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/config.ini	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,32 @@
+[URL]
+# URL of the tracker
+BaseURL =
+
+[LOGIN]
+Username =
+Password =
+
+[DB]
+# Path of the database in which the data is to be stored.
+DatabaseFile =
+
+[SEARCH]
+# Determines whether priorities or statuses are counted (default: prio).
+Search = prio/status
+
+# Determines which priorities are searched for.
+# (Default if 'Search=prio': critical, urgent, bug, feature, wish)
+# (Default if 'Search=status': All priorities that are available in the Tracker, also 'None'.)
+Priority =
+
+# If True is selected, the issues that have no priority are also counted (default: False).
+IncludeNoPrio = True/False
+
+# Determines which statuses are searched for.
+# (Default if 'Search=status': unread, deferred, chatting, need-eg, in-progress, testing, done-cbb)
+# (Default if 'Search=prio': All statuses that are available in the Tracker.)
+Status =
+
+# Determines which keywords are searched for.
+# (default: Keywords are not included in the search.)
+Keywords =
--- a/display_issues.py	Fri Nov 02 17:15:14 2018 +0100
+++ b/display_issues.py	Mon Nov 26 16:37:20 2018 +0100
@@ -5,8 +5,8 @@
 author: Sascha L. Teichmann <sascha.teichmann@intevation.de>
 author: Bernhard Reiter <bernhard@intevation.de>
 author: Sean Engelhardt <sean.engelhardt@intevation.de>
-
-(c) 2010,2015 by Intevation GmbH
+author: Magnus Schieder <magnus.schieder@intevation.de>
+(c) 2010,2015,2018 by Intevation GmbH
 
 This is Free Software unter the terms of the
 GNU GENERAL PUBLIC LICENSE Version 3 or later.
@@ -40,7 +40,8 @@
 
     return ", ".join(formated)
 
-def get_webpage(status, keywords, graph=None):
+def get_webpage(data_dict, columns, search_parameters, keywords, graph=None):
+    """Returns the website with inserted data."""
 
     if graph is None:
         graph = os.path.dirname(os.path.realpath(__file__)) + '/graph.html'
@@ -48,21 +49,29 @@
     with open(graph, "r") as html_chart_file:
         base_html_data = html_chart_file.read()
 
+    if "None" not in columns:
+        data_dict["None"] = [0]
+
+    # Converts the data to a JS object.
+    js_data_dickt ="{"
+    for col in columns.split(", "):
+        js_data_dickt += col + ":[" + make_js_object_string(data_dict[col]) + "],"
+    js_data_dickt += "}"
 
     base_html_data = (base_html_data
-        .replace("status", status)
+        .replace("search_parameters", search_parameters)
         .replace("keywords", keywords)
-        .replace("var critical=[];", "var critical=[" + make_js_object_string(rcd.data_dict["critical"]) + "]")
-        .replace("var urgent=[];", "var urgent=[" + make_js_object_string(rcd.data_dict["urgent"]) + "]")
-        .replace("var bug=[];", "var bug=[" + make_js_object_string(rcd.data_dict["bug"]) + "]")
-        .replace("var feature=[];", "var feature=[" + make_js_object_string(rcd.data_dict["feature"]) + "]")
-        .replace("var wish=[];", "var wish=[" + make_js_object_string(rcd.data_dict["wish"]) + "]")
-        .replace("var noPrio=[];", "var noPrio=[" + make_js_object_string(rcd.data_dict["noPrio"]) + "]")
-        .replace("var timestamp=[];", "var timestamp=[" + make_js_object_date(rcd.data_dict["date"]) + "]"))
+        .replace("js_data_dickt", js_data_dickt)
+        .replace("var timestamp=[];", "var timestamp=[" + make_js_object_date(data_dict["date"]) + "]"))
 
     return base_html_data
 
-def compile_db_stats_html(db_file, sql_select, status="", keywords="", graph=None):
+def compile_db_stats_html(db_file, sql_select, columns, search_parameters="", keywords="", graph=None):
+
+    data_dict = {"date": []}
+    status_list = columns.split(", ")
+    for x in status_list:
+        data_dict[x] = []
 
     con = None
     cur = None
@@ -73,27 +82,24 @@
         cur.execute(sql_select)
 
         for row in cur.fetchall():
-            rcd.data_dict["date"].append(row[0])
-            rcd.data_dict["critical"].append(row[1])
-            rcd.data_dict["urgent"].append(row[2])
-            rcd.data_dict["bug"].append(row[3])
-            rcd.data_dict["feature"].append(row[4])
-            rcd.data_dict["wish"].append(row[5])
-            rcd.data_dict["noPrio"].append(row[6])
+            data_dict["date"].append(row[0])
+            for x in range(len(status_list)):
+                data_dict[status_list[x]].append(row[x+1])
+
     finally:
         if cur:
             cur.close()
        if con:
            con.close()
 
-    return get_webpage(status, keywords, graph)
+    return get_webpage(data_dict, columns, search_parameters, keywords, graph)
 
 def render_webpage(content):
     for line in content.split("\n"):
         print(line)
 
-def render_db_stats_as_html(db_file, sql_select, status="", keywords=""):
-    render_webpage(compile_db_stats_html(db_file, sql_select, status, keywords))
+def render_db_stats_as_html(db_file, sql_select, columns, search_parameters="", keywords=""):
+    render_webpage(compile_db_stats_html(db_file, sql_select, columns, search_parameters,
+        keywords))
 
 if __name__ == '__main__':
     cgitb.enable()
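Review note: a small sketch of the JS object string that the reworked get_webpage() splices into graph.html in place of the js_data_dickt placeholder. Column names and counts are invented, and plain string joining stands in for make_js_object_string() to keep the sketch self-contained.

{{{
# Assumed example data as produced by compile_db_stats_html():
# one count per stored timestamp for every configured column.
data_dict = {"critical": [0, 2], "urgent": [1, 1], "None": [0]}
columns = "critical, urgent"

# Build the JS object literal the same way get_webpage() does.
js_data_dickt = "{"
for col in columns.split(", "):
    js_data_dickt += col + ":[" + ",".join(str(n) for n in data_dict[col]) + "],"
js_data_dickt += "}"

print(js_data_dickt)   # {critical:[0,2],urgent:[1,1],}
}}}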
--- a/display_issues_demo.py	Fri Nov 02 17:15:14 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-#!/usr/bin/env python3
-
-""" Fetch issues from a roundup-tracker and save them in a databse.
-
-author: Sascha L. Teichmann <sascha.teichmann@intevation.de>
-author: Bernhard Reiter <bernhard@intevation.de>
-author: Sean Engelhardt <sean.engelhardt@intevation.de>
-
-(c) 2010, 2015, 2018 by Intevation GmbH
-
-This is Free Software unter the terms of the
-GNU GENERAL PUBLIC LICENSE Version 3 or later.
-See http://www.gnu.org/licenses/gpl-3.0.txt for details
-"""
-import configparser
-import argparse
-from display_issues import *
-
-parser = argparse.ArgumentParser()
-parser.add_argument("config_file", type=str, metavar="[config file]")
-args = parser.parse_args()
-
-config = configparser.ConfigParser()
-config.read(args.config_file)
-
-db = config.get("DB", "DatabaseFile")
-keywords = config.get("SEARCH", "Keywords", fallback="")
-status = config.get("SEARCH", "Status", fallback="")
-columns = config.get("SEARCH", "Columns", fallback="critical, urgent, bug, feature, wish")
-noPrio = config.get("SEARCH", "IncludeNoPrio", fallback=False)
-if noPrio:
-    columns += ", None"
-
-cgitb.enable() # (optional) HTML traceback to browser
-#render_db_stats_as_html("./demo1.db", rcd.SELECT_ALL)
-render_db_stats_as_html(db,
-        rcd.build_sql_select(columns).format("timestamp > date('now', '-2 month')"),
-        status, keywords)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/collect_demo1.py	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+"""Connect to roundup-tracker and save status to db for example demo1.
+
+Run periodically as often as you want data points to be saved.
+demo1 only tracks issues with a priority.
+"""
+from collect_issues import save_stats_in_db
+import roundup_content_data as rcd
+
+BASE_URL_DEMO = "http://localhost:8917/demo/"
+SEARCH_URL_DEMO = "issue?@action=export_csv&@columns=title,priority&@filter=status&@pagesize=50&@startwith=0&status=-1,1,2,3,4,5,6,7"
+
+LOGIN_PARAMETERS_DEMO = (
+    ("__login_name", "demo"),
+    ("__login_password", "demo"),
+    ("@action", "Login"),
+    )
+
+save_stats_in_db(LOGIN_PARAMETERS_DEMO, BASE_URL_DEMO, "./demo1.py",
+        rcd.COLUMNS, rcd.CREATE_DB, rcd.INSERT_NEW, SEARCH_URL_DEMO)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/collect_demo2.py	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+"""Connect to roundup-tracker and save status to db for example demo2.
+
+Run periodically as often as you want data points to be saved.
+demo2 tracks issue without priority in column `None`.
+"""
+from collect_issues import save_stats_in_db
+import roundup_content_data as rcd
+
+BASE_URL_DEMO = "http://localhost:8917/demo/"
+SEARCH_URL_DEMO = "issue?@action=export_csv&@columns=title,priority&@filter=status&@pagesize=50&@startwith=0&status=-1,1,2,3,4,5,6,7"
+
+LOGIN_PARAMETERS_DEMO = (
+    ("__login_name", "demo"),
+    ("__login_password", "demo"),
+    ("@action", "Login"),
+    )
+
+list_of_columns = ['critical', 'major', 'normal', 'minor', 'wishlist']
+data_dict = { key: [] for key in list_of_columns }
+
+# To track issues without prio we need to add an extra column in the db cmds.
+select_all, select_where, create_db, insert_new = \
+    rcd.build_sql_commands(list_of_columns + ['None'])
+
+# We enable the extra colum with `include_no_prio=True`
+save_stats_in_db(LOGIN_PARAMETERS_DEMO, BASE_URL_DEMO, "./demo2.db",
+        list_of_columns, create_db, insert_new,
+        SEARCH_URL_DEMO, include_no_prio=True)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/collect_demo3.py	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+"""Connect to roundup-tracker and save status to db for example demo1.
+
+Run periodically as often as you want data points to be saved.
+demo1 only tracks issues with a priority.
+"""
+
+import json
+import configparser
+import argparse
+
+from collect_issues import save_stats_in_db
+import roundup_content_data as rcd
+
+COLUMNS = "critical, urgent, bug, feature, wish"
+
+parser = argparse.ArgumentParser()
+parser.add_argument("config_file", type=str, metavar="[config file]")
+args = parser.parse_args()
+
+config = configparser.ConfigParser()
+config.read(args.config_file)
+
+base_url = config.get("URL", "BaseURL")
+
+user = config.get("LOGIN","Username")
+password = config.get("LOGIN", "Password")
+
+LOGIN_PARAMETERS_DEMO = (
+    ("__login_name", user),
+    ("__login_password", password),
+    ("@action", "Login"),
+    )
+
+database_file = config.get("DB", "DatabaseFile")
+
+keywords = config.get("SEARCH", "Keywords", fallback="").split(", ")
+
+list_of_columns = config.get("SEARCH", "Columns", fallback=COLUMNS).split(", ")
+
+status = config.get("SEARCH", "Status", fallback="").split(", ")
+
+include_no_prio = config.getboolean("SEARCH", "IncludeNoPrio", fallback= False)
+
+if include_no_prio:
+    list_of_columns += ["None"]
+
+select_all, select_where, create_db, insert_new = \
+    rcd.build_sql_commands(list_of_columns)
+
+save_stats_in_db(LOGIN_PARAMETERS_DEMO, base_url, database_file,
+        list_of_columns, create_db, insert_new, keywords, status)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/old_README.creole	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,44 @@
+= Roundup Issue Collector
+
+Grab and display data from a http://roundup-tracker.org/ instance.
+
+It is Free Software, check out the file headers.
+
+=== Example
+Run ./demo.py from a roundup-tracker to have a running tracker.
+
+{{{
+cp examples/collect_demo3.py c3.py
+cp examples/config3.ini .
+# change config3.ini
+./c3.py config3.ini
+# create or change some issues
+./c3.py config3.ini
+
+# to inspect the database contents
+sqlite3 demo3.db 'select * from issues;'
+
+./display_issues_demo.py config3.ini >demo3.html
+chromium demo3.html
+}}}
+
+
+=== Notes
+examples/collect_demo2.py builds database column names dynamically
+and tracks issues with priorities. The display part is still missing.
+
+When migrating to 3:99c68ebfb3b9, Nov 30 17:46:22 2015
+you need to add the print statements for the content-type header
+to all of your cgi scripts.
+
+Bottledash may have a fork
+https://wald.intevation.org/hg/bottledash/file/tip/modules/web_view
+
+=== Prerequisites
+
+Python v3, with build-in sqlite3 module.
+
+=== Included
+
+http://d3js.org/ initially used with 3.5.5
+ """Library released under BSD license. Copyright 2015 Mike Bostock."
--- a/examples/collect_demo1.py	Fri Nov 02 17:15:14 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-#!/usr/bin/env python3
-"""Connect to roundup-tracker and save status to db for example demo1.
-
-Run periodically as often as you want data points to be saved.
-demo1 only tracks issues with a priority.
-"""
-from collect_issues import save_stats_in_db
-import roundup_content_data as rcd
-
-BASE_URL_DEMO = "http://localhost:8917/demo/"
-SEARCH_URL_DEMO = "issue?@action=export_csv&@columns=title,priority&@filter=status&@pagesize=50&@startwith=0&status=-1,1,2,3,4,5,6,7"
-
-LOGIN_PARAMETERS_DEMO = (
-    ("__login_name", "demo"),
-    ("__login_password", "demo"),
-    ("@action", "Login"),
-    )
-
-save_stats_in_db(LOGIN_PARAMETERS_DEMO, BASE_URL_DEMO, "./demo1.py",
-        rcd.COLUMNS, rcd.CREATE_DB, rcd.INSERT_NEW, SEARCH_URL_DEMO)
--- a/examples/collect_demo2.py	Fri Nov 02 17:15:14 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-"""Connect to roundup-tracker and save status to db for example demo2.
-
-Run periodically as often as you want data points to be saved.
-demo2 tracks issue without priority in column `None`.
-"""
-from collect_issues import save_stats_in_db
-import roundup_content_data as rcd
-
-BASE_URL_DEMO = "http://localhost:8917/demo/"
-SEARCH_URL_DEMO = "issue?@action=export_csv&@columns=title,priority&@filter=status&@pagesize=50&@startwith=0&status=-1,1,2,3,4,5,6,7"
-
-LOGIN_PARAMETERS_DEMO = (
-    ("__login_name", "demo"),
-    ("__login_password", "demo"),
-    ("@action", "Login"),
-    )
-
-list_of_columns = ['critical', 'major', 'normal', 'minor', 'wishlist']
-data_dict = { key: [] for key in list_of_columns }
-
-# To track issues without prio we need to add an extra column in the db cmds.
-select_all, select_where, create_db, insert_new = \
-    rcd.build_sql_commands(list_of_columns + ['None'])
-
-# We enable the extra colum with `include_no_prio=True`
-save_stats_in_db(LOGIN_PARAMETERS_DEMO, BASE_URL_DEMO, "./demo2.db",
-        list_of_columns, create_db, insert_new,
-        SEARCH_URL_DEMO, include_no_prio=True)
--- a/examples/collect_demo3.py	Fri Nov 02 17:15:14 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-#!/usr/bin/env python3
-"""Connect to roundup-tracker and save status to db for example demo1.
-
-Run periodically as often as you want data points to be saved.
-demo1 only tracks issues with a priority.
-"""
-
-import json
-import configparser
-import argparse
-
-from collect_issues import save_stats_in_db
-import roundup_content_data as rcd
-
-COLUMNS = "critical, urgent, bug, feature, wish"
-
-parser = argparse.ArgumentParser()
-parser.add_argument("config_file", type=str, metavar="[config file]")
-args = parser.parse_args()
-
-config = configparser.ConfigParser()
-config.read(args.config_file)
-
-base_url = config.get("URL", "BaseURL")
-
-user = config.get("LOGIN","Username")
-password = config.get("LOGIN", "Password")
-
-LOGIN_PARAMETERS_DEMO = (
-    ("__login_name", user),
-    ("__login_password", password),
-    ("@action", "Login"),
-    )
-
-database_file = config.get("DB", "DatabaseFile")
-
-keywords = config.get("SEARCH", "Keywords", fallback="").split(", ")
-
-list_of_columns = config.get("SEARCH", "Columns", fallback=COLUMNS).split(", ")
-
-status = config.get("SEARCH", "Status", fallback="").split(", ")
-
-include_no_prio = config.getboolean("SEARCH", "IncludeNoPrio", fallback= False)
-
-if include_no_prio:
-    list_of_columns += ["None"]
-
-select_all, select_where, create_db, insert_new = \
-    rcd.build_sql_commands(list_of_columns)
-
-save_stats_in_db(LOGIN_PARAMETERS_DEMO, base_url, database_file,
-        list_of_columns, create_db, insert_new, keywords, status)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/examples/config.ini	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,16 @@
+[URL]
+BaseURL = http://localhost:8917/demo/
+
+[LOGIN]
+Username = demo
+Password = demo
+
+[DB]
+DatabaseFile = ./demo.db
+
+[SEARCH]
+Search = prio
+Keywords = keyword1, keyword2, keyword3
+Priority = critical, urgent, bug, feature, wish
+Status = unread, deferred, chatting, need-eg, in-progress, testing, done-cbb, resolved
+IncludeNoPrio = True
--- a/examples/config3.ini	Fri Nov 02 17:15:14 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-[URL]
-BaseURL = http://localhost:8917/demo/
-
-[LOGIN]
-Username = demo
-Password = demo
-
-[DB]
-DatabaseFile = ./demo3.db
-
-[SEARCH]
-Keywords = keywords1, keyword2, keyword3
-Columns = critical, urgent, bug, feature, wish
-Status = unread, deferred, chatting, need-eg, in-progress, testing, done-cbb, resolved
-IncludeNoPrio = True
--- a/graph.html	Fri Nov 02 17:15:14 2018 +0100
+++ b/graph.html	Mon Nov 26 16:37:20 2018 +0100
@@ -33,55 +33,71 @@
         opacity: 1;
     }
 
-    .line.critical {
+    .line.red {
         stroke: red;
     }
 
-    .line.critical.legend {
+    .line.red.legend {
         fill: red;
     }
 
-    .line.urgent {
+    .line.orange {
         stroke: orange;
     }
 
-    .line.urgent.legend {
+    .line.orange.legend {
         fill: orange;
     }
 
-    .line.bug {
+    .line.violet {
         stroke: violet;
     }
 
-    .line.bug.legend {
+    .line.violet.legend {
         fill: violet;
     }
 
-    .line.feature {
+    .line.chartreuse {
         stroke: chartreuse;
         style: stroke-dasharray;
     }
 
-    .line.feature.legend {
+    .line.chartreuse.legend {
         fill: chartreuse;
     }
 
-    .line.wish {
+    .line.blue {
         stroke: blue;
     }
 
-    .line.wish.legend {
+    .line.blue.legend {
         fill: blue;
    }
 
-    .line.noprio {
+    .line.grey {
         stroke: grey;
     }
 
-    .line.noprio.legend {
+    .line.grey.legend {
         fill: grey;
     }
 
+    .line.aqua{
+        stroke: aqua;
+    }
+
+    .line.aqua.legend {
+        fill: aqua;
+    }
+
+    .line.darkgreen {
+        stroke: darkgreen;
+    }
+
+    .line.darkgreen.legend {
+        fill: darkgreen;
+    }
+
     .grid .tick {
         stroke: lightgrey;
         opacity: 0.7;
@@ -95,7 +111,7 @@
 </head>
 <body>
     <h1>Filter</h1>
-    <p>States: status</p>
+    <p>Search parameters: search_parameters</p>
     <p>Keywords: keywords</p>
     <div id="content" style="display: inline-block"></div>
     <script type="text/javascript" src="d3.v3.min.js"></script>
@@ -106,14 +122,11 @@
         makeChart();
     };
 
-    var critical=[];
-    var urgent=[];
-    var bug=[];
-    var feature=[];
-    var wish=[];
-    var noPrio=[];
+
     var timestamp=[];
 
+    var data=js_data_dickt
+    var linesCSS = ["red", "orange", "violet", "chartreuse", "blue", "grey", "aqua", "darkgreen"]
 
     function assignIssueToDate(issueArray, dateArray){
@@ -146,13 +159,9 @@
     function getMaxIssues(){
         maxIssuesOfAllArrays = [];
 
-        maxIssuesOfAllArrays.push(maxInObject(critical));
-        maxIssuesOfAllArrays.push(maxInObject(urgent));
-        maxIssuesOfAllArrays.push(maxInObject(bug));
-        maxIssuesOfAllArrays.push(maxInObject(feature));
-        maxIssuesOfAllArrays.push(maxInObject(wish));
-        maxIssuesOfAllArrays.push(maxInObject(noPrio));
-
+        for (col in data){
+            maxIssuesOfAllArrays.push(maxInObject(data[col]))
+        }
         return Math.max.apply(Math, maxIssuesOfAllArrays)+1;
     }
 
@@ -201,7 +210,7 @@
         return top_distance;
     }
 
-    function draw_legend_line(svg, width, Ypos, text, issues){
+    function draw_legend_line(svg, width, Ypos, linesColour, text, issues){
         svg.append("svg:text")
             .attr("class", "legend")
             .attr("x", width-30 )
@@ -210,24 +219,24 @@
 
         svg.append("svg:text")
             .attr("class", "legend")
-            .attr("x", width+35 )
+            .attr("x", width+65 )
             .attr("y", Ypos)
             .text(issues);
 
         svg.append("rect")
-            .attr("class", "line " + text.toLowerCase() + " legend")
+            .attr("class", "line " + linesColour.toLowerCase() + " legend")
             .attr("x", width-30)
             .attr("y", Ypos-20)
             .attr("width", 100)
             .attr("height", 2);
     }
 
-    draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), "Critical", critical[critical.length-1].points);
-    draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), "Urgent", urgent[urgent.length-1].points);
-    draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), "Bug", bug[bug.length-1].points);
-    draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), "Feature", feature[feature.length-1].points);
-    draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), "Wish", wish[wish.length-1].points);
-    draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), "NoPrio", noPrio[noPrio.length-1].points);
+    var colourNume = 0
+    for (col in data) {
+        graph = data[col]
+        draw_legend_line(svg, legend_distance, set_propper_distance(distance_steps), linesCSS[colourNume], col, graph[graph.length-1].points);
+        colourNume += 1
+    }
 
     }
 
@@ -237,7 +246,7 @@
     //declaration
     var sizeOfSystemBorders = 50;
 
-    var margin = {top: 20, right: 100, bottom: 90, left: 60},
+    var margin = {top: 20, right: 150, bottom: 90, left: 60},
         width = (document.documentElement.clientWidth-sizeOfSystemBorders) - margin.left - margin.right,
         height = (document.documentElement.clientHeight-sizeOfSystemBorders) - margin.top - margin.bottom;
 
@@ -252,12 +261,12 @@
         .y(function(d) { return y(d.points); });
 
     //lines
-    var criticalLine = base_line;
-    var urgentLine = base_line;
-    var bugLine = base_line;
-    var featureLine = base_line;
-    var wishLine = base_line;
-    var noPrioLine = base_line;
+
+    lines = {}
+    for (col in data) {
+        lines[col] = base_line
+    }
+
     var timestampLine = base_line;
 
@@ -359,14 +368,13 @@
         .attr("y", -5)
         .text("Issues Nach Zeit");
 
-
-    draw_line(svg, wish, "line wish", wishLine, "0, 0");
-    draw_line(svg, feature, "line feature", featureLine, "3, 3");
-    draw_line(svg, bug, "line bug", bugLine, "7, 7");
-    draw_line(svg, urgent, "line urgent", urgentLine, "13, 13");
-    draw_line(svg, critical, "line critical", criticalLine, "17, 17");
-    draw_line(svg, noPrio, "line noprio", noPrioLine, "17, 17");
-
+    var shape = 0
+    var colourNume = 0
+    for (col in data){
+        draw_line(svg, data[col], "line " + linesCSS[colourNume] , lines[col], shape + ", " +shape);
+        colourNume += 1
+        shape += 3
+    }
 
     makeLegend(svg, width);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/roundup_cc.py	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+"""Connect to roundup-tracker and save status to db.
+
+Run periodically as often as you want data points to be saved.
+"""
+
+import configparser
+import argparse
+
+from collect_issues import save_stats_in_db
+import roundup_content_data as rcd
+
+# Default priorities of 'roundup'.
+PRIO = "critical, urgent, bug, feature, wish"
+# Default statuses of 'roundup'.
+STATES = "unread, deferred, chatting, need-eg, in-progress, testing, done-cbb, resolved"
+
+def main():
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("config_file", type=str, metavar="[config file]")
+    args = parser.parse_args()
+
+    config = configparser.ConfigParser()
+    config.read(args.config_file)
+
+    # URL of the issue tracker
+    base_url = config.get("URL", "BaseURL")
+
+    user = config.get("LOGIN","Username")
+    password = config.get("LOGIN", "Password")
+
+    login_parameters = (
+        ("__login_name", user),
+        ("__login_password", password),
+        ("@action", "Login"),
+        )
+
+    database_file = config.get("DB", "DatabaseFile")
+
+    keywords = config.get("SEARCH", "Keywords", fallback="").split(", ")
+    include_no_prio = config.getboolean("SEARCH", "IncludeNoPrio", fallback= False)
+
+    search = config.get("SEARCH", "Search", fallback="prio")
+    if search == "prio":
+        list_of_columns = config.get("SEARCH", "Priority", fallback=PRIO).split(", ")
+        search_parameters = config.get("SEARCH", "Status", fallback="").split(", ")
+        if include_no_prio:
+            list_of_columns += ["None"]
+
+    elif search == "status":
+        list_of_columns = config.get("SEARCH", "Status", fallback=STATES).split(", ")
+        search_parameters = config.get("SEARCH", "Priority", fallback="").split(", ")
+        if include_no_prio:
+            search_parameters += ["None"]
+
+
+    else:
+        print("Incorrect [SEARCH]Search parameter. (prio, status)")
+        return
+
+    # roundup uses a "-" in its search parameters. Sql can't handle it.
+    sql_list_of_columns = []
+    for column in list_of_columns:
+        sql_list_of_columns.append(column.replace("-", "_"))
+
+    select_all, select_where, create_db, insert_new = \
+            rcd.build_sql_commands(sql_list_of_columns)
+
+    save_stats_in_db(search, login_parameters, base_url, database_file,
+            list_of_columns, create_db, insert_new, keywords,
+            search_parameters, include_no_prio)
+
+if __name__ == '__main__':
+    main()
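Review note: a condensed sketch of how the Search=prio / Search=status branch above splits the [SEARCH] options into counted columns versus filter parameters. The helper name and the example values are assumptions for illustration only.

{{{
# Hypothetical helper mirroring the branching in roundup_cc.py's main().
def split_search(search, priorities, statuses, include_no_prio):
    """Return (columns that are counted, parameters used only as filter)."""
    if search == "prio":
        columns, parameters = list(priorities), list(statuses)
        if include_no_prio:
            columns.append("None")      # extra column for issues without priority
    elif search == "status":
        columns, parameters = list(statuses), list(priorities)
        if include_no_prio:
            parameters.append("None")   # issues without priority still match the filter
    else:
        raise ValueError("Search must be 'prio' or 'status'")
    return columns, parameters

# e.g. split_search("prio", ["critical", "wish"], ["unread"], True)
# -> (["critical", "wish", "None"], ["unread"])
}}}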
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/roundup_cc_display.py	Mon Nov 26 16:37:20 2018 +0100
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+
+""" Fetch issues from a roundup-tracker and save them in a databse.
+
+author: Sascha L. Teichmann <sascha.teichmann@intevation.de>
+author: Bernhard Reiter <bernhard@intevation.de>
+author: Sean Engelhardt <sean.engelhardt@intevation.de>
+author: Magnus Schieder <magnus.schieder@intevation.de>
+
+(c) 2010, 2015, 2018 by Intevation GmbH
+
+This is Free Software unter the terms of the
+GNU GENERAL PUBLIC LICENSE Version 3 or later.
+See http://www.gnu.org/licenses/gpl-3.0.txt for details
+"""
+import configparser
+import argparse
+from display_issues import *
+
+def main():
+    PRIO = "critical, urgent, bug, feature, wish"
+    STATES = "unread, deferred, chatting, need_eg, in_progress, testing, done_cbb, resolved"
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("config_file", type=str, metavar="[config file]")
+    args = parser.parse_args()
+
+    config = configparser.ConfigParser()
+    config.read(args.config_file)
+
+    db = config.get("DB", "DatabaseFile")
+    keywords = config.get("SEARCH", "Keywords", fallback="")
+
+    search = config.get("SEARCH", "Search", fallback="prio")
+    if search == "prio":
+        search_parameters = config.get("SEARCH", "Status", fallback="")
+        columns = config.get("SEARCH", "Priority", fallback=PRIO)
+        noPrio = config.get("SEARCH", "IncludeNoPrio", fallback=False)
+        if noPrio:
+            columns += ", None"
+
+    elif search == "status":
+        columns = config.get("SEARCH", "Status", fallback=STATES)
+        search_parameters = config.get("SEARCH", "Priority", fallback="")
+
+    else:
+        print("Incorrect [SEARCH]Search parameter. (prio, status)")
+        return
+
+    # roundup uses a "-" in its search parameters. Sql can't handle it.
+    columns = columns.replace("-", "_")
+
+    cgitb.enable() # (optional) HTML traceback to browser
+    #render_db_stats_as_html("./demo1.db", rcd.SELECT_ALL)
+    render_db_stats_as_html(db,
+        rcd.build_sql_select(columns).format("timestamp > date('now', '-2 month')"),
+        columns, search_parameters, keywords)
+
+if __name__ == '__main__':
+    main()
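Review note: both new entry points replace "-" with "_" before building SQL, because roundup status names such as need-eg are not valid SQLite column names. A tiny sketch with example names only:

{{{
# Example only: map roundup status names to SQL-safe column names,
# as roundup_cc.py and roundup_cc_display.py do before calling rcd helpers.
statuses = "unread, need-eg, in-progress, done-cbb"
sql_columns = statuses.replace("-", "_")
print(sql_columns)   # unread, need_eg, in_progress, done_cbb
}}}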