roundup-cc: changeset 31:9aca070c86bd
Add the filter 'priority' for the search by status.
author   | Magnus Schieder <mschieder@intevation.de>
date     | Thu, 22 Nov 2018 19:34:53 +0100
parents  | 0d6504d02a6b
children | 80bbd06fe8ec
files    | collect_issues.py display_issues.py graph.html roundup_cc.py roundup_cc_display.py
diffstat | 5 files changed, 43 insertions(+), 26 deletions(-)
--- a/collect_issues.py Thu Nov 22 14:07:55 2018 +0100
+++ b/collect_issues.py Thu Nov 22 19:34:53 2018 +0100
@@ -34,7 +34,7 @@
 # Getting the priority of each issue with the filter status ,keywords ,priority
 SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword,priority&@pagesize=500&@startwith=0&status={status_values}&keyword={keyword_values}&priority={priority_values}"
 # Getting the status of each issue with the filter keywords, status
-SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=keyword,status&@pagesize=500&@startwith=0&keyword={keyword_values}&status={status_values}"
+SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=priority,keyword,status&@pagesize=500&@startwith=0&priority={priority_values}&keyword={keyword_values}&status={status_values}"
 
 
 def connect_to_server(params, baseurl):
@@ -146,27 +146,41 @@
             con.close()
 
 
-def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, status, include_no_prio):
+def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, search_parameters, include_no_prio):
     try:
         opener = connect_to_server(login_parmeters, baseurl)
-        keywords_ids_url, _ = get_ids(opener, baseurl, keywords, CHECK_KEYWORD_VALUES)
+        keywords_ids_url, _ = get_ids(opener, baseurl, keywords,
+                                      CHECK_KEYWORD_VALUES)
 
         if search == "prio":
-            status_ids_url, _ = get_ids(opener, baseurl, status, CHECK_STATUS_VALUES, include_no_prio)
-            prio_ids_url, columns_ids = get_ids(opener, baseurl, columns, CHECK_PRIO_VALUES, include_no_prio)
-            formated_search_url = SEARCH_ROUNDUP_PRIO.format(status_values=status_ids_url,
-                    keyword_values=keywords_ids_url, priority_values=prio_ids_url)
+            # search_parameters are statuses.
+            status_ids_url, _ = get_ids(opener, baseurl,search_parameters ,
+                                        CHECK_STATUS_VALUES, include_no_prio)
+            prio_ids_url, columns_ids = get_ids(opener, baseurl, columns,
+                                                CHECK_PRIO_VALUES, include_no_prio)
+            formated_search_url = SEARCH_ROUNDUP_PRIO.format(
+                    status_values=status_ids_url,
+                    keyword_values=keywords_ids_url,
+                    priority_values=prio_ids_url)
+
         elif search == "status":
-            status_ids_url, columns_ids = get_ids(opener, baseurl, columns, CHECK_STATUS_VALUES)
-            formated_search_url = SEARCH_ROUNDUP_STATUS.format(keyword_values=keywords_ids_url,
+            # search_parameters are priorities.
+            prio_ids_url, _ = get_ids(opener, baseurl, search_parameters,
+                                      CHECK_PRIO_VALUES, include_no_prio)
+            status_ids_url, columns_ids = get_ids(opener, baseurl, columns,
+                                                  CHECK_STATUS_VALUES)
+            formated_search_url = SEARCH_ROUNDUP_STATUS.format(
+                    priority_values=prio_ids_url,
+                    keyword_values=keywords_ids_url,
                     status_values=status_ids_url)
 
-        current_issues_csv = get_csv_from_server(opener, baseurl, formated_search_url)
+        current_issues_csv = get_csv_from_server(opener, baseurl,
+                                                 formated_search_url)
 
         opener.close()
 
-        #print(baseurl + formated_search_url)
+        print(baseurl + formated_search_url)
 
         quantities = issues_to_quantities(current_issues_csv, columns_ids)
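The core of this change is the extended SEARCH_ROUNDUP_STATUS template: the status search now also carries a priority filter. Below is a minimal sketch of how the template gets filled in; the id strings are placeholders, since in collect_issues.py the real ids come from get_ids() against the live roundup tracker.

    # Minimal sketch of filling the extended SEARCH_ROUNDUP_STATUS template.
    # The id strings are placeholders; collect_issues.py obtains the real
    # ids via get_ids() from the roundup tracker.
    SEARCH_ROUNDUP_STATUS = (
        "issue?@action=export_csv&@columns=status&@filter=priority,keyword,status"
        "&@pagesize=500&@startwith=0&priority={priority_values}"
        "&keyword={keyword_values}&status={status_values}"
    )

    url = SEARCH_ROUNDUP_STATUS.format(
        priority_values="1,2",    # placeholder priority ids
        keyword_values="",        # no keyword restriction
        status_values="1,2,3",    # placeholder status ids
    )
    print(url)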
--- a/display_issues.py Thu Nov 22 14:07:55 2018 +0100
+++ b/display_issues.py Thu Nov 22 19:34:53 2018 +0100
@@ -40,7 +40,7 @@
     return ", ".join(formated)
 
 
-def get_webpage(data_dict, columns, status, keywords, graph=None):
+def get_webpage(data_dict, columns, search_parameters, keywords, graph=None):
 
     if graph is None:
         graph = os.path.dirname(os.path.realpath(__file__)) + '/graph.html'
@@ -58,14 +58,14 @@
     js_data_dickt += "}"
 
     base_html_data = (base_html_data
-        .replace("status", status)
+        .replace("search_parameters", search_parameters)
         .replace("keywords", keywords)
        .replace("js_data_dickt", js_data_dickt)
        .replace("var timestamp=[];", "var timestamp=[" + make_js_object_date(data_dict["date"]) + "]"))
 
     return base_html_data
 
 
-def compile_db_stats_html(db_file, sql_select, columns, status="", keywords="", graph=None):
+def compile_db_stats_html(db_file, sql_select, columns, search_parameters="", keywords="", graph=None):
     data_dict = {"date": []}
     status_list = columns.split(", ")
@@ -91,14 +91,14 @@
         if con:
             con.close()
 
-    return get_webpage(data_dict, columns, status, keywords, graph)
+    return get_webpage(data_dict, columns, search_parameters, keywords, graph)
 
 
 def render_webpage(content):
     for line in content.split("\n"):
         print(line)
 
 
-def render_db_stats_as_html(db_file, sql_select, columns, status="", keywords=""):
-    render_webpage(compile_db_stats_html(db_file, sql_select, columns, status, keywords))
+def render_db_stats_as_html(db_file, sql_select, columns, search_parameters="", keywords=""):
+    render_webpage(compile_db_stats_html(db_file, sql_select, columns, search_parameters, keywords))
 
 
 if __name__ == '__main__':
     cgitb.enable()
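In display_issues.py the renamed parameter is written into the graph.html template by plain string replacement. A short sketch of that substitution, using an inline template string instead of the real graph.html and example filter text instead of configured values:

    # Sketch of the placeholder substitution performed in get_webpage().
    # An inline string stands in for graph.html; the filter values are
    # example text only.
    base_html_data = ("<p>Search parameters: search_parameters</p>\n"
                      "<p>Keywords: keywords</p>")

    base_html_data = (base_html_data
                      .replace("search_parameters", "critical, urgent")
                      .replace("keywords", "backend"))
    print(base_html_data)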
--- a/graph.html Thu Nov 22 14:07:55 2018 +0100
+++ b/graph.html Thu Nov 22 19:34:53 2018 +0100
@@ -111,7 +111,7 @@
     </head>
     <body>
         <h1>Filter</h1>
-        <p>States: status</p>
+        <p>Search parameters: search_parameters</p>
         <p>Keywords: keywords</p>
         <div id="content" style="display: inline-block"></div>
         <script type="text/javascript" src="d3.v3.min.js"></script>
--- a/roundup_cc.py Thu Nov 22 14:07:55 2018 +0100
+++ b/roundup_cc.py Thu Nov 22 19:34:53 2018 +0100
@@ -36,19 +36,22 @@
     database_file = config.get("DB", "DatabaseFile")
 
     keywords = config.get("SEARCH", "Keywords", fallback="").split(", ")
+    include_no_prio = config.getboolean("SEARCH", "IncludeNoPrio", fallback= False)
 
     search = config.get("SEARCH", "Search", fallback="prio")
     if search == "prio":
         list_of_columns = config.get("SEARCH", "Priority", fallback=PRIO).split(", ")
-        status = config.get("SEARCH", "Status", fallback="").split(", ")
-        include_no_prio = config.getboolean("SEARCH", "IncludeNoPrio", fallback= False)
+        search_parameters = config.get("SEARCH", "Status", fallback="").split(", ")
         if include_no_prio:
             list_of_columns += ["None"]
 
     elif search == "status":
         list_of_columns = config.get("SEARCH", "Status", fallback=STATES).split(", ")
-        status = [""]
-        include_no_prio = False
+        # statsu = columns Must change
+        search_parameters = config.get("SEARCH", "Priority", fallback="").split(", ")
+        if include_no_prio:
+            search_parameters += ["None"]
+
     else:
         print("Incorrect [SEARCH]Search parameter. (prio, status)")
 
@@ -63,7 +66,7 @@
     rcd.build_sql_commands(sql_list_of_columns)
 
     save_stats_in_db(search, login_parameters, base_url, database_file,
-            list_of_columns, create_db, insert_new, keywords, status, include_no_prio)
+            list_of_columns, create_db, insert_new, keywords, search_parameters, include_no_prio)
 
 if __name__ == '__main__':
     main()
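roundup_cc.py now reads IncludeNoPrio for both search modes and, for the status search, takes its filter values from the Priority option instead of leaving them empty. The sketch below feeds example [SEARCH] options to configparser and derives the columns and search parameters the same way the script does for Search = status; the option names come from the diff above, while the concrete values are invented for illustration.

    # Hedged sketch: roundup_cc.py-style [SEARCH] options and how they map
    # onto columns and search_parameters when Search = status.  Option names
    # come from the diff above; the values are made up for illustration.
    import configparser

    config = configparser.ConfigParser()
    config.read_dict({"SEARCH": {
        "Search": "status",
        "Status": "open, resolved",       # becomes the displayed columns
        "Priority": "critical, urgent",   # becomes the search filter
        "IncludeNoPrio": "True",
    }})

    include_no_prio = config.getboolean("SEARCH", "IncludeNoPrio", fallback=False)
    if config.get("SEARCH", "Search", fallback="prio") == "status":
        list_of_columns = config.get("SEARCH", "Status", fallback="").split(", ")
        search_parameters = config.get("SEARCH", "Priority", fallback="").split(", ")
        if include_no_prio:
            search_parameters += ["None"]
        print(list_of_columns)      # ['open', 'resolved']
        print(search_parameters)    # ['critical', 'urgent', 'None']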
--- a/roundup_cc_display.py Thu Nov 22 14:07:55 2018 +0100
+++ b/roundup_cc_display.py Thu Nov 22 19:34:53 2018 +0100
@@ -33,7 +33,7 @@
 
     search = config.get("SEARCH", "Search", fallback="prio")
     if search == "prio":
-        status = config.get("SEARCH", "Status", fallback="")
+        search_parameters = config.get("SEARCH", "Status", fallback="")
         columns = config.get("SEARCH", "Priority", fallback=PRIO)
         noPrio = config.get("SEARCH", "IncludeNoPrio", fallback=False)
         if noPrio:
@@ -41,7 +41,7 @@
 
     elif search == "status":
         columns = config.get("SEARCH", "Status", fallback=STATES)
-        status = ""
+        search_parameters = config.get("SEARCH", "Priority", fallback="")
 
     else:
         print("Incorrect [SEARCH]Search parameter. (prio, status)")
@@ -54,7 +54,7 @@
     #render_db_stats_as_html("./demo1.db", rcd.SELECT_ALL)
     render_db_stats_as_html(db,
            rcd.build_sql_select(columns).format("timestamp > date('now', '-2 month')"),
-            columns, status, keywords)
+            columns, search_parameters, keywords)
 
 if __name__ == '__main__':
     main()