Mercurial > roundup-cc
diff collect_issues.py @ 28:e2864dabdb8c
Fixes a logical error in the filtering of columns.
* The columns are stored in the same order as they appear in the config.
* display_issues.py has been renamed to roundup_cc_display.py.
author | Magnus Schieder <mschieder@intevation.de> |
---|---|
date | Thu, 22 Nov 2018 12:57:20 +0100 |
parents | cdab667c6abb |
children | 0d6504d02a6b |
line wrap: on
line diff
--- a/collect_issues.py Tue Nov 13 21:04:22 2018 +0100 +++ b/collect_issues.py Thu Nov 22 12:57:20 2018 +0100 @@ -24,20 +24,17 @@ import sys -# Getting all priority in their order. -CHECK_ROUNDUP_ORDER_PRIO = "priority?@action=export_csv&@columns=id,order" -# Getting all statuses in their order. -CHECK_ROUNDUP_ORDER_STATUS = "status?@action=export_csv&@columns=id,order" - # Getting keywords and their ids. CHECK_KEYWORD_VALUES = "keyword?@action=export_csv&@columns=id,name" # Getting states and their ids. CHECK_STATUS_VALUES = "status?@action=export_csv&@columns=id,name" +# Getting priority and their ids. +CHECK_PRIO_VALUES = "priority?@action=export_csv&@columns=id,name" -# Getting the priority of each issue with the filter status and keywords -SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword&@pagesize=500&@startwith=0&status={search_values}&keyword={keyword_values}" -# Getting the status of each issue with the filter keywords -SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=keyword&@pagesize=500&@startwith=0&keyword={keyword_values}" +# Getting the priority of each issue with the filter status ,keywords ,priority +SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword,priority&@pagesize=500&@startwith=0&status={status_values}&keyword={keyword_values}&priority={priority_values}" +# Getting the status of each issue with the filter keywords, status +SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=keyword&@pagesize=500&@startwith=0&keyword={keyword_values}&status={status_values}" def connect_to_server(params, baseurl): @@ -76,15 +73,18 @@ if con: con.close() -def get_ids(opener, baseurl, parameter, url): +def get_ids(opener, baseurl, parameter, url, include_no_prio=False): if parameter == [""]: - return "" + return ("", []) parameter_csv = get_csv_from_server(opener, baseurl, url) parameter_dict = {} for x in parameter_csv: 
parameter_dict[x["name"]] = x["id"] + if include_no_prio: + parameter_dict["None"] = "-1" + parameter_ids = [] for x in parameter: if x not in parameter_dict: @@ -93,32 +93,33 @@ parameter_ids.append(parameter_dict[x]) - return ",".join(parameter_ids) + return (",".join(parameter_ids), parameter_ids) -def issues_to_quantities(issue_csv, columns, orders_csv): +def issues_to_quantities(issue_csv, columns_ids): """Count issues per priority. Returns: a list of ints, containing how often a prio occurred [:-1] in order of the priorities, with the last being the "None" prio """ - quantities = [0] * (len(columns) +1) order_dict = {} + z = 0 + for x in columns_ids: + order_dict[x] = z + z += 1 - #convert the csv-dict reader to real dict - for row in orders_csv: - order_dict[row["id"]] = int(float(row["order"])) # int(float()) because the order-value is indeed "1.0, 2.0" etc + quantities = [0] * z for issue in issue_csv: priority = issue[issue_csv.fieldnames[0]] - if priority.isdigit() == True : - quantities[order_dict[priority] -1 ] += 1 - else: # no priority set - quantities[-1] += 1 - - # print("quantities : " + str(quantities)) + if priority in order_dict: + if priority.isdigit() == True : + quantities[order_dict[priority]] += 1 + else: + quantities[-1] += 1 + #print("quantities : " + str(quantities)) return quantities @@ -145,28 +146,29 @@ con.close() -def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, status, include_no_prio=False): +def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns, sql_create_db, sql_insert_in_db, keywords, status, include_no_prio): try: opener = connect_to_server(login_parmeters, baseurl) - keywords_ids_url = get_ids(opener, baseurl, keywords, CHECK_KEYWORD_VALUES) + keywords_ids_url, _ = get_ids(opener, baseurl, keywords, CHECK_KEYWORD_VALUES) if search == "prio": - order_csv = get_csv_from_server(opener, baseurl, CHECK_ROUNDUP_ORDER_PRIO) - status_ids_url = 
get_ids(opener, baseurl, status, CHECK_STATUS_VALUES) - formated_search_url = SEARCH_ROUNDUP_PRIO.format(search_values=status_ids_url, keyword_values=keywords_ids_url) + status_ids_url, _ = get_ids(opener, baseurl, status, CHECK_STATUS_VALUES, include_no_prio) + prio_ids_url, columns_ids = get_ids(opener, baseurl, columns, CHECK_PRIO_VALUES, include_no_prio) + formated_search_url = SEARCH_ROUNDUP_PRIO.format(status_values=status_ids_url, + keyword_values=keywords_ids_url, priority_values=prio_ids_url) elif search == "status": - order_csv = get_csv_from_server(opener, baseurl, CHECK_ROUNDUP_ORDER_STATUS) - formated_search_url = SEARCH_ROUNDUP_STATUS.format(keyword_values=keywords_ids_url) + status_ids_url, columns_ids = get_ids(opener, baseurl, columns, CHECK_STATUS_VALUES) + formated_search_url = SEARCH_ROUNDUP_STATUS.format(keyword_values=keywords_ids_url, + status_values=status_ids_url) current_issues_csv = get_csv_from_server(opener, baseurl, formated_search_url) opener.close() + #print(baseurl + formated_search_url) - quantities = issues_to_quantities(current_issues_csv, columns, order_csv) - if not include_no_prio: - quantities = quantities[:-1] + quantities = issues_to_quantities(current_issues_csv, columns_ids) save_issues_to_db(quantities, db_file, sql_create_db, sql_insert_in_db)