Mercurial > roundup-cc
view collect_issues.py @ 28:e2864dabdb8c
fixes a logical error in the filtering of columns.
* The columns are stored in the pure order as they appear in the config.
* display_issues.py has been renamed to roundup_cc_display.py.
author | Magnus Schieder <mschieder@intevation.de> |
---|---|
date | Thu, 22 Nov 2018 12:57:20 +0100 |
parents | cdab667c6abb |
children | 0d6504d02a6b |
line wrap: on
line source
#!/usr/bin/env python3

"""Fetch issues from a roundup-tracker and save them in a database.

author: Sascha L. Teichmann <sascha.teichmann@intevation.de>
author: Bernhard Reiter <bernhard@intevation.de>
author: Sean Engelhardt <sean.engelhardt@intevation.de>

(c) 2010, 2015, 2018 by Intevation GmbH

This is Free Software under the terms of the
GNU GENERAL PUBLIC LICENSE Version 3 or later.
See http://www.gnu.org/licenses/gpl-3.0.txt for details

For usage see examples/.
"""

import http.cookiejar
import urllib.error
import urllib.parse
import urllib.request
import csv
import io
import sqlite3 as db
import os
import sys

# Getting keywords and their ids.
CHECK_KEYWORD_VALUES = "keyword?@action=export_csv&@columns=id,name"
# Getting states and their ids.
CHECK_STATUS_VALUES = "status?@action=export_csv&@columns=id,name"
# Getting priority and their ids.
CHECK_PRIO_VALUES = "priority?@action=export_csv&@columns=id,name"

# Getting the priority of each issue with the filter status, keywords, priority
SEARCH_ROUNDUP_PRIO = "issue?@action=export_csv&@columns=priority&@filter=status,keyword,priority&@pagesize=500&@startwith=0&status={status_values}&keyword={keyword_values}&priority={priority_values}"
# Getting the status of each issue with the filter keywords, status
SEARCH_ROUNDUP_STATUS = "issue?@action=export_csv&@columns=status&@filter=keyword&@pagesize=500&@startwith=0&keyword={keyword_values}&status={status_values}"


def connect_to_server(params, baseurl):
    """Log in to the roundup tracker and return a cookie-carrying opener.

    params  -- dict of login form fields (sent urlencoded as POST data)
    baseurl -- base URL of the tracker

    Returns an urllib OpenerDirector whose cookie jar holds the session.
    """
    enc_data = urllib.parse.urlencode(params).encode()
    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(
        urllib.request.HTTPCookieProcessor(cj))
    req = urllib.request.Request(url=baseurl, data=enc_data)
    opener.open(req)
    return opener


def get_csv_from_server(opener, roundup_url, sub_url):
    """Fetch ``roundup_url + sub_url`` and return a csv.DictReader over it."""
    csv_req = urllib.request.Request(url=roundup_url + sub_url)
    f = opener.open(csv_req)
    return csv.DictReader(io.TextIOWrapper(f))


def check_create_database(database_file, sql_create_db):
    """Create the sqlite database file if it does not exist yet.

    Runs ``sql_create_db`` once and makes the file world-readable (0o644).
    Rolls back and re-raises on any error during creation.
    """
    if os.path.isfile(database_file):
        return
    con = None
    cur = None
    try:
        con = db.connect(database_file)
        cur = con.cursor()
        try:
            cur.execute(sql_create_db)
            con.commit()
            os.chmod(database_file, 0o644)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt et al.
            # are not swallowed into a rollback path.
            con.rollback()
            raise
    finally:
        if cur:
            cur.close()
        if con:
            con.close()


def get_ids(opener, baseurl, parameter, url, include_no_prio=False):
    """Map the names in ``parameter`` to their tracker ids.

    parameter       -- list of names as configured (``[""]`` means "none
                       requested" and short-circuits to ``("", [])``)
    url             -- one of the CHECK_*_VALUES export URLs
    include_no_prio -- additionally allow the pseudo name "None" (id "-1")

    Returns ``(comma_joined_ids, ids_list)``.  Exits with status 1 if a
    name is unknown to the tracker.
    """
    if parameter == [""]:
        return ("", [])

    parameter_csv = get_csv_from_server(opener, baseurl, url)
    parameter_dict = {row["name"]: row["id"] for row in parameter_csv}
    if include_no_prio:
        parameter_dict["None"] = "-1"

    parameter_ids = []
    for name in parameter:
        if name not in parameter_dict:
            print('The parameter "%s" does not exist in the tracker.' % name)
            # Fixed: exit non-zero on error (was ``sys.exit(0)``), so shell
            # scripts and cron can detect the failure.
            sys.exit(1)
        parameter_ids.append(parameter_dict[name])

    return (",".join(parameter_ids), parameter_ids)


def issues_to_quantities(issue_csv, columns_ids):
    """Count issues per column id.

    Returns a list of ints, one per entry of ``columns_ids`` in order.
    Ids that are not plain digits (the "-1"/"None" pseudo id) are counted
    into the LAST slot; ids not listed in ``columns_ids`` are ignored.
    """
    order_dict = {col_id: pos for pos, col_id in enumerate(columns_ids)}
    quantities = [0] * len(columns_ids)

    for issue in issue_csv:
        value = issue[issue_csv.fieldnames[0]]
        if value in order_dict:
            if value.isdigit():
                quantities[order_dict[value]] += 1
            else:
                # "-1" (no priority) fails isdigit() because of the sign
                # and is accumulated in the trailing "None" bucket.
                quantities[-1] += 1

    return quantities


def save_issues_to_db(quantities, database_file, sql_create_db,
                      sql_insert_in_db):
    """Insert one row of ``quantities`` into the (possibly new) database."""
    check_create_database(database_file, sql_create_db)

    cur = None
    con = None
    try:
        con = db.connect(database_file)
        cur = con.cursor()
        try:
            cur.execute(sql_insert_in_db, quantities)
            con.commit()
        except Exception:
            con.rollback()
            raise
    finally:
        if cur:
            cur.close()
        if con:
            con.close()


def save_stats_in_db(search, login_parmeters, baseurl, db_file, columns,
                     sql_create_db, sql_insert_in_db, keywords, status,
                     include_no_prio):
    """Fetch the current issue counts from the tracker and store them.

    search -- "prio" (count per priority, filtered by status/keywords) or
              "status" (count per status, filtered by keywords)

    Prints an error message (without raising) if the tracker cannot be
    reached; raises ValueError for an unknown ``search`` mode.
    """
    try:
        opener = connect_to_server(login_parmeters, baseurl)

        keywords_ids_url, _ = get_ids(opener, baseurl, keywords,
                                      CHECK_KEYWORD_VALUES)

        if search == "prio":
            status_ids_url, _ = get_ids(opener, baseurl, status,
                                        CHECK_STATUS_VALUES, include_no_prio)
            prio_ids_url, columns_ids = get_ids(opener, baseurl, columns,
                                                CHECK_PRIO_VALUES,
                                                include_no_prio)
            formated_search_url = SEARCH_ROUNDUP_PRIO.format(
                status_values=status_ids_url,
                keyword_values=keywords_ids_url,
                priority_values=prio_ids_url)
        elif search == "status":
            status_ids_url, columns_ids = get_ids(opener, baseurl, columns,
                                                  CHECK_STATUS_VALUES)
            formated_search_url = SEARCH_ROUNDUP_STATUS.format(
                keyword_values=keywords_ids_url,
                status_values=status_ids_url)
        else:
            # Was an obscure NameError further down; fail with a clear message.
            raise ValueError('search must be "prio" or "status", got %r'
                             % (search,))

        current_issues_csv = get_csv_from_server(opener, baseurl,
                                                 formated_search_url)
        opener.close()

        quantities = issues_to_quantities(current_issues_csv, columns_ids)

        save_issues_to_db(quantities, db_file, sql_create_db,
                          sql_insert_in_db)

    except urllib.error.URLError as e:
        print("No Valid Connection to server : " + baseurl +
              "\nerror: " + str(e))