From e772b97eecd97a11152458810f8e8a076e4db29d Mon Sep 17 00:00:00 2001
From: Bryton Lacquement <bryton.lacquement@nexedi.com>
Date: Thu, 13 Sep 2018 16:31:27 +0200
Subject: [PATCH] fixup! erp5.util: add support for Python 3

31804f683fd36322fb38aeb9654bee70cebe4fdb was merged too soon: it still
contained Python-2-only constructs. Replace the remaining old-style
"except X, e" clauses, dict.iteritems() calls, renamed stdlib imports
(urllib/urllib2, xmlrpclib, Cookie) and implicit integer divisions with
forms that run on both Python 2 and Python 3, mostly through six, and
narrow bare "except:" clauses to "except Exception:".

/reviewed-on https://lab.nexedi.com/nexedi/erp5/merge_requests/913
---
 erp5/util/benchmark/argument.py               |  2 +-
 erp5/util/benchmark/performance_tester.py     |  4 ++--
 erp5/util/benchmark/process.py                | 12 +++++-----
 erp5/util/benchmark/report.py                 | 19 +++++++++-------
 erp5/util/benchmark/result.py                 |  2 +-
 erp5/util/benchmark/scalability_tester.py     | 10 ++++-----
 .../scalability/runScalabilityTestSuite.py    |  8 ++++---
 erp5/util/taskdistribution/__init__.py        |  2 +-
 erp5/util/testbrowser/browser.py              | 16 +++++---------
 .../util/testbrowser/examples/createPerson.py |  3 ++-
 erp5/util/testnode/ScalabilityTestRunner.py   |  4 ++--
 .../util/testnode/SlapOSMasterCommunicator.py |  2 +-
 erp5/util/timinglogparser/__init__.py         | 22 +++++++++----------
 erp5/util/timinglogplotter/__init__.py        |  6 ++---
 erp5/util/webchecker/__init__.py              |  3 ++-
 15 files changed, 59 insertions(+), 56 deletions(-)
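
Note (not part of the applied diff): a minimal sketch of the Python 2/3
compatibility idioms this patch applies throughout erp5.util. It assumes
six is installed; the function and variable names are made up for the
example.

    import six
    from six.moves.urllib.parse import urlencode  # Python 2: urllib.urlencode

    def summarize(counts_by_label):
        # "except ... as e" parses on both Python 2 and 3; the old
        # "except Exception, e" form is a SyntaxError on Python 3.
        try:
            query = urlencode(counts_by_label)
        except Exception as e:
            raise ValueError("cannot encode counts: %s" % e)
        # dict.iteritems() does not exist on Python 3; six.iteritems()
        # returns the right iterator on either interpreter.
        total = sum(count for _, count in six.iteritems(counts_by_label))
        # "//" keeps floor division even where true division is enabled
        # with "from __future__ import division".
        return query, total // max(len(counts_by_label), 1)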

diff --git a/erp5/util/benchmark/argument.py b/erp5/util/benchmark/argument.py
index 3dd4efe848..d944aab4a5 100644
--- a/erp5/util/benchmark/argument.py
+++ b/erp5/util/benchmark/argument.py
@@ -57,7 +57,7 @@ class ArgumentType(object):
 
     try:
       module = __import__(module_name, globals(), locals(), [object_name], -1)
-    except Exception, e:
+    except Exception as e:
       raise argparse.ArgumentTypeError("Cannot import '%s.%s': %s" % \
                                          (module_name, object_name, str(e)))
 
diff --git a/erp5/util/benchmark/performance_tester.py b/erp5/util/benchmark/performance_tester.py
index c821849b76..6871fdb483 100755
--- a/erp5/util/benchmark/performance_tester.py
+++ b/erp5/util/benchmark/performance_tester.py
@@ -264,7 +264,7 @@ class PerformanceTester(object):
       try:
         error_message = exit_msg_queue.get()
 
-      except KeyboardInterrupt, e:
+      except KeyboardInterrupt as e:
         print("\nInterrupted by user, stopping gracefully...", file=sys.stderr)
         exit_status = 2
 
@@ -272,7 +272,7 @@ class PerformanceTester(object):
       # blocking system call above and the system call should not be restarted
       # (using siginterrupt), otherwise the  process will stall forever as its
       # child has already exited
-      except IOError, e:
+      except IOError as e:
         if e.errno == errno.EINTR:
           continue
 
diff --git a/erp5/util/benchmark/process.py b/erp5/util/benchmark/process.py
index e47d3539ca..47828452d7 100644
--- a/erp5/util/benchmark/process.py
+++ b/erp5/util/benchmark/process.py
@@ -88,13 +88,13 @@ class BenchmarkProcess(multiprocessing.Process):
         target(result, self._browser)
       except StopIteration:
         raise
-      except Exception, e:
+      except Exception as e:
         self._logger.info("Exception while running target suite for user %s: %s" % (self._browser._username, str(e)))
         msg = "%s: %s" % (target, traceback.format_exc())
         try:
           msg += "Last response headers:\n%s\nLast response contents:\n%s" % \
               (self._browser.headers, self._browser.contents)
-        except:
+        except Exception:
           pass
 
         self._error_counter += 1
@@ -123,7 +123,7 @@ class BenchmarkProcess(multiprocessing.Process):
 
       try:
         self._logger.info(str(result.getCurrentSuiteUseCaseStat()))
-      except:
+      except Exception:
         pass
 
     result.iterationFinished()
@@ -183,14 +183,14 @@ class BenchmarkProcess(multiprocessing.Process):
             runIteration(result)
             self._current_repeat += 1
 
-    except StopIteration, e:
+    except StopIteration as e:
       self._logger.error(e)
 
-    except RuntimeError, e:
+    except RuntimeError as e:
       exit_msg = str(e)
       exit_status = 1
 
-    except BaseException, e:
+    except BaseException as e:
       exit_msg = traceback.format_exc()
       self._logger.error(exit_msg)
       exit_status = 2
diff --git a/erp5/util/benchmark/report.py b/erp5/util/benchmark/report.py
index 0755b422a4..70147602e3 100755
--- a/erp5/util/benchmark/report.py
+++ b/erp5/util/benchmark/report.py
@@ -34,6 +34,7 @@
 from __future__ import print_function
 import argparse
 import re
+import six
 
 def parseArguments():
   parser = argparse.ArgumentParser(
@@ -98,13 +99,15 @@ def computeStatisticFromFilenameList(argument_namespace, filename_list,
   merged_label_dict = {}
 
   for filename in filename_list:
-    reader = csv.reader(open(filename, 'rb'), delimiter=',',
+    reader = csv.reader(open(filename, 'r'), delimiter=',',
                         quoting=csv.QUOTE_MINIMAL)
-
     reader_list.append(reader)
 
     # Get headers
-    row_list = [ unicode(row, 'utf-8') for row in reader.next() ]
+    if str is bytes:
+      row_list = [row.decode('utf-8') for row in next(reader)]
+    else:
+      row_list = list(next(reader))
     if not label_list:
       label_list = row_list
       label_merged_index = 0
@@ -156,8 +159,8 @@ def computeStatisticFromFilenameList(argument_namespace, filename_list,
             report_dict['results'].setdefault(stat.full_label, []).append(stat)
 
     if row_list != label_list:
-      raise AssertionError, "ERROR: Result labels: %s != %s" % \
-          (label_list, row_list)
+      raise AssertionError("ERROR: Result labels: %s != %s" %
+          (label_list, row_list))
 
     iteration_index = 0
     for row_list in reader:
@@ -169,7 +172,7 @@ def computeStatisticFromFilenameList(argument_namespace, filename_list,
         use_case_suite = row_use_case_mapping_dict.get(idx, None)
         if use_case_suite:
           current_count = int(row)
-          current_duration = float(row_iter.next()[1]) / 3600.0
+          current_duration = float(next(row_iter)[1]) / 3600
           if not current_count:
             continue
 
@@ -587,8 +590,8 @@ def generateReport():
         (nb_users_list[0],
          nb_users_list[-1])
 
-    for suite_name, report_dict in range_user_report_dict.iteritems():
-      for label, stat_list in report_dict['results'].iteritems():
+    for suite_name, report_dict in six.iteritems(range_user_report_dict):
+      for label, stat_list in six.iteritems(report_dict['results']):
         drawConcurrentUsersPlot(
           pdf,
           title_fmt % label,
diff --git a/erp5/util/benchmark/result.py b/erp5/util/benchmark/result.py
index f7724863c5..30cd0bc410 100644
--- a/erp5/util/benchmark/result.py
+++ b/erp5/util/benchmark/result.py
@@ -334,7 +334,7 @@ class CSVBenchmarkResult(BenchmarkResult):
 
 from cStringIO import StringIO
 
-import xmlrpclib
+from six.moves import xmlrpc_client as xmlrpclib
 import datetime
 
 class ERP5BenchmarkResult(BenchmarkResult):
diff --git a/erp5/util/benchmark/scalability_tester.py b/erp5/util/benchmark/scalability_tester.py
index 37e47b3963..0e4ae755b6 100644
--- a/erp5/util/benchmark/scalability_tester.py
+++ b/erp5/util/benchmark/scalability_tester.py
@@ -51,16 +51,16 @@ class ScalabilityTester(PerformanceTester):
   def postRun(self, error_message_set):
     from logging import Formatter
     import sys
-    import urllib
-    import urllib2
+    from six.moves.urllib.parse import urlencode
+    from six.moves.urllib.request import urlopen
 
     try:
-      urllib2.urlopen("http://[%s]:%d/report" % \
+      urlopen("http://[%s]:%d/report" % \
                         (self._argument_namespace.manager_address,
                          self._argument_namespace.manager_port),
-                      urllib.urlencode({'error_message_set': '|'.join(error_message_set)})).close()
+                      urlencode({'error_message_set': '|'.join(error_message_set)}).encode('utf-8')).close()
 
-    except:
+    except Exception:
       print("ERROR: %s" % Formatter().formatException(sys.exc_info()), file=sys.stderr)
 
   def getResultClass(self):
diff --git a/erp5/util/scalability/runScalabilityTestSuite.py b/erp5/util/scalability/runScalabilityTestSuite.py
index a8767cdb42..b6f10b376e 100644
--- a/erp5/util/scalability/runScalabilityTestSuite.py
+++ b/erp5/util/scalability/runScalabilityTestSuite.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import division
+
 import argparse
 import os
 import shutil
@@ -253,7 +255,7 @@ class ScalabilityLauncher(object):
       log_file_name_prefix = "%s_%s_suite_%s" %(LOG_FILE_PREFIX, current_test.title, test_suite)
       command_list.append([tester_path,
                            instance_url,
-                           str(user_quantity/len(test_suite_list)),
+                           str(user_quantity//len(test_suite_list)),
                            test_suite,
                            '--benchmark-path-list', benchmarks_path,
                            '--users-file-path', user_file_path,
@@ -264,7 +266,7 @@ class ScalabilityLauncher(object):
                            '--user-index', str(user_index),
                            "--duration", "%d"%test_duration,
                          ])
-      user_index += user_quantity/len(test_suite_list)
+      user_index += user_quantity//len(test_suite_list)
     # Launch commands
     exec_env = os.environ.copy()
     exec_env.update({'raise_error_if_fail': False})
@@ -298,7 +300,7 @@ class ScalabilityLauncher(object):
                     "tests=%s\n"\
                     "duration=%d\n"\
                     %(
-                      (user_quantity/len(test_suite_list))*len(test_suite_list),
+                      (user_quantity//len(test_suite_list))*len(test_suite_list),
                       len(test_suite_list),
                       '_'.join(test_suite_list),
                       test_duration
diff --git a/erp5/util/taskdistribution/__init__.py b/erp5/util/taskdistribution/__init__.py
index f0e6262a56..61442b0d9a 100644
--- a/erp5/util/taskdistribution/__init__.py
+++ b/erp5/util/taskdistribution/__init__.py
@@ -170,7 +170,7 @@ class TestResultLineProxy(RPCRetry):
         try:
           return bool(self._retryRPC('isTestCaseAlive',
                                      (self._test_result_line_path,)))
-        except:
+        except Exception:
           raise ValueError('isTestCaseAlive Failed.')
 
     def stop(self, test_count=None, error_count=None, failure_count=None,
diff --git a/erp5/util/testbrowser/browser.py b/erp5/util/testbrowser/browser.py
index 96760699b4..2971142137 100644
--- a/erp5/util/testbrowser/browser.py
+++ b/erp5/util/testbrowser/browser.py
@@ -32,8 +32,8 @@
 
 import logging
 import sys
-import urllib
-import Cookie
+from six.moves.urllib.parse import urlencode
+from six.moves import http_cookies as Cookie
 import re
 
 from zope.testbrowser._compat import urlparse
@@ -205,7 +205,7 @@ class Browser(ExtendedTestBrowser):
       url_or_path = urlparse.urljoin(self._erp5_base_url, url_or_path)
 
     if isinstance(data, dict):
-      data = urllib.urlencode(data)
+      data = urlencode(data)
 
     self._logger.debug("Opening: " + url_or_path)
     super(Browser, self).open(url_or_path, data)
@@ -274,7 +274,7 @@ class Browser(ExtendedTestBrowser):
           location_without_query_string, query_string = location.split('?')
           location = (
             location_without_query_string +
-            '?' + urllib.urlencode(urlparse.parse_qs(query_string,
+            '?' + urlencode(urlparse.parse_qs(query_string,
                                                      strict_parsing=True),
                                    doseq=True))
         # END: Bugfix
@@ -318,7 +318,7 @@ class Browser(ExtendedTestBrowser):
       url_or_path = urlparse.urljoin(self._erp5_base_url, url_or_path)
 
     if isinstance(data, dict):
-      data = urllib.urlencode(data)
+      data = urlencode(data)
 
     url = self._absoluteUrl(url_or_path)
     self._logger.debug("Opening: " + url)
@@ -365,11 +365,7 @@ class Browser(ExtendedTestBrowser):
     @return: Cookie value
     @rtype: str
     """
-    for cookie_name, cookie_value in self.cookies.iteritems():
-      if name == cookie_name:
-        return cookie_value
-
-    return default
+    return self.cookies.get(name, default)
 
   @property
   def mainForm(self):
diff --git a/erp5/util/testbrowser/examples/createPerson.py b/erp5/util/testbrowser/examples/createPerson.py
index 1dade9efc2..9560e60dd1 100755
--- a/erp5/util/testbrowser/examples/createPerson.py
+++ b/erp5/util/testbrowser/examples/createPerson.py
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 
 from __future__ import division, print_function
+import six
 from erp5.util.testbrowser.browser import Browser
 
 ITERATION = 20
@@ -89,5 +90,5 @@ if __name__ == '__main__':
     benchmarkAddPerson(counter, result_dict)
     counter += 1
 
-  for title, time_list in result_dict.iteritems():
+  for title, time_list in six.iteritems(result_dict):
     print("%s: %.4fs" % (title, sum(time_list) / ITERATION))
diff --git a/erp5/util/testnode/ScalabilityTestRunner.py b/erp5/util/testnode/ScalabilityTestRunner.py
index d917f05318..53484600ca 100644
--- a/erp5/util/testnode/ScalabilityTestRunner.py
+++ b/erp5/util/testnode/ScalabilityTestRunner.py
@@ -104,7 +104,7 @@ class ScalabilityTestRunner():
       self.slapos_url = self.testnode.taskdistribution.getSlaposUrl()
       if not self.slapos_url:
         self.slapos_url = self.testnode.config['server_url']
-    except:
+    except Exception:
       self.slapos_url = self.testnode.config['server_url']
     
     # Get Slapos Master url used for api rest (using hateoas)
@@ -458,7 +458,7 @@ Require valid-user
         suite_class = getattr(module, test_suite)
         suite = suite_class(**kwargs)
         repo_location = "%s/%s/" % (location, SCALABILITY_TEST)
-      except:
+      except Exception:
         pass
     return suite, repo_location
 
diff --git a/erp5/util/testnode/SlapOSMasterCommunicator.py b/erp5/util/testnode/SlapOSMasterCommunicator.py
index 2f79aac925..16952f7549 100644
--- a/erp5/util/testnode/SlapOSMasterCommunicator.py
+++ b/erp5/util/testnode/SlapOSMasterCommunicator.py
@@ -363,7 +363,7 @@ class SlapOSTester(SlapOSMasterCommunicator):
     def getInstanceGuid():
       try:
         return self.instance.getInstanceGuid()
-      except:
+      except Exception:
         return None
     frontend_master_ipv6 = None
     instance_guid = None
diff --git a/erp5/util/timinglogparser/__init__.py b/erp5/util/timinglogparser/__init__.py
index e4763d3b36..0e43ef3ec0 100755
--- a/erp5/util/timinglogparser/__init__.py
+++ b/erp5/util/timinglogparser/__init__.py
@@ -35,6 +35,7 @@ import imp
 import gzip
 import getopt
 from time import time
+import six
 
 PROFILING_ENABLED = False
 if PROFILING_ENABLED:
@@ -150,7 +151,7 @@ def parseFile(filename, measure_dict):
   if line_number > 0:
     duration = time() - begin
     print("Matched %i lines (%.2f%%), %i skipped (%.2f%%), %i unmatched (%.2f%%) in %.2fs (%i lines per second)." % \
-      (match_count, (match_count / line_number) * 100, skip_count, (skip_count / line_number) * 100, (line_number - match_count - skip_count), (1 - (match_count + skip_count) / line_number)) * 100, duration, line_number // duration),
+      (match_count, (match_count / line_number) * 100, skip_count, (skip_count / line_number) * 100, (line_number - match_count - skip_count), (1 - (match_count + skip_count) / line_number) * 100, duration, line_number // duration),
       file=sys.stderr)
 
 debug = False
@@ -209,9 +210,9 @@ if len(load_file_name_list):
     with open(load_file_name) as load_file:
       temp_measure_dict = eval(load_file.read(), {})
     assert isinstance(measure_dict, dict)
-    for filter_id, result_dict in temp_measure_dict.iteritems():
-      for result, date_dict in result_dict.iteritems():
-        for date, duration_list in date_dict.iteritems():
+    for filter_id, result_dict in six.iteritems(temp_measure_dict):
+      for result, date_dict in six.iteritems(result_dict):
+        for date, duration_list in six.iteritems(date_dict):
           measure_dict.setdefault(filter_id, {}).setdefault(result, {}).setdefault(date, []).extend(duration_list)
     print('Previous processing result restored from %r' % (load_file_name, ), file=sys.stderr)
 
@@ -231,18 +232,17 @@ if outfile_prefix is not None:
   append = measure_id_list.append
   sheet_dict = {}
   line_dict = {}
-  for match_id, match_dict in measure_dict.iteritems():
-    for result_id, result_dict in match_dict.iteritems():
+  for match_id, match_dict in six.iteritems(measure_dict):
+    for result_id, result_dict in six.iteritems(match_dict):
       measure_id = (match_id, result_id)
       sheet_dict.setdefault(match_id, []).append((result_id, measure_id))
       append(measure_id)
-      for date, measure_list in result_dict.iteritems():
+      for date, measure_list in six.iteritems(result_dict):
         first_level_dict = line_dict.setdefault(date, {})
         assert measure_id not in first_level_dict
         first_level_dict[measure_id] = measure_list
 
-  date_list = line_dict.keys()
-  date_list.sort(key=date_key)
+  date_list = sorted(line_dict, key=date_key)
 
   def render_cell(value_list, format):
     if isinstance(value_list, (list, tuple)):
@@ -251,7 +251,7 @@ if outfile_prefix is not None:
       return value_list
 
   def renderOutput(data_format, filename_suffix):
-    for sheet_id, sheet_column_list in sheet_dict.iteritems():
+    for sheet_id, sheet_column_list in six.iteritems(sheet_dict):
       outfile_name = '%s_%s_%s.csv' % (outfile_prefix, sheet_id, filename_suffix)
       print('Writing to %r...' % (outfile_name, ), file=sys.stderr)
       with open(outfile_name, 'w') as outfile:
@@ -259,7 +259,7 @@ if outfile_prefix is not None:
         decimate_dict = {}
         decimate = 0
         for date in date_list:
-          for key, value in line_dict[date].iteritems():
+          for key, value in six.iteritems(line_dict[date]):
             decimate_dict.setdefault(key, []).extend(value)
           decimate += 1
           if decimate == decimate_count:
diff --git a/erp5/util/timinglogplotter/__init__.py b/erp5/util/timinglogplotter/__init__.py
index 2351ae6019..74a9097b4d 100755
--- a/erp5/util/timinglogplotter/__init__.py
+++ b/erp5/util/timinglogplotter/__init__.py
@@ -84,9 +84,9 @@ class CSVFile(object):
             if cell > value_max.get(key, 0):
               value_max[key] = cell
         column_dict[key].append(cell)
-    line_num = line_num / 100
+    line_num /= 100
     for key in ratio_dict:
-      ratio_dict[key] //= line_num
+      ratio_dict[key] /= line_num
 
   def getColumn(self, column_id):
     return self.column_dict[self.column_list[column_id]]
@@ -136,7 +136,7 @@ def main():
     # date_list will be like ['2009/07/01', '2009/07/05', '2009/07/10', ...]
     factor = 1
     if len(date_string_list) > 20:
-      factor = int(len(date_string_list) // 20)
+      factor = len(date_string_list) // 20
     i = 0
     for date_string in date_string_list:
       if i % factor == 0:
diff --git a/erp5/util/webchecker/__init__.py b/erp5/util/webchecker/__init__.py
index cd255c2417..1a6c884544 100644
--- a/erp5/util/webchecker/__init__.py
+++ b/erp5/util/webchecker/__init__.py
@@ -39,6 +39,7 @@ import tempfile
 from datetime import datetime
 import threading
 import signal
+import six
 
 _MARKER = []
 
@@ -517,7 +518,7 @@ class HTTPCacheCheckerTestSuite(object):
     logging.info('End of second pass\n')
     if self.report_dict:
       report_message_list = ['*Errors*:']
-      for url, message_list in self.report_dict.iteritems():
+      for url, message_list in six.iteritems(self.report_dict):
         unique_message_list = []
         for message in message_list:
           if message not in unique_message_list:
-- 
2.30.9