summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CHANGELOG12
-rw-r--r--Makefile7
-rw-r--r--Reflector.py482
-rw-r--r--man/reflector.1.gzbin892 -> 892 bytes
-rwxr-xr-xreflector6
-rw-r--r--setup.py15
6 files changed, 383 insertions, 139 deletions
diff --git a/CHANGELOG b/CHANGELOG
index 09ee7a9..12e1177 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,15 @@
+# 2020-12-20
+* Added support for setting country sort order with the "--country" option.
+* Added headers to table list displayed with "--list-countries".
+
+# 2020-12-07
+* Restored download timeout option.
+
+# 2020-12-03
+* Removed thread support from mirror speed test to avoid skewing results on saturated connections.
+* Changed the speed test target file to the community database for more reliable results.
+* Addressed some pylint warnings.
+
# 2020-08-20
* Added support for comma-separated values in list arguments (country, protocol).
* Added support for argument files with the "@" prefix.
diff --git a/Makefile b/Makefile
index afd9ce3..4790ccc 100644
--- a/Makefile
+++ b/Makefile
@@ -5,6 +5,9 @@ dist:
gpg --detach-sign --use-agent reflector32-$(V).tar.gz
upload:
- scp reflector32-$(V).tar.gz reflector32-$(V).tar.gz.sig sources.archlinux32.org:sources/
+ scp reflector32-$(V).tar.gz reflector32-$(V).tar.gz.sig sources.archlinux32.org:httpdocs/sources/
-.PHONY: dist upload
+clean:
+ rm reflector32-$(V).tar.gz reflector32-$(V).tar.gz.sig
+
+.PHONY: dist upload clean
diff --git a/Reflector.py b/Reflector.py
index 65e1f76..dee77dd 100644
--- a/Reflector.py
+++ b/Reflector.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python3
-# -*- encoding: utf-8 -*-
-# Ignore the invalid snake-case error for the module name.
-# pylint: disable=invalid-name
+
+# Ignore the invalid snake-case error for the module name and the number of
+# lines.
+# pylint: disable=invalid-name,too-many-lines
# Copyright (C) 2012-2020 Xyne
#
@@ -31,16 +32,16 @@ import http.client
import itertools
import json
import logging
+import multiprocessing
import os
import pipes
-import queue
import re
import shlex
import socket
import subprocess
+import signal
import sys
import tempfile
-import threading
import time
import urllib.error
import urllib.request
@@ -55,19 +56,19 @@ DISPLAY_TIME_FORMAT = '%Y-%m-%d %H:%M:%S UTC'
PARSE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
PARSE_TIME_FORMAT_WITH_USEC = '%Y-%m-%dT%H:%M:%S.%fZ'
-DB_SUBPATH = 'i686/core/core.db'
+DB_SUBPATH = 'i686/community/community.db'
MIRROR_URL_FORMAT = '{0}{2}/{1}'
MIRRORLIST_ENTRY_FORMAT = "Server = " + MIRROR_URL_FORMAT + "\n"
DEFAULT_CONNECTION_TIMEOUT = 5
+DEFAULT_DOWNLOAD_TIMEOUT = 5
DEFAULT_CACHE_TIMEOUT = 300
-DEFAULT_N_THREADS = os.cpu_count()
SORT_TYPES = {
'age': 'last server synchronization',
'rate': 'download rate',
- 'country': 'server\'s location',
+ 'country': 'country name, either alphabetically or in the order given by the --country option',
'score': 'MirrorStatus score',
'delay': 'MirrorStatus delay',
}
@@ -129,7 +130,9 @@ def get_mirrorstatus(
return obj, mtime
except (IOError, urllib.error.URLError, socket.timeout) as err:
- raise MirrorStatusError(f'failed to retrieve mirrorstatus data: {err.__class__.__name__}: {err}')
+ raise MirrorStatusError(
+ f'failed to retrieve mirrorstatus data: {err.__class__.__name__}: {err}'
+ ) from err
# ------------------------------ Miscellaneous ------------------------------- #
@@ -168,11 +171,127 @@ def count_countries(mirrors):
return countries
+def country_sort_key(priorities):
+ '''
+ Return a sort key function based on a list of country priorities.
+
+ Args:
+ priorities:
+ The list of countries in the order of priority. Any countries not in
+ the list will be sorted alphabetically after the countries in the
+ list. The countries may be specified by name or country code.
+
+ Returns:
+ A key function to pass to sort().
+ '''
+ priorities = [country.upper() for country in priorities]
+ try:
+ default_priority = priorities.index('*')
+ except ValueError:
+ default_priority = len(priorities)
+
+ def key_func(mirror):
+ country = mirror['country'].upper()
+ code = mirror['country_code'].upper()
+
+ try:
+ return (priorities.index(country), country)
+ except ValueError:
+ pass
+
+ try:
+ return (priorities.index(code), country)
+ except ValueError:
+ pass
+
+ return (default_priority, country)
+
+ return key_func
+
+
+# ------------------------ download timeout handling ------------------------- #
+
+class DownloadTimeout(Exception):
+ '''
+    Download timeout exception raised by DownloadTimer.
+ '''
+
+
+class DownloadTimer():
+ '''
+ Context manager for timing downloads with timeouts.
+ '''
+ def __init__(self, timeout=DEFAULT_DOWNLOAD_TIMEOUT):
+ '''
+ Args:
+ timeout:
+ The download timeout in seconds. The DownloadTimeout exception
+ will be raised in the context after this many seconds.
+ '''
+ self.time = None
+ self.start_time = None
+ self.timeout = timeout
+ self.previous_handler = None
+ self.previous_timer = None
+
+ def raise_timeout(self, signl, frame):
+ '''
+ Raise the DownloadTimeout exception.
+ '''
+ raise DownloadTimeout(f'Download timed out after {self.timeout} second(s).')
+
+ def __enter__(self):
+ self.start_time = time.time()
+ if self.timeout > 0:
+ self.previous_handler = signal.signal(signal.SIGALRM, self.raise_timeout)
+ self.previous_timer = signal.alarm(self.timeout)
+ return self
+
+ def __exit__(self, typ, value, traceback):
+ time_delta = time.time() - self.start_time
+ signal.alarm(0)
+ self.time = time_delta
+ if self.timeout > 0:
+ signal.signal(signal.SIGALRM, self.previous_handler)
+
+ previous_timer = self.previous_timer
+ if previous_timer > 0:
+ remaining_time = int(previous_timer - time_delta)
+ # The alarm should have been raised during the download.
+ if remaining_time <= 0:
+ signal.raise_signal(signal.SIGALRM)
+ else:
+ signal.alarm(remaining_time)
+ self.start_time = None
+
+
# --------------------------------- Sorting ---------------------------------- #
-def sort(mirrors, by=None, n_threads=DEFAULT_N_THREADS): # pylint: disable=invalid-name
+def sort(mirrors, by=None, key=None, **kwargs): # pylint: disable=invalid-name
'''
Sort mirrors by different criteria.
+
+ Args:
+ mirrors:
+ The iterable of mirrors to sort. This will be converted to a list.
+
+ by:
+ A mirrorstatus field by which to sort the mirrors, or one of the
+ following:
+
+ * age - Sort the mirrors by their last synchronization.
+ * rate - Sort the mirrors by download rate.
+
+ key:
+ A custom sorting function that accepts mirrors and returns a sort
+ key. If given, it will override the "by" parameter.
+
+ **kwargs:
+ Keyword arguments that are passed through to rate() when "by" is
+ "rate".
+
+ Returns:
+ The sorted mirrors as a list.
'''
# Ensure that "mirrors" is a list that can be sorted.
if not isinstance(mirrors, list):
@@ -182,21 +301,30 @@ def sort(mirrors, by=None, n_threads=DEFAULT_N_THREADS): # pylint: disable=inva
mirrors.sort(key=lambda m: m['last_sync'], reverse=True)
elif by == 'rate':
- rates = rate(mirrors, n_threads=n_threads)
+ rates = rate(mirrors, **kwargs)
mirrors = sorted(mirrors, key=lambda m: rates[m['url']], reverse=True)
else:
+ if key is None:
+ def key(mir):
+ return mir[by]
try:
- mirrors.sort(key=lambda m: m[by])
- except KeyError:
- raise MirrorStatusError('attempted to sort mirrors by unrecognized criterion: "{}"'.format(by))
+ mirrors.sort(key=key)
+ except KeyError as err:
+ raise MirrorStatusError(
+ 'attempted to sort mirrors by unrecognized criterion: "{}"'.format(by)
+ ) from err
return mirrors
# ---------------------------------- Rating ---------------------------------- #
-def rate_rsync(db_url, connection_timeout=DEFAULT_CONNECTION_TIMEOUT):
+def rate_rsync(
+ db_url,
+ connection_timeout=DEFAULT_CONNECTION_TIMEOUT,
+ download_timeout=DEFAULT_DOWNLOAD_TIMEOUT
+):
'''
Download a database via rsync and return the time and rate of the download.
'''
@@ -208,116 +336,138 @@ def rate_rsync(db_url, connection_timeout=DEFAULT_CONNECTION_TIMEOUT):
]
try:
with tempfile.TemporaryDirectory() as tmpdir:
- time_0 = time.time()
- subprocess.check_call(
- rsync_cmd + [tmpdir],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL
- )
- time_delta = time.time() - time_0
+ with DownloadTimer(timeout=download_timeout) as timer:
+ subprocess.check_call(
+ rsync_cmd + [tmpdir],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL
+ )
+ time_delta = timer.time
size = os.path.getsize(
os.path.join(tmpdir, os.path.basename(DB_SUBPATH))
)
ratio = size / time_delta
return time_delta, ratio
- except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
+ except (
+ subprocess.CalledProcessError,
+ subprocess.TimeoutExpired,
+ FileNotFoundError,
+ DownloadTimeout
+ ) as err:
+ logger = get_logger()
+ logger.warning('failed to rate rsync download (%s): %s', db_url, err)
return 0, 0
-def rate_http(db_url, connection_timeout=DEFAULT_CONNECTION_TIMEOUT):
+def rate_http(
+ db_url,
+ connection_timeout=DEFAULT_CONNECTION_TIMEOUT,
+ download_timeout=DEFAULT_DOWNLOAD_TIMEOUT
+):
'''
Download a database via any protocol supported by urlopen and return the time
and rate of the download.
'''
req = urllib.request.Request(url=db_url)
try:
- with urllib.request.urlopen(req, None, connection_timeout) as handle:
- time_0 = time.time()
+ with urllib.request.urlopen(req, None, connection_timeout) as handle, \
+ DownloadTimer(timeout=download_timeout) as timer:
size = len(handle.read())
- time_delta = time.time() - time_0
+ time_delta = timer.time
ratio = size / time_delta
return time_delta, ratio
- except (OSError, urllib.error.HTTPError, http.client.HTTPException):
+ except (
+ OSError,
+ urllib.error.HTTPError,
+ http.client.HTTPException,
+ DownloadTimeout
+ ) as err:
+ logger = get_logger()
+ logger.warning('failed to rate http(s) download (%s): %s', db_url, err)
return 0, 0
-def rate(mirrors, n_threads=DEFAULT_N_THREADS, connection_timeout=DEFAULT_CONNECTION_TIMEOUT):
+def _rate_unthreaded(mirrors, fmt, kwargs):
'''
- Rate mirrors by timing the download the core repo's database for each one.
+ Rate mirrors without using threads.
'''
- # Ensure that mirrors is not a generator so that its length can be determined.
- if not isinstance(mirrors, tuple):
- mirrors = tuple(mirrors)
+ logger = get_logger()
+ rates = dict()
+ for mir in mirrors:
+ url = mir['url']
+ db_url = url + DB_SUBPATH
+ scheme = urllib.parse.urlparse(url).scheme
- if not mirrors:
- return None
+ if scheme == 'rsync':
+ time_delta, ratio = rate_rsync(db_url, **kwargs)
+ else:
+ time_delta, ratio = rate_http(db_url, **kwargs)
- # At least 1 thread and not more than the number of mirrors.
- n_threads = max(1, min(n_threads, len(mirrors)))
+ kibps = ratio / 1024.0
+ logger.info(fmt.format(url, kibps, time_delta))
+ rates[url] = ratio
+ return rates
- # URL input queue.
- q_in = queue.Queue()
- # URL, elapsed time and rate output queue.
- q_out = queue.Queue()
- def worker():
- while True:
- # To stop a thread, an integer will be inserted in the input queue. Each
- # thread will increment it and re-insert it until it equals the
- # threadcount. After encountering the integer, the thread exits the loop.
- url = q_in.get()
+def _rate_wrapper(func, url, kwargs):
+ '''
+ Wrapper function for multithreaded rating.
+ '''
+ time_delta, ratio = func(url + DB_SUBPATH, **kwargs)
+ return url, time_delta, ratio
- if isinstance(url, int):
- if url < n_threads:
- q_in.put(url + 1)
- else:
- db_url = url + DB_SUBPATH
- scheme = urllib.parse.urlparse(url).scheme
+def _rate_threaded(mirrors, fmt, n_threads, kwargs): # pylint: disable=too-many-locals
+ '''
+ Rate mirrors using threads.
+ '''
+ args = list()
+ for mir in mirrors:
+ url = mir['url']
+ scheme = urllib.parse.urlparse(url).scheme
+ rfunc = rate_rsync if scheme == 'rsync' else rate_http
+ args.append((rfunc, url, kwargs))
- if scheme == 'rsync':
- time_delta, ratio = rate_rsync(db_url, connection_timeout)
- else:
- time_delta, ratio = rate_http(db_url, connection_timeout)
+ logger = get_logger()
+ rates = dict()
+ with multiprocessing.Pool(n_threads) as pool:
+ for url, time_delta, ratio in pool.starmap(_rate_wrapper, args):
+ kibps = ratio / 1024.0
+ logger.info(fmt.format(url, kibps, time_delta))
+ rates[url] = ratio
+ return rates
- q_out.put((url, time_delta, ratio))
- q_in.task_done()
+def rate(
+ mirrors,
+ n_threads=0,
+ **kwargs
+):
+ '''
+ Rate mirrors by timing the download of the community repo's database from
+ each one. Keyword arguments are passed through to rate_rsync and rate_http.
+ '''
+ # Ensure that mirrors is not a generator so that its length can be determined.
+ if not isinstance(mirrors, tuple):
+ mirrors = tuple(mirrors)
- workers = tuple(threading.Thread(target=worker) for _ in range(n_threads))
- for wkr in workers:
- wkr.daemon = True
- wkr.start()
+ if not mirrors:
+ return None
- url_len = max(len(m['url']) for m in mirrors)
logger = get_logger()
- for mir in mirrors:
- url = mir['url']
- logger.info('rating %s', url)
- q_in.put(url)
-
- # To exit the threads.
- q_in.put(0)
- q_in.join()
+ logger.info('rating %s mirror(s) by download speed', len(mirrors))
+ url_len = max(len(mir['url']) for mir in mirrors)
header_fmt = '{{:{:d}s}} {{:>14s}} {{:>9s}}'.format(url_len)
logger.info(header_fmt.format('Server', 'Rate', 'Time'))
fmt = '{{:{:d}s}} {{:8.2f}} KiB/s {{:7.2f}} s'.format(url_len)
- # Loop over the mirrors just to ensure that we get the rate for each mirror.
- # The value in the loop does not (necessarily) correspond to the mirror.
- rates = dict()
- for _ in mirrors:
- url, dtime, ratio = q_out.get()
- kibps = ratio / 1024.0
- logger.info(fmt.format(url, kibps, dtime))
- rates[url] = ratio
- q_out.task_done()
-
- return rates
+ if n_threads > 0:
+ return _rate_threaded(mirrors, fmt, n_threads, kwargs)
+ return _rate_unthreaded(mirrors, fmt, kwargs)
-# ---------------------------- MirrorStatusError ----------------------------- #
+# -------------------------------- Exceptions -------------------------------- #
class MirrorStatusError(Exception):
'''
@@ -325,7 +475,7 @@ class MirrorStatusError(Exception):
'''
def __init__(self, msg):
- super(MirrorStatusError, self).__init__()
+ super().__init__()
self.msg = msg
def __str__(self):
@@ -334,7 +484,7 @@ class MirrorStatusError(Exception):
# ---------------------------- MirrorStatusFilter ---------------------------- #
-class MirrorStatusFilter(): # pylint: disable=too-many-instance-attributes
+class MirrorStatusFilter(): # pylint: disable=too-many-instance-attributes,too-few-public-methods
'''
Filter mirrors by different criteria.
'''
@@ -350,7 +500,7 @@ class MirrorStatusFilter(): # pylint: disable=too-many-instance-attributes
isos=False,
ipv4=False,
ipv6=False
- ):
+ ): # pylint: disable=too-many-arguments
self.min_completion_pct = min_completion_pct
self.countries = tuple(c.upper() for c in countries) if countries else tuple()
self.protocols = protocols
@@ -375,10 +525,11 @@ class MirrorStatusFilter(): # pylint: disable=too-many-instance-attributes
mirrors = (m for m in mirrors if m['completion_pct'] >= self.min_completion_pct)
# Filter by countries.
- if self.countries:
+ countries = self.countries
+ if countries and '*' not in countries:
mirrors = (
m for m in mirrors
- if m['country'].upper() in self.countries or m['country_code'].upper() in self.countries
+ if m['country'].upper() in countries or m['country_code'].upper() in countries
)
# Filter by protocols.
@@ -417,7 +568,13 @@ class MirrorStatusFilter(): # pylint: disable=too-many-instance-attributes
# -------------------------------- Formatting -------------------------------- #
-def format_mirrorlist(mirror_status, mtime, include_country=False, command=None, url=URL):
+def format_mirrorlist(
+ mirror_status,
+ mtime,
+ include_country=False,
+ command=None,
+ url=URL
+): # pylint: disable=too-many-locals
'''
Format the mirrorlist.
'''
@@ -485,8 +642,9 @@ class MirrorStatus():
importers of this module.
'''
- # TODO: move these to another module or remove them completely
- # Related: https://bugs.archlinux.org/task/32895
+ # TODO:
+    # Move these to another module or remove them completely. Related:
+ # https://bugs.archlinux.org/task/32895
REPOSITORIES = (
'community',
'community-staging',
@@ -505,19 +663,21 @@ class MirrorStatus():
def __init__(
self,
connection_timeout=DEFAULT_CONNECTION_TIMEOUT,
+ download_timeout=DEFAULT_DOWNLOAD_TIMEOUT,
cache_timeout=DEFAULT_CACHE_TIMEOUT,
min_completion_pct=1.0,
- threads=DEFAULT_N_THREADS,
+ n_threads=0,
url=URL
- ):
+ ): # pylint: disable=too-many-arguments
self.connection_timeout = connection_timeout
+ self.download_timeout = download_timeout
self.cache_timeout = cache_timeout
self.min_completion_pct = min_completion_pct
- self.threads = threads
self.url = url
self.mirror_status = None
self.ms_mtime = 0
+ self.n_threads = n_threads
def retrieve(self):
'''
@@ -545,8 +705,8 @@ class MirrorStatus():
obj = self.get_obj()
try:
return obj['urls']
- except KeyError:
- raise MirrorStatusError('no mirrors detected in mirror status output')
+ except KeyError as err:
+ raise MirrorStatusError('no mirrors detected in mirror status output') from err
def filter(self, mirrors=None, **kwargs):
'''
@@ -557,21 +717,21 @@ class MirrorStatus():
msf = MirrorStatusFilter(min_completion_pct=self.min_completion_pct, **kwargs)
yield from msf.filter_mirrors(mirrors)
- def sort(self, mirrors=None, **kwargs):
+    def sort(self, mirrors=None, **kwargs):
'''
Sort mirrors by various criteria.
'''
if mirrors is None:
mirrors = self.get_mirrors()
- yield from sort(mirrors, n_threads=self.threads, **kwargs)
+ kwargs.setdefault('connection_timeout', self.connection_timeout)
+ kwargs.setdefault('download_timeout', self.download_timeout)
+ yield from sort(mirrors, n_threads=self.n_threads, **kwargs)
def rate(self, mirrors=None, **kwargs):
'''
Sort mirrors by download speed.
'''
- if mirrors is None:
- mirrors = self.get_mirrors()
- yield from sort(mirrors, n_threads=self.threads, by='rate', **kwargs)
+    yield from self.sort(mirrors, by='rate', **kwargs)
def get_mirrorlist(self, mirrors=None, include_country=False, cmd=None):
'''
@@ -582,7 +742,13 @@ class MirrorStatus():
if not isinstance(mirrors, list):
mirrors = list(mirrors)
obj['urls'] = mirrors
- return format_mirrorlist(obj, self.ms_mtime, include_country=include_country, command=cmd, url=self.url)
+ return format_mirrorlist(
+ obj,
+ self.ms_mtime,
+ include_country=include_country,
+ command=cmd,
+ url=self.url
+ )
def list_countries(self):
'''
@@ -602,9 +768,14 @@ class ListCountries(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ms = MirrorStatus(url=namespace.url) # pylint: disable=invalid-name
countries = ms.list_countries()
- width = max(len(c) for c, cc in countries)
- number = len(str(max(countries.values())))
- fmt = '{{:{:d}s}} {{}} {{:{:d}d}}'.format(width, number)
+ headers = ('Country', 'Code', 'Count')
+ widths = [len(h) for h in headers]
+ widths[0] = max(widths[0], max(len(c) for c, cc in countries))
+ widths[2] = max(widths[2], len(str(max(countries.values()))))
+ fmt = '{{:{:d}s}} {{:>{:d}s}} {{:{:d}d}}'.format(*widths)
+ hdr_fmt = fmt.replace('d', 's')
+ print(hdr_fmt.format(*headers))
+ print(' '.join('-' * w for w in widths))
for (ctry, count), nmbr in sorted(countries.items(), key=lambda x: x[0][0]):
print(fmt.format(ctry, count, nmbr))
sys.exit(0)
@@ -640,10 +811,10 @@ def add_arguments(parser):
help='The number of seconds to wait before a connection times out. Default: %(default)s'
)
-# parser.add_argument(
-# '--download-timeout', type=int, metavar='n',
-# help='The number of seconds to wait before a download times out. The threshold is checked after each chunk is read, so the actual timeout may take longer.'
-# )
+ parser.add_argument(
+ '--download-timeout', type=int, metavar='n', default=DEFAULT_DOWNLOAD_TIMEOUT,
+ help='The number of seconds to wait before a download times out. Default: %(default)s'
+ )
parser.add_argument(
'--list-countries', action=ListCountries, nargs=0,
@@ -652,12 +823,19 @@ def add_arguments(parser):
parser.add_argument(
'--cache-timeout', type=int, metavar='n', default=DEFAULT_CACHE_TIMEOUT,
- help='The cache timeout in seconds for the data retrieved from the Arch Linux Mirror Status API. The default is %(default)s.'
+ help=(
+ '''The cache timeout in seconds for the data retrieved from the Arch
+ Linux Mirror Status API. The default is %(default)s. '''
+ )
)
parser.add_argument(
'--url', default=URL,
- help='The URL from which to retrieve the mirror data in JSON format. If different from the default, it must follow the same format. Default: %(default)s'
+ help=(
+ '''The URL from which to retrieve the mirror data in JSON format. If
+ different from the default, it must follow the same format. Default:
+ %(default)s'''
+ )
)
parser.add_argument(
@@ -668,12 +846,19 @@ def add_arguments(parser):
sort_help = '; '.join('"{}": {}'.format(k, v) for k, v in SORT_TYPES.items())
parser.add_argument(
'--sort', choices=SORT_TYPES,
- help='Sort the mirrorlist. {}.'.format(sort_help)
+ help=f'Sort the mirrorlist. {sort_help}.'
)
parser.add_argument(
- '--threads', type=int, metavar='n', default=DEFAULT_N_THREADS,
- help='The maximum number of threads to use when rating mirrors. Keep in mind that this may skew your results if your connection is saturated. Default: %(default)s (number of detected CPUs)'
+ '--threads', metavar='n', type=int, default=0,
+ help=(
+ '''Use n threads for rating mirrors. This option will speed up the
+ rating step but the results will be inaccurate if the local
+ bandwidth is saturated at any point during the operation. If rating
+ takes too long without this option then you should probably apply
+ more filters to reduce the number of rated servers before using this
+ option.'''
+ )
)
parser.add_argument(
@@ -688,22 +873,52 @@ def add_arguments(parser):
filters = parser.add_argument_group(
'filters',
- 'The following filters are inclusive, i.e. the returned list will only contain mirrors for which all of the given conditions are met.'
+ '''The following filters are inclusive, i.e. the returned list will only
+ contain mirrors for which all of the given conditions are met.'''
)
filters.add_argument(
'-a', '--age', type=float, metavar='n',
- help='Only return mirrors that have synchronized in the last n hours. n may be an integer or a decimal number.'
+ help=(
+ '''Only return mirrors that have synchronized in the last n hours. n
+ may be an integer or a decimal number.'''
+ )
)
filters.add_argument(
- '-c', '--country', dest='countries', action='append', metavar='<country>',
- help='Match one of the given countries (case-sensitive). Multiple countries may be selected using commas (e.g. "France,Germany") or by passing this option multiple times. Use "--list-countries" to see which are available.'
+ '-c', '--country', dest='countries', action='append', metavar='<country name or code>',
+ help=(
+ '''Restrict mirrors to selected countries. Countries may be given by
+ name or country code, or a mix of both. The case is ignored.
+ Multiple countries may be selected using commas (e.g. --country
+ France,Germany) or by passing this option multiple times (e.g. -c
+ fr -c de). Use "--list-countries" to display a table of available
+ countries along with their country codes. When sorting by country,
+ this option may also be used to sort by a preferred order instead of
+ alphabetically. For example, to select mirrors from Sweden, Norway,
+ Denmark and Finland, in that order, use the options "--country
+ se,no,dk,fi --sort country". To set a preferred country sort order
+ without filtering any countries, this option also recognizes the
+ glob pattern "*", which will match any country. For example, to
+ ensure that any mirrors from Sweden are at the top of the list and
+ any mirrors from Denmark are at the bottom, with any other countries
+ in between, use "--country \'se,*,dk\' --sort country". It is
+ however important to note that when "*" is given along with other
+ filter criteria, there is no guarantee that certain countries will
+ be included in the results. For example, with the options "--country
+ \'se,*,dk\' --sort country --latest 10", the latest 10 mirrors may
+ all be from the United States. When the glob pattern is present, it
+ only ensures that if certain countries are included in the results,
+ they will be sorted in the requested order.'''
+ )
)
filters.add_argument(
'-f', '--fastest', type=int, metavar='n',
- help='Return the n fastest mirrors that meet the other criteria. Do not use this option without other filtering options.'
+ help=(
+ '''Return the n fastest mirrors that meet the other criteria. Do not
+ use this option without other filtering options.'''
+ )
)
filters.add_argument(
@@ -733,12 +948,20 @@ def add_arguments(parser):
filters.add_argument(
'-p', '--protocol', dest='protocols', action='append', metavar='<protocol>',
- help='Match one of the given protocols, e.g. "https" or "ftp". Multiple protocols may be selected using commas (e.g. "https,http") or by passing this option multiple times.'
+ help=(
+ '''Match one of the given protocols, e.g. "https" or "ftp". Multiple
+ protocols may be selected using commas (e.g. "https,http") or by
+ passing this option multiple times.'''
+ )
)
filters.add_argument(
'--completion-percent', type=float, metavar='[0-100]', default=100.,
- help='Set the minimum completion percent for the returned mirrors. Check the mirrorstatus webpage for the meaning of this parameter. Default value: %(default)s.'
+ help=(
+ '''Set the minimum completion percent for the returned mirrors.
+ Check the mirrorstatus webpage for the meaning of this parameter.
+ Default value: %(default)s.'''
+ )
)
filters.add_argument(
@@ -807,11 +1030,11 @@ def process_options(options, mirrorstatus=None, mirrors=None):
if not mirrorstatus:
mirrorstatus = MirrorStatus(
connection_timeout=options.connection_timeout,
- # download_timeout=options.download_timeout,
+ download_timeout=options.download_timeout,
cache_timeout=options.cache_timeout,
min_completion_pct=(options.completion_percent / 100.),
- threads=options.threads,
- url=options.url
+ url=options.url,
+ n_threads=options.threads
)
if mirrors is None:
@@ -843,7 +1066,10 @@ def process_options(options, mirrorstatus=None, mirrors=None):
mirrors = itertools.islice(mirrors, options.fastest)
if options.sort and not (options.sort == 'rate' and options.fastest):
- mirrors = mirrorstatus.sort(mirrors, by=options.sort)
+ if options.sort == 'country' and options.countries:
+ mirrors = mirrorstatus.sort(mirrors, key=country_sort_key(options.countries))
+ else:
+ mirrors = mirrorstatus.sort(mirrors, by=options.sort)
if options.number:
mirrors = list(mirrors)[:options.number]
@@ -896,14 +1122,14 @@ def main(args=None, configure_logging=False): # pylint: disable=too-many-branch
if mirrorlist is None:
sys.exit('error: no mirrors found')
except MirrorStatusError as err:
- sys.exit('error: {}\n'.format(err.msg))
+ sys.exit(f'error: {err.msg}')
if options.save:
try:
with open(options.save, 'w') as handle:
handle.write(mirrorlist)
except IOError as err:
- sys.exit('error: {}\n'.format(err.strerror))
+ sys.exit(f'error: {err.strerror}')
else:
print(mirrorlist)
diff --git a/man/reflector.1.gz b/man/reflector.1.gz
index 82b020b..680c67f 100644
--- a/man/reflector.1.gz
+++ b/man/reflector.1.gz
Binary files differ
diff --git a/reflector b/reflector
index ddfe468..1a87f81 100755
--- a/reflector
+++ b/reflector
@@ -1,2 +1,4 @@
-#!/bin/bash
-python3 -m Reflector "$@" \ No newline at end of file
+#!python
+import sys
+import Reflector
+sys.exit(Reflector.run_main(configure_logging=True))
diff --git a/setup.py b/setup.py
index 110738c..d1a90ed 100644
--- a/setup.py
+++ b/setup.py
@@ -4,11 +4,12 @@ from distutils.core import setup
import time
setup(
- name='''Reflector''',
- version=time.strftime('%Y.%m.%d.%H.%M.%S', time.gmtime(1599077629)),
- description='''A Python 3 module and script to retrieve and filter the latest Pacman mirror list.''',
- author='''Xyne''',
- author_email='''ac xunilhcra enyx, backwards''',
- url='''http://xyne.archlinux.ca/projects/reflector''',
- py_modules=['''Reflector'''],
+ name='Reflector',
+    version=time.strftime('%Y.%m.%d.%H.%M.%S', time.gmtime(1617446608)),
+ description='''A Python 3 module and script to retrieve and filter the latest Pacman mirror list.''',
+ author='Xyne',
+ author_email='ac xunilhcra enyx, backwards',
+ url='''http://xyne.archlinux.ca/projects/reflector''',
+ py_modules=['Reflector'],
+ scripts=['reflector']
)